Stefan Bruckner

Stefan Bruckner is a full professor of visualization at the Department of Informatics of the University of Bergen, Norway. He received his master's degree (2004) and Ph.D. (2008), both in Computer Science, from the TU Wien, Austria, and was awarded the habilitation (venia docendi) in Practical Computer Science in 2012. Before his appointment in Bergen in 2013, he was an assistant professor at the Institute of Computer Graphics and Algorithms of the TU Wien.

His research interests include all aspects of data visualization, with a particular focus on interactive techniques for the exploration and analysis of complex heterogeneous data spaces. He has made significant contributions to areas such as illustrative visualization, volume rendering, smart visual interfaces, biomedical data visualization, and visual parameter space exploration. In addition to his contributions to basic research, he has successfully led industry collaborations with major companies such as GE Healthcare and Agfa HealthCare, and holds 7 granted patents.

He is a recipient of the Eurographics Young Researcher Award and the Karl-Heinz-Höhne Award for Medical Visualization, and his research has received 11 best paper awards and honorable mentions at international events. He has served as program co-chair of EuroVis, PacificVis, the Eurographics Workshop on Visual Computing for Biology and Medicine, and the Eurographics Medical Prize, and is a member of the editorial boards of IEEE Transactions on Visualization and Computer Graphics as well as Computers & Graphics. He currently serves on the Eurographics Executive Committee and is a member of ACM SIGGRAPH, Eurographics, and the IEEE Computer Society.

Since February 2023, I have been heading the Chair of Visual Analytics at the University of Rostock, Germany. Please update your contact information accordingly.

 

Publications

2023

    [PDF] [DOI] [Bibtex]
    @article{mittenentzwei2023heros,
    journal = {Computer Graphics Forum},
    title = {{Do Disease Stories need a Hero? Effects of Human Protagonists on a Narrative Visualization about Cerebral Small Vessel Disease}},
    author = {Mittenentzwei, Sarah and Weiß, Veronika and Schreiber, Stefanie and Garrison, Laura A. and Bruckner, Stefan and Pfister, Malte and Preim, Bernhard and Meuschke, Monique},
    year = {2023},
    publisher = {The Eurographics Association and John Wiley \& Sons Ltd.},
    ISSN = {1467-8659},
    DOI = {10.1111/cgf.14817},
    abstract = {Authors use various media formats to convey disease information to a broad audience, from articles and videos to interviews or documentaries. These media often include human characters, such as patients or treating physicians, who are involved with the disease. While artistic media, such as hand-crafted illustrations and animations are used for health communication in many cases, our goal is to focus on data-driven visualizations. Over the last decade, narrative visualization has experienced increasing prominence, employing storytelling techniques to present data in an understandable way. Similar to classic storytelling formats, narrative medical visualizations may also take a human character-centered design approach. However, the impact of this form of data communication on the user is largely unexplored. This study investigates the protagonist's influence on user experience in terms of engagement, identification, self-referencing, emotional response, perceived credibility, and time spent in the story. Our experimental setup utilizes a character-driven story structure for disease stories derived from Joseph Campbell's Hero's Journey. Using this structure, we generated three conditions for a cerebral small vessel disease story that vary by their protagonist: (1) a patient, (2) a physician, and (3) a base condition with no human protagonist. These story variants formed the basis for our hypotheses on the effect of a human protagonist in disease stories, which we evaluated in an online study with 30 participants. Our findings indicate that a human protagonist exerts various influences on the story perception and that these also vary depending on the type of protagonist.},
    pdf = {pdfs/garrison-diseasestories.pdf},
    images = {images/garrison-diseasestories.png},
    thumbnails = {images/garrison-diseasestories-thumb.png},
    project = {VIDI}
    }
    [PDF] [Bibtex]
    @incollection{garrison2023narrativemedvisbook,
    title = {Current Approaches in Narrative Medical Visualization},
    author = {Garrison, Laura Ann and Meuschke, Monique and Preim, Bernhard and Bruckner, Stefan},
    year = 2023,
    booktitle = {Approaches for Science Illustration and Communication},
    publisher = {Springer},
    address = {Cham, Switzerland},
    note = {in press},
    editor = {Mark Roughley},
    chapter = 4,
    pdf = {pdfs/garrison2023narrativemedvisbook.pdf},
    images = {images/garrison2023narrativemedvisbook.png},
    thumbnails = {images/garrison2023narrativemedvisbook-thumb.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @article{mittenentzwei2023investigating,
    title={Investigating user behavior in slideshows and scrollytelling as narrative genres in medical visualization},
    author={Mittenentzwei, Sarah and Garrison, Laura A and M{\"o}rth, Eric and Lawonn, Kai and Bruckner, Stefan and Preim, Bernhard and Meuschke, Monique},
    journal={Computers \& Graphics},
    year={2023},
    publisher={Elsevier},
    abstract={In this study, we explore the impact of genre and navigation on user comprehension, preferences, and behaviors when experiencing data-driven disease stories. Our between-subject study (n=85) evaluated these aspects in-the-wild, with results pointing towards some general design considerations to keep in mind when authoring data-driven disease stories. Combining storytelling with interactive new media techniques, narrative medical visualization is a promising approach to communicating topics in medicine to a general audience in an accessible manner. For patients, visual storytelling may help them to better understand medical procedures and treatment options for more informed decision-making, boost their confidence and alleviate anxiety, and promote stronger personal health advocacy. Narrative medical visualization provides the building blocks for producing data-driven disease stories, which may be presented in several visual styles. These different styles correspond to different narrative genres, e.g., a Slideshow. Narrative genres can employ different navigational approaches. For instance, a Slideshow may rely on click interactions to advance through a story, while Scrollytelling typically uses vertical scrolling for navigation. While a common goal of a narrative medical visualization is to encourage a particular behavior, e.g., quitting smoking, it is unclear to what extent the choice of genre influences subsequent user behavior. Our study opens a new research direction into choice of narrative genre on user preferences and behavior in data-driven disease stories.},
    pdf = {pdfs/mittenentzwei2023userbehavior.pdf},
    images = {images/mittenentzwei2023userbehavior.png},
    thumbnails = {images/mittenentzwei2023userbehavior-thumb.png},
    project = {VIDI},
    doi={10.1016/j.cag.2023.06.011}
    }
    [PDF] [DOI] [Bibtex]
    @article{garrison2023molaesthetics,
    author={Garrison, Laura A. and Goodsell, David S. and Bruckner, Stefan},
    journal={IEEE Computer Graphics and Applications},
    title={Changing Aesthetics in Biomolecular Graphics},
    year={2023},
    volume={43},
    number={3},
    pages={94-101},
    doi={10.1109/MCG.2023.3250680},
    abstract={Aesthetics for the visualization of biomolecular structures have evolved over the years according to technological advances, user needs, and modes of dissemination. In this article, we explore the goals, challenges, and solutions that have shaped the current landscape of biomolecular imagery from the overlapping perspectives of computer science, structural biology, and biomedical illustration. We discuss changing approaches to rendering, color, human–computer interface, and narrative in the development and presentation of biomolecular graphics. With this historical perspective on the evolving styles and trends in each of these areas, we identify opportunities and challenges for future aesthetics in biomolecular graphics that encourage continued collaboration from multiple intersecting fields.},
    pdf = {pdfs/garrison-aestheticsmol.pdf},
    images = {images/garrison-aestheticsmol.png},
    thumbnails = {images/garrison-aestheticsmol-thumb.png},
    project = {VIDI}
    }

2022

    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings {Trautner-2022-HCP,
    author = {Trautner, Thomas and Sbardellati, Maximilian and Stoppel, Sergej and Bruckner, Stefan},
    title = {{Honeycomb Plots: Visual Enhancements for Hexagonal Maps}},
    booktitle = {Proc. of VMV 2022: Vision, Modeling, and Visualization},
    editor = {Bender, Jan and Botsch, Mario and Keim, Daniel A.},
    pages = {65--73},
    year = {2022},
    publisher = {The Eurographics Association},
    ISBN = {978-3-03868-189-2},
    DOI = {10.2312/vmv.20221205},
    abstract = {Aggregation through binning is a commonly used technique for visualizing large, dense, and overplotted two-dimensional data sets. However, aggregation can hide nuanced data-distribution features and complicates the display of multiple data-dependent variables, since color mapping is the primary means of encoding. In this paper, we present novel techniques for enhancing hexplots with spatialization cues while avoiding common disadvantages of three-dimensional visualizations. In particular, we focus on techniques relying on preattentive features that exploit shading and shape cues to emphasize relative value differences. Furthermore, we introduce a novel visual encoding that conveys information about the data distributions or trends within individual tiles. Based on multiple usage examples from different domains and real-world scenarios, we generate expressive visualizations that increase the information content of classic hexplots and validate their effectiveness in a user study.},
    pdf = "pdfs/Trautner-2022-HCP.pdf",
    thumbnails = "images/Trautner-2022-HCP-thumb.png",
    images = "images/Trautner-2022-HCP-thumb.png",
    youtube = "https://youtu.be/mU7QFVP3yKQ",
    git = "https://github.com/TTrautner/HoneycombPlots"
    }
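The baseline being enhanced here is the classic hexplot. The sketch below shows only that baseline, plain hexagonal binning with matplotlib on synthetic data, not the paper's technique itself:

import numpy as np
import matplotlib.pyplot as plt

# Synthetic, overplotted 2D data: two overlapping point clouds.
rng = np.random.default_rng(7)
x = np.concatenate([rng.normal(0, 1, 50_000), rng.normal(3, 0.5, 20_000)])
y = np.concatenate([rng.normal(0, 1, 50_000), rng.normal(1, 0.5, 20_000)])

fig, ax = plt.subplots(figsize=(6, 5))
# gridsize controls the hexagonal tiling resolution; per-tile counts are
# encoded via the colormap, the primary channel that Honeycomb Plots
# augment with shading, shape, and per-tile distribution cues.
hb = ax.hexbin(x, y, gridsize=40, cmap="viridis", mincnt=1)
fig.colorbar(hb, ax=ax, label="points per tile")
ax.set(xlabel="x", ylabel="y", title="Classic hexplot (aggregation by binning)")
plt.show()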
    [PDF] [DOI] [Bibtex]
    @inproceedings {EichnerMoerth2022MuSIC,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    editor = {Renata G. Raidou and Björn Sommer and Torsten W. Kuhlen and Michael Krone and Thomas Schultz and Hsiang-Yun Wu},
    title = {{MuSIC: Multi-Sequential Interactive Co-Registration for Cancer Imaging Data based on Segmentation Masks}},
    author = {Eichner, Tanja* and Mörth, Eric* and Wagner-Larsen, Kari S. and Lura, Njål and Haldorsen, Ingfrid S. and Gröller, Eduard and Bruckner, Stefan and Smit, Noeska N.},
    note = {Best Paper Honorable Mention at VCBM 2022},
    project = {ttmedvis},
    year = {2022},
    abstract = {In gynecologic cancer imaging, multiple magnetic resonance imaging (MRI) sequences are acquired per patient to reveal different tissue characteristics. However, after image acquisition, the anatomical structures can be misaligned in the various sequences due to changing patient location in the scanner and organ movements. The co-registration process aims to align the sequences to allow for multi-sequential tumor imaging analysis. However, automatic co-registration often leads to unsatisfying results. To address this problem, we propose the web-based application MuSIC (Multi-Sequential Interactive Co-registration). The approach allows medical experts to co-register multiple sequences simultaneously based on a pre-defined segmentation mask generated for one of the sequences. Our contributions lie in our proposed workflow. First, a shape matching algorithm based on dual annealing searches for the tumor position in each sequence. The user can then interactively adapt the proposed segmentation positions if needed. During this procedure, we include a multi-modal magic lens visualization for visual quality assessment. Then, we register the volumes based on the segmentation mask positions. We allow for both rigid and deformable registration. Finally, we conducted a usability analysis with seven medical and machine learning experts to verify the utility of our approach. Our participants highly appreciate the multi-sequential setup and see themselves using MuSIC in the future.
    },
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-177-9},
    DOI = {10.2312/vcbm.20221190},
    pdf = {pdfs/EichnerMoerth_2022.pdf},
    thumbnails = {images/EichnerMoerth_2022.PNG},
    images = {images/EichnerMoerth_2022.PNG},
    }
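The workflow above includes a shape-matching step based on dual annealing. The toy sketch below (hypothetical 2D binary masks, translation-only search) shows how a global optimizer such as SciPy's dual_annealing can locate a mask by maximizing overlap; the actual system operates on real MRI sequences with interactive refinement.

import numpy as np
from scipy.ndimage import shift as nd_shift
from scipy.optimize import dual_annealing

# Toy data: a "tumor" appearance in one sequence and a segmentation mask
# drawn at a different position (as if taken from the reference sequence).
image = np.zeros((128, 128))
image[70:90, 40:65] = 1.0
mask = np.zeros((128, 128))
mask[30:50, 30:55] = 1.0

def negative_overlap(offset):
    """Cost: negative overlap between the shifted mask and the image."""
    moved = nd_shift(mask, offset, order=0, mode="constant")
    return -float((moved * image).sum())

# Global search over translations; no_local_search avoids gradient-based
# refinement on this piecewise-constant cost.
result = dual_annealing(negative_overlap, bounds=[(-60, 60), (-60, 60)],
                        seed=1, no_local_search=True)
print("estimated offset (dy, dx):", np.round(result.x, 1))  # expected near (40, 10)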
    [PDF] [DOI] [Bibtex]
    @inproceedings {Kleinau2022Tornado,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    editor = {Renata G. Raidou and Björn Sommer and Torsten W. Kuhlen and Michael Krone and Thomas Schultz and Hsiang-Yun Wu},
    title = {{Is there a Tornado in Alex's Blood Flow? A Case Study for Narrative Medical Visualization}},
    project = {ttmedvis},
    author = {Kleinau, Anna and Stupak, Evgenia and Mörth, Eric and Garrison, Laura A. and Mittenentzwei, Sarah and Smit, Noeska N. and Lawonn, Kai and Bruckner, Stefan and Gutberlet, Matthias and Preim, Bernhard and Meuschke, Monique},
    year = {2022},
    abstract = {Narrative visualization advantageously combines storytelling with new media formats and techniques, like interactivity, to create improved learning experiences. In medicine, it has the potential to improve patient understanding of diagnostic procedures and treatment options, promote confidence, reduce anxiety, and support informed decision-making. However, limited scientific research has been conducted regarding the use of narrative visualization in medicine. To explore the value of narrative visualization in this domain, we introduce a data-driven story to inform a broad audience about the usage of measured blood flow data to diagnose and treat cardiovascular diseases. The focus of the story is on blood flow vortices in the aorta, with which imaging technique they are examined, and why they can be dangerous. In an interdisciplinary team, we define the main contents of the story and the resulting design questions. We sketch the iterative design process and implement the story based on two genres. In a between-subject study, we evaluate the suitability and understandability of the story and the influence of different navigation concepts on user experience. Finally, we discuss reusable concepts for further narrative medical visualization projects.},
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-177-9},
    DOI = {10.2312/vcbm.20221183},
    pdf = {pdfs/Kleinau_2022.pdf},
    thumbnails = {images/Kleinau_2022.PNG},
    images = {images/Kleinau_2022.PNG},
    }
    [PDF] [DOI] [Bibtex]
    @article{Meuschke2022narrative,
    title = {Narrative medical visualization to communicate disease data},
    author = {Meuschke, Monique and Garrison, Laura A. and Smit, Noeska N. and Bach, Benjamin and Mittenentzwei, Sarah and Wei{\ss}, Veronika and Bruckner, Stefan and Lawonn, Kai and Preim, Bernhard},
    year = 2022,
    journal = {Computers \& Graphics},
    volume = 107,
    pages = {144--157},
    doi = {10.1016/j.cag.2022.07.017},
    issn = {0097-8493},
    url = {https://www.sciencedirect.com/science/article/pii/S009784932200139X},
    abstract = {This paper explores narrative techniques combined with medical visualizations to tell data-driven stories about diseases for a general audience. The field of medical illustration uses narrative visualization through hand-crafted techniques to promote health literacy. However, data-driven narrative visualization has rarely been applied to medical data. We derived a template for creating stories about diseases and applied it to three selected diseases to demonstrate how narrative techniques could support visual communication and facilitate understanding of medical data. One of our main considerations is how interactive 3D anatomical models can be integrated into the story and whether this leads to compelling stories in which the users feel involved. A between-subject study with 90 participants suggests that the combination of a carefully designed narrative structure, the constant involvement of a specific patient, high-qualitative visualizations combined with easy-to-use interactions, are critical for an understandable story about diseases that would be remembered by participants.},
    pdf = {pdfs/Narrative_medical_MEUSCHKE_DOA18072022_AFV.pdf},
    thumbnails = {images/Meuschke2022narrative-thumb.png},
    images = {images/Meuschke2022narrative.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @Article{Moerth2022ScrollyVis,
    author = {Mörth, Eric and Bruckner, Stefan and Smit, Noeska N.},
    title = {ScrollyVis: Interactive visual authoring of guided dynamic narratives for scientific scrollytelling},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2022},
    abstract = {Visual stories are an effective and powerful tool to convey specific information to a diverse public. Scrollytelling is a recent visual storytelling technique extensively used on the web, where content appears or changes as users scroll up or down a page. By employing the familiar gesture of scrolling as its primary interaction mechanism, it provides users with a sense of control, exploration and discoverability while still offering a simple and intuitive interface. In this paper, we present a novel approach for authoring, editing, and presenting data-driven scientific narratives using scrollytelling. Our method flexibly integrates common sources such as images, text, and video, but also supports more specialized visualization techniques such as interactive maps as well as scalar field and mesh data visualizations. We show that scrolling navigation can be used to traverse dynamic narratives and demonstrate how it can be combined with interactive parameter exploration. The resulting system consists of an extensible web-based authoring tool capable of exporting stand-alone stories that can be hosted on any web server. We demonstrate the power and utility of our approach with case studies from several diverse scientific fields and with a user study including 12 participants of diverse professional backgrounds. Furthermore, an expert in creating interactive articles assessed the usefulness of our approach and the quality of the created stories.},
    project = {ttmedvis},
    pdf = {pdfs/Moerth_2022_ScrollyVis.pdf},
    thumbnails = {images/Moerth_2022_ScrollyVis.png},
    images = {images/Moerth_2022_ScrollyVis.png},
    pages={1-12},
    doi={10.1109/TVCG.2022.3205769},
    }
    [PDF] [DOI] [VID] [Bibtex]
    @article{Moerth2022ICEVis,
    title = {ICEVis: Interactive Clustering Exploration for tumor sub-region analysis in multiparametric cancer imaging},
    author = {Mörth, Eric and Eichner, Tanja and Haldorsen, Ingfrid S. and Bruckner, Stefan and Smit, Noeska N.},
    year = 2022,
    journal = {Proceedings of the International Symposium on Visual Information Communication and Interaction (VINCI'22)},
    volume = {15},
    pages = {5},
    doi = {10.1145/3554944.3554958},
    abstract = {Tumor tissue characteristics derived from imaging data are gaining importance in clinical research. Tumor sub-regions may play a critical role in defining tumor types and may hold essential information about tumor aggressiveness. Depending on the tumor’s location within the body, such sub-regions can be easily identified and determined by physiology, but these sub-regions are not readily visible to others. Regions within a tumor are currently explored by comparing the image sequences and analyzing the tissue heterogeneity present. To improve the exploration of such tumor sub-regions, we propose a visual analytics tool called ICEVis. ICEVis supports the identification of tumor sub-regions and corresponding features combined with cluster visualizations highlighting cluster validity. It is often difficult to estimate the optimal number of clusters; we provide rich facilities to support this task, incorporating various statistical measures and interactive exploration of the results. We evaluated our tool with three clinical researchers to show the potential of our approach.
    },
    note = {Best Short Paper at VINCI 2022},
    images = "images/Moerth_2022_ICEVis.png",
    thumbnails = "images/Moerth_2022_ICEVis.png",
    pdf = {pdfs/Moerth_2022_ICEVis.pdf},
    vid = {vids/ICEVis.mp4},
    project = "ttmedvis",
    }
    [PDF] [Bibtex]
    @article{Kristiansen2022ContentDriven,
    title = {Content-Driven Layout for Visualization Design},
    author = {Kristiansen, Yngve and Garrison, Laura and Bruckner, Stefan},
    year = 2022,
    journal = {Proceedings of the International Symposium on Visual Information Communication and Interaction},
    note = {to appear},
    abstract = {Multi-view visualizations are typically presented in a grid layout with elements positioned according to their bounding rectangles. These rectangles often contain unused white space. In cases where Tufte’s Shrink Principle can be applied to reduce non-data-ink without impairing the communication of information, unused white space can be utilized for the placement of other elements. This is often done in manually “hand-crafted” layouts by designers. However, upon changes to individual elements, this design process has to be repeated. To reduce non-data-ink and repetitive manual design, we contribute a method for automatically turning a grid layout into a content-driven layout, where elements are positioned with respect to their contents. Existing approaches have explored the use of a force simulation in conjunction with proxy geometries to simplify collision handling for irregular shapes. Such customized force directed layouts are usually unstable, and often require additional constraints to run properly. In addition, proxy geometries become less accurate and effective with more irregular shapes. To solve these shortcomings, we contribute an approach for identifying central elements in an original grid layout in order to set up corresponding attractive forces. Furthermore, we utilize an imagebased approach for collision detection and avoidance that works accurately for highly irregular shapes. We demonstrate the utility of our approach with three case studies.},
    images = "images/Kristiansen-2022-LungsDt.PNG",
    thumbnails = "images/Kristiansen-2022-LungsDt.PNG",
    pdf = {pdfs/Kristiansen-2022-CDL.pdf},
    project = "MetaVis",
    }
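The abstract contrasts geometric proxy shapes with image-based collision handling. As a minimal illustration of the image-based idea (hypothetical disk-shaped elements stand in for arbitrary irregular shapes), two elements collide exactly when their rasterized occupancy masks overlap:

import numpy as np

CANVAS = (200, 200)

def disk_mask(center, radius, shape=CANVAS):
    """Rasterize a disk into a boolean occupancy mask (stand-in for any shape)."""
    yy, xx = np.mgrid[:shape[0], :shape[1]]
    return (yy - center[0]) ** 2 + (xx - center[1]) ** 2 <= radius ** 2

a = disk_mask((80, 80), 30)
b = disk_mask((120, 120), 30)
print(np.logical_and(a, b).any())  # pixel-accurate overlap test: True here

Because the test operates on rendered pixels, it stays accurate for shapes where circle or box proxies would be too coarse.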
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Garrison2022MolColor,
    author = "Laura A. Garrison and Stefan Bruckner",
    title = "Considering Best Practices in Color Palettes for Molecular Visualizations",
    journal = "Journal of Integrative Bioinformatics",
    year = "2022",
    abstract = "Biomedical illustration and visualization techniques provide a window into complex molecular worlds that are difficult to capture through experimental means alone. Biomedical illustrators frequently employ color to help tell a molecular story, e.g., to identify key molecules in a signaling pathway. Currently, color use for molecules is largely arbitrary and often chosen based on the client, cultural factors, or personal taste. The study of molecular dynamics is relatively young, and some stakeholders argue that color use guidelines would throttle the growth of the field. Instead, content authors have ample creative freedom to choose an aesthetic that, e.g., supports the story they want to tell. However, such creative freedom comes at a price. The color design process is challenging, particularly for those without a background in color theory. The result is a semantically inconsistent color space that reduces the interpretability and effectiveness of molecular visualizations as a whole. Our contribution in this paper is threefold. We first discuss some of the factors that contribute to this array of color palettes. Second, we provide a brief sampling of color palettes used in both industry and research sectors. Lastly, we suggest considerations for developing best practices around color palettes applied to molecular visualization.",
    images = "images/garrison-molecularcolor-full.png",
    thumbnails = "images/garrison-molecularcolor-thumb.png",
    pdf = "pdfs/garrison-molecularcolor.pdf",
    publisher = "De Gruyter",
    doi = "10.1515/jib-2022-0016",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Garrison2022PhysioSTAR,
    author = "Laura A. Garrison and Ivan Kolesar and Ivan Viola and Helwig Hauser and Stefan Bruckner",
    title = "Trends & Opportunities in Visualization for Physiology: A Multiscale Overview",
    journal = "Computer Graphics Forum",
    year = "2022",
    volume = "41",
    number = "3",
    pages = "609-643",
    doi = "10.1111/cgf.14575",
    abstract = "Combining elements of biology, chemistry, physics, and medicine, the science of human physiology is complex and multifaceted. In this report, we offer a broad and multiscale perspective on key developments and challenges in visualization for physiology. Our literature search process combined standard methods with a state-of-the-art visual analysis search tool to identify surveys and representative individual approaches for physiology. Our resulting taxonomy sorts literature on two levels. The first level categorizes literature according to organizational complexity and ranges from molecule to organ. A second level identifies any of three high-level visualization tasks within a given work: exploration, analysis, and communication. The findings of this report may be used by visualization researchers to understand the overarching trends, challenges, and opportunities in visualization for physiology and to provide a foundation for discussion and future research directions in this area. ",
    images = "images/garrison-STAR-taxonomy.png",
    thumbnails = "images/garrison-STAR-thumb.png",
    pdf = "pdfs/Garrison_STAR_cameraready.pdf",
    publisher = "The Eurographics Association and John Wiley \& Sons Ltd.",
    project = "VIDI"
    }

2021

    [PDF] [DOI] [VID] [Bibtex]
    @Article{Kristiansen-2021-SSG,
    author = {Kristiansen, Y. S. and Garrison, L. and Bruckner, S.},
    title = {Semantic Snapping for Guided Multi-View Visualization Design},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {Visual information displays are typically composed of multiple visualizations that are used to facilitate an understanding of the underlying data. A common example are dashboards, which are frequently used in domains such as finance, process monitoring and business intelligence. However, users may not be aware of existing guidelines and lack expert design knowledge when composing such multi-view visualizations. In this paper, we present semantic snapping, an approach to help non-expert users design effective multi-view visualizations from sets of pre-existing views. When a particular view is placed on a canvas, it is “aligned” with the remaining views–not with respect to its geometric layout, but based on aspects of the visual encoding itself, such as how data dimensions are mapped to channels. Our method uses an on-the-fly procedure to detect and suggest resolutions for conflicting, misleading, or ambiguous designs, as well as to provide suggestions for alternative presentations. With this approach, users can be guided to avoid common pitfalls encountered when composing visualizations. Our provided examples and case studies demonstrate the usefulness and validity of our approach.},
    note = {Presented at IEEE VIS 2021},
    project = {MetaVis,VIDI},
    pdf = {pdfs/Kristiansen-2021-SSG.pdf},
    vid = {vids/Kristiansen-2021-SSG.mp4},
    thumbnails = {images/Kristiansen-2021-SSG.png},
    images = {images/Kristiansen-2021-SSG.jpg},
    keywords = {tabular data, guidelines, mixed initiative human-machine analysis, coordinated and multiple views},
    doi = {10.1109/TVCG.2021.3114860},
    }
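As a toy illustration of the kinds of conflicts such a method can flag (the view specifications below are hypothetical and far simpler than the paper's encoding model), consider detecting a field mapped to color with different palettes in two views:

# Hypothetical minimal view specs: field -> encoding.
view_a = {"price": {"channel": "color", "palette": "viridis"}}
view_b = {"price": {"channel": "color", "palette": "magma"},
          "year": {"channel": "x"}}

def color_conflicts(a, b):
    """Flag fields encoded as color with mismatched palettes across views."""
    issues = []
    for field, enc in a.items():
        other = b.get(field)
        if (other and enc["channel"] == other["channel"] == "color"
                and enc.get("palette") != other.get("palette")):
            issues.append(f"{field}: {enc['palette']!r} vs {other['palette']!r}")
    return issues

print(color_conflicts(view_a, view_b))  # ["price: 'viridis' vs 'magma'"]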
    [PDF] [Bibtex]
    @InProceedings{Garrison-2021-EPP,
    author = {Laura Garrison and Monique Meuschke and Jennifer Fairman and Noeska Smit and Bernhard Preim and Stefan Bruckner},
    title = {An Exploration of Practice and Preferences for the Visual Communication of Biomedical Processes},
    booktitle = {Proceedings of VCBM},
    year = {2021},
    abstract = {The visual communication of biomedical processes draws from diverse techniques in both visualization and biomedical illustration. However, matching these techniques to their intended audience often relies on practice-based heuristics or narrow-scope evaluations. We present an exploratory study of the criteria that audiences use when evaluating a biomedical process visualization targeted for communication. Designed over a series of expert interviews and focus groups, our study focuses on common communication scenarios of five well-known biomedical processes and their standard visual representations. We framed these scenarios in a survey with participant expertise spanning from minimal to expert knowledge of a given topic. Our results show frequent overlap in abstraction preferences between expert and non-expert audiences, with similar prioritization of clarity and the ability of an asset to meet a given communication objective. We also found that some illustrative conventions are not as clear as we thought, e.g., glows have broadly ambiguous meaning, while other approaches were unexpectedly preferred, e.g., biomedical illustrations in place of data-driven visualizations. Our findings suggest numerous opportunities for the continued convergence of visualization and biomedical illustration techniques for targeted visualization design.
    },
    note = {Best Paper Honorable Mention at VCBM 2021},
    project = {VIDI,ttmedvis},
    pdf = {pdfs/Garrison-2021-EPP.pdf},
    thumbnails = {images/Garrison-2021-EPP.png},
    images = {images/Garrison-2021-EPP.jpg},
    url = {https://github.com/lauragarrison87/Biomedical_Process_Vis},
    keywords = {biomedical illustration, visual communication, survey},
    }
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2021-LWI,
    author = {Trautner, Thomas and Bruckner, Stefan},
    title = {Line Weaver: Importance-Driven Order Enhanced Rendering of Dense Line Charts},
    journal = {Computer Graphics Forum},
    volume = {40},
    number = {3},
    pages = {399--410},
    keywords = {information visualization, visualization techniques, line charts},
    doi = {10.1111/cgf.14316},
    abstract = {Line charts are an effective and widely used technique for visualizing series of ordered two-dimensional data points. The relationship between consecutive points is indicated by connecting line segments, revealing potential trends or clusters in the underlying data. However, when dealing with an increasing number of lines, the render order substantially influences the resulting visualization. Rendering transparent lines can help but unfortunately the blending order is currently either ignored or naively used, for example, assuming it is implicitly given by the order in which the data was saved in a file. Due to the noncommutativity of classic alpha blending, this results in contradicting visualizations of the same underlying data set, so-called "hallucinators". In this paper, we therefore present line weaver, a novel visualization technique for dense line charts. Using an importance function, we developed an approach that correctly considers the blending order independently of the render order and without any prior sorting of the data. We allow for importance functions which are either explicitly given or implicitly derived from the geometric properties of the data if no external data is available. The importance can then be applied globally to entire lines, or locally per pixel which simultaneously supports various types of user interaction. Finally, we discuss the potential of our contribution based on different synthetic and real-world data sets where classic or naive approaches would fail.},
    year = {2021},
    pdf = "pdfs/Trautner-2021-LWI.pdf",
    thumbnails = "images/Trautner-2021-LWI-thumb.png",
    images = "images/Trautner-2021-LWI-thumb.png",
    vid = "vids/Trautner_2021_LineWeaver_video.mp4",
    youtube = "https://youtu.be/-hLF5XSR_ws",
    project = "MetaVis",
    git = "https://github.com/TTrautner/LineWeaver"
    }
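The "hallucinator" problem comes from the non-commutativity of classic alpha blending. This short numeric demonstration (not the paper's algorithm) composites the same two semi-transparent colors in both orders with the Porter-Duff over operator and obtains different pixels:

import numpy as np

def over(src_rgb, src_a, dst_rgb, dst_a):
    """Porter-Duff 'over': composite src on top of dst (straight alpha)."""
    out_a = src_a + dst_a * (1.0 - src_a)
    out_rgb = (src_rgb * src_a + dst_rgb * dst_a * (1.0 - src_a)) / out_a
    return out_rgb, out_a

red, blue = np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0])
black = np.zeros(3)

# The same two 60%-opaque "lines" over a black background, rendered in
# opposite orders, yield different colors: order changes the picture.
rgb1, _ = over(red, 0.6, *over(blue, 0.6, black, 1.0))   # blue first, red on top
rgb2, _ = over(blue, 0.6, *over(red, 0.6, black, 1.0))   # red first, blue on top
print(rgb1, rgb2)  # [0.6 0. 0.24] vs [0.24 0. 0.6]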
    [PDF] [VID] [Bibtex]
    @article{Diehl-2021-HTC,
    author = {Alexandra Diehl and Rodrigo Pelorosso and Juan Ruiz and Renato Pajarola and Meister Eduard Gr\"{o}ller and Stefan Bruckner},
    title = {Hornero: Thunderstorms Characterization using Visual Analytics},
    journal = {Computer Graphics Forum},
    volume = {40},
    number = {3},
    keywords = {visual analytics, weather forecasting, nowcasting},
    abstract = {Analyzing the evolution of thunderstorms is critical in determining the potential for the development of severe weather events. Existing visualization systems for short-term weather forecasting (nowcasting) allow for basic analysis and prediction of storm developments. However, they lack advanced visual features for efficient decision-making. We developed a visual analytics tool for the detection of hazardous thunderstorms and their characterization, using a visual design centered on a reformulated expert task workflow that includes visual features to overview storms and quickly identify high-impact weather events, a novel storm graph visualization to inspect and analyze the storm structure, as well as a set of interactive views for efficient identification of similar storm cells (known as analogs) in historical data and their use for nowcasting. Our tool was designed with and evaluated by meteorologists and expert forecasters working in short-term operational weather forecasting of severe weather events. Results show that our solution suits the forecasters’ workflow. Our visual design is expressive, easy to use, and effective for prompt analysis and quick decision-making in the context of short-range operational weather forecasting.},
    year = {2021},
    pdf = "pdfs/Diehl-2021-HTC.pdf",
    thumbnails = "images/Diehl-2021-HTC.png",
    images = "images/Diehl-2021-HTC.jpg",
    vid = "vids/Diehl-2021-HTC.mp4",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Garrison-2021-DimLift,
    author = {Garrison, Laura and M\"{u}ller, Juliane and Schreiber, Stefanie and Oeltze-Jafra, Steffen and Hauser, Helwig and Bruckner, Stefan},
    title = {DimLift: Interactive Hierarchical Data Exploration through Dimensional Bundling},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {The identification of interesting patterns and relationships is essential to exploratory data analysis. This becomes increasingly difficult in high dimensional datasets. While dimensionality reduction techniques can be utilized to reduce the analysis space, these may unintentionally bury key dimensions within a larger grouping and obfuscate meaningful patterns. With this work we introduce DimLift, a novel visual analysis method for creating and interacting with dimensional bundles. Generated through an iterative dimensionality reduction or user-driven approach, dimensional bundles are expressive groups of dimensions that contribute similarly to the variance of a dataset. Interactive exploration and reconstruction methods via a layered parallel coordinates plot allow users to lift interesting and subtle relationships to the surface, even in complex scenarios of missing and mixed data types. We exemplify the power of this technique in an expert case study on clinical cohort data alongside two additional case examples from nutrition and ecology.},
    volume = {27},
    number = {6},
    pages = {2908--2922},
    pdf = {pdfs/garrison-2021-dimlift.pdf},
    images = {images/garrison_dimlift.jpg},
    thumbnails = {images/garrison_dimlift_thumb.jpg},
    youtube = {https://youtu.be/JSZuhnDyugA},
    doi = {10.1109/TVCG.2021.3057519},
    git = {https://github.com/lauragarrison87/DimLift},
    project = {VIDI},
    }
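A rough intuition for dimensional bundles, deliberately simplified to a single PCA pass over complete numeric data (DimLift itself uses an iterative procedure and also handles missing and mixed types): group dimensions by the principal component they load on most strongly, i.e. by similar contribution to the variance.

import numpy as np
from sklearn.decomposition import PCA

# Six dimensions driven by two latent factors (plus noise), so two
# bundles should emerge.
rng = np.random.default_rng(3)
base = rng.normal(size=(200, 2))
X = np.column_stack([base[:, 0], 2 * base[:, 0], -base[:, 0],
                     base[:, 1], 0.5 * base[:, 1], -2 * base[:, 1]])
X += rng.normal(scale=0.1, size=X.shape)

pca = PCA(n_components=2).fit((X - X.mean(0)) / X.std(0))
dominant = np.abs(pca.components_).argmax(axis=0)  # strongest component per dimension
bundles = {c: np.flatnonzero(dominant == c).tolist() for c in range(2)}
print(bundles)  # e.g. {0: [0, 1, 2], 1: [3, 4, 5]}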
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Mueller-2021-IDA,
    author = {M\"{u}ller, Juliane and Garrison, Laura and Ulbrich, Philipp and Schreiber, Stefanie and Bruckner, Stefan and Hauser, Helwig and Oeltze-Jafra, Steffen},
    title = {Integrated Dual Analysis of Quantitative and Qualitative High-Dimensional Data},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {The Dual Analysis framework is a powerful enabling technology for the exploration of high dimensional quantitative data by treating data dimensions as first-class objects that can be explored in tandem with data values. In this work, we extend the Dual Analysis framework through the joint treatment of quantitative (numerical) and qualitative (categorical) dimensions. Computing common measures for all dimensions allows us to visualize both quantitative and qualitative dimensions in the same view. This enables a natural joint treatment of mixed data during interactive visual exploration and analysis. Several measures of variation for nominal qualitative data can also be applied to ordinal qualitative and quantitative data. For example, instead of measuring variability from a mean or median, other measures assess inter-data variation or average variation from a mode. In this work, we demonstrate how these measures can be integrated into the Dual Analysis framework to explore and generate hypotheses about high-dimensional mixed data. A medical case study using clinical routine data of patients suffering from Cerebral Small Vessel Disease (CSVD), conducted with a senior neurologist and a medical student, shows that a joint Dual Analysis approach for quantitative and qualitative data can rapidly lead to new insights based on which new hypotheses may be generated.},
    volume = {27},
    number = {6},
    pages = {2953--2966},
    pdf = {pdfs/Mueller_2020_IDA.pdf},
    images = {images/Mueller_2020_IDA.jpg},
    thumbnails = {images/Mueller_2020_IDA.png},
    doi = {10.1109/TVCG.2021.3056424},
    git = {https://github.com/JulianeMu/IntegratedDualAnalysisAproach_MDA},
    project = {VIDI},
    }
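One standard measure of variation for nominal data of the kind this framework integrates is the variation ratio: the share of observations outside the modal category. A worked micro-example on hypothetical categorical data:

from collections import Counter

# Hypothetical qualitative clinical attribute.
smoking_status = ["never", "former", "never", "current", "never", "former"]
counts = Counter(smoking_status)
f_mode = counts.most_common(1)[0][1]        # frequency of the mode ("never": 3)
variation_ratio = 1 - f_mode / len(smoking_status)
print(variation_ratio)                      # 0.5: half the values lie outside the mode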
    [PDF] [DOI] [Bibtex]
    @article{bolte2020splitstreams,
    author= {Bolte, Fabian and Nourani, Mahsan and Ragan, Eric and Bruckner, Stefan},
    journal= {IEEE Transactions on Visualization and Computer Graphics},
    title= {SplitStreams: A Visual Metaphor for Evolving Hierarchies},
    year= {2021},
    keywords= {Information Visualization, Trees, Data Structures and Data Types, Visualization Techniques and Methodologies},
    doi= {10.1109/TVCG.2020.2973564},
    url= {https://arxiv.org/pdf/2002.03891.pdf},
    volume = {27},
    number = {8},
    abstract= {The visualization of hierarchically structured data over time is an ongoing challenge and several approaches exist trying to solve it. Techniques such as animated or juxtaposed tree visualizations are not capable of providing a good overview of the time series and lack expressiveness in conveying changes over time. Nested streamgraphs provide a better understanding of the data evolution, but lack the clear outline of hierarchical structures at a given timestep. Furthermore, these approaches are often limited to static hierarchies or exclude complex hierarchical changes in the data, limiting their use cases. We propose a novel visual metaphor capable of providing a static overview of all hierarchical changes over time, as well as clearly outlining the hierarchical structure at each individual time step. Our method allows for smooth transitions between tree maps and nested streamgraphs, enabling the exploration of the trade-off between dynamic behavior and hierarchical structure. As our technique handles topological changes of all types, it is suitable for a wide range of applications. We demonstrate the utility of our method on several use cases, evaluate it with a user study, and provide its full source code.},
    pdf= {pdfs/Bolte-2020-SplitStreams.pdf},
    images= {images/Bolte-2020-SplitStreams.png},
    thumbnails= {images/Bolte-2020-SplitStreams_thumb.png},
    project = "MetaVis",
    git = "https://github.com/cadanox/SplitStreams"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{bolte2019visavis,
    author= {Bolte, Fabian and Bruckner, Stefan},
    journal= {IEEE Transactions on Visualization and Computer Graphics},
    title= {Vis-a-Vis: Visual Exploration of Visualization Source Code Evolution},
    year= {2021},
    keywords= {Visualization System and Toolkit Design;User Interfaces;Integrating Spatial and Non-Spatial Data Visualization;Software Visualization},
    doi= {10.1109/TVCG.2019.2963651},
    issn= {2160-9306},
    url= {https://arxiv.org/pdf/2001.02092.pdf},
    abstract= {Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this paper, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.},
    pdf= {pdfs/Bolte-2019-Visavis.pdf},
    images= {images/Bolte-2019-Visavis.png},
    thumbnails= {images/Bolte-2019-Visavis_thumb.png},
    youtube= {https://www.youtube.com/watch?v=5XO6BU4j1KQ},
    volume = {27},
    number = {7},
    pages = {3153--3167},
    project = "MetaVis"
    }
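Among the comparison tools listed is the on-the-fly construction of difference images between development states. A minimal sketch of that single ingredient (assuming two rendered frames as RGB arrays; this is not the system's pipeline):

import numpy as np

# Two hypothetical rendered frames; the "new" state changes one region.
frame_old = np.random.default_rng(1).random((64, 64, 3))
frame_new = frame_old.copy()
frame_new[20:40, 20:40] += 0.2

diff = np.abs(frame_new - frame_old).max(axis=2)  # per-pixel max channel difference
changed = diff > 0.05                             # threshold into a change mask
print(changed.sum(), "pixels changed")            # 400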

2020

    [PDF] [DOI] [Bibtex]
    @article{Garrison-2020-IVE,
    author = {Garrison, Laura and Va\v{s}\'{i}\v{c}ek, Jakub and Craven, Alex R. and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {Interactive Visual Exploration of Metabolite Ratios in MR Spectroscopy Studies},
    journal = {Computers \& Graphics},
    volume = {92},
    pages = {1--12},
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    doi = {10.1016/j.cag.2020.08.001},
    abstract = {Magnetic resonance spectroscopy (MRS) is an advanced biochemical technique used to identify metabolic compounds in living tissue. While its sensitivity and specificity to chemical imbalances render it a valuable tool in clinical assessment, the results from this modality are abstract and difficult to interpret. With this design study we characterized and explored the tasks and requirements for evaluating these data from the perspective of a MRS research specialist. Our resulting tool, SpectraMosaic, links with upstream spectroscopy quantification software to provide a means for precise interactive visual analysis of metabolites with both single- and multi-peak spectral signatures. Using a layered visual approach, SpectraMosaic allows researchers to analyze any permutation of metabolites in ratio form for an entire cohort, or by sample region, individual, acquisition date, or brain activity status at the time of acquisition. A case study with three MRS researchers demonstrates the utility of our approach in rapid and iterative spectral data analysis.},
    year = {2020},
    pdf = "pdfs/Garrison-2020-IVE.pdf",
    thumbnails = "images/Garrison-2020-IVE.png",
    images = "images/Garrison-2020-IVE.jpg",
    project = "VIDI",
    git = "https://github.com/mmiv-center/spectramosaic-public",
    }
    [PDF] [DOI] [Bibtex]
    @article{Kristiansen-2020-VIV,
    author = {Yngve Sekse Kristiansen and Stefan Bruckner},
    title = {Visception: An Interactive Visual Framework for Nested Visualization Design},
    journal = {Computers \& Graphics},
    volume = {92},
    pages = {13--27},
    keywords = {information visualization, nested visualizations, nesting},
    doi = {10.1016/j.cag.2020.08.007},
    abstract = {Nesting is the embedding of charts into the marks of another chart. Related to principles such as Tufte’s rule of utilizing micro/macro readings, nested visualizations have been employed to increase information density, providing compact representations of multi-dimensional and multi-typed data entities. Visual authoring tools are becoming increasingly prevalent, as they make visualization technology accessible to non-expert users such as data journalists, but existing frameworks provide no or only very limited functionality related to the creation of nested visualizations. In this paper, we present an interactive visual approach for the flexible generation of nested multilayer visualizations. Based on a hierarchical representation of nesting relationships coupled with a highly customizable mechanism for specifying data mappings, we contribute a flexible framework that enables defining and editing data-driven multi-level visualizations. As a demonstration of the viability of our framework, we contribute a visual builder for exploring, customizing and switching between different designs, along with example visualizations to demonstrate the range of expression. The resulting system allows for the generation of complex nested charts with a high degree of flexibility and fluidity using a drag and drop interface.},
    year = {2020},
    pdf = "pdfs/Kristiansen-2020-VIV.pdf",
    thumbnails = "images/Kristiansen-2020-VIV.png",
    images = "images/Kristiansen-2020-VIV.jpg",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{RadEx,
    author = {M\"{o}rth, E. and Wagner-Larsen, K. and Hodneland, E. and Krakstad, C. and Haldorsen, I. S. and Bruckner, S. and Smit, N. N.},
    title = {RadEx: Integrated Visual Exploration of Multiparametric Studies for Radiomic Tumor Profiling},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {7},
    year = {2020},
    pages = {611--622},
    abstract = {Better understanding of the complex processes driving tumor growth and metastases is critical for developing targeted treatment strategies in cancer. Radiomics extracts large amounts of features from medical images which enables radiomic tumor profiling in combination with clinical markers. However, analyzing complex imaging data in combination with clinical data is not trivial and supporting tools aiding in these exploratory analyses are presently missing. In this paper, we present an approach that aims to enable the analysis of multiparametric medical imaging data in combination with numerical, ordinal, and categorical clinical parameters to validate established and unravel novel biomarkers. We propose a hybrid approach where dimensionality reduction to a single axis is combined with multiple linked views allowing clinical experts to formulate hypotheses based on all available imaging data and clinical parameters. This may help to reveal novel tumor characteristics in relation to molecular targets for treatment, thus providing better tools for enabling more personalized targeted treatment strategies. To confirm the utility of our approach, we closely collaborate with experts from the field of gynecological cancer imaging and conducted an evaluation with six experts in this field.},
    pdf = "pdfs/Moerth-2020-RadEx.pdf",
    images = "images/Moerth-2020-RadEx.jpg",
    youtube = "https://youtu.be/zwtDzwwX790",
    thumbnails = "images/Moerth-2020-RadEx-thumb.jpg",
    project = "ttmedvis",
    doi = {10.1111/cgf.14172}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS{Moerth-2020-CGI,
    author = "M\"{o}rth, E. and Haldorsen, I.S. and Bruckner, S. and Smit, N.N.",
    title = "ParaGlyder: Probe-driven Interactive Visual Analysis for Multiparametric Medical Imaging Data",
    booktitle = "Proceedings of Computer Graphics International",
    pages = "351--363",
    year = "2020",
    abstract = "Multiparametric medical imaging describes approaches that include multiple imaging sequences acquired within the same imaging examination, as opposed to one single imaging sequence or imaging from multiple imaging modalities. Multiparametric imaging in cancer has been shown to be useful for tumor detection and may also depict functional tumor characteristics relevant for clinical phenotypes. However, when confronted with datasets consisting of multiple values per voxel, traditional reading of the imaging series fails to capture complicated patterns. Those patterns of potentially important imaging properties of the parameter space may be critical for the analysis. Standard approaches, such as transfer functions and juxtapositioned visualizations, fail to convey the shape of the multiparametric parameter distribution in sufficient detail. For these reasons, in this paper we present an approach that aims to enable the exploration and analysis of such multiparametric studies using an interactive visual analysis application to remedy the trade-offs between details in the value domain and in spatial resolution. Interactive probing within or across subjects allows for a digital biopsy that is able to uncover multiparametric tissue properties. This may aid in the discrimination between healthy and cancerous tissue, unravel radiomic tissue features that could be linked to targetable pathogenic mechanisms, and potentially highlight metastases that evolved from the primary tumor. We conducted an evaluation with eleven domain experts from the field of gynecological cancer imaging, neurological imaging, and machine learning research to confirm the utility of our approach.",
    note= "The final authenticated version is available online at https://doi.org/10.1007/978-3-030-61864-3_29",
    pdf = "pdfs/Moerth-2020-CGI-ParaGlyder.pdf",
    images = "images/Moerth-2020-ParaGlyder.PNG",
    thumbnails = "images/Moerth-2020-ParaGlyder-thumb.png",
    youtube = "https://youtu.be/S_M4CWXKz0U",
    publisher = "LNCS by Springer",
    project = "ttmedvis",
    doi = "10.1007/978-3-030-61864-3_29"
    }
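The "digital biopsy" idea of probing multiparametric volumes can be sketched as follows (toy data; assumes a stack of co-registered parameter volumes and a spherical probe, a strong simplification of the interactive probing the paper describes): averaging each parameter inside the probe yields one multiparametric profile per probe position.

import numpy as np

rng = np.random.default_rng(2)
volumes = rng.random((5, 64, 64, 64))   # 5 co-registered imaging parameters

center, radius = (32, 32, 32), 6
zz, yy, xx = np.mgrid[:64, :64, :64]
probe = ((zz - center[0]) ** 2 + (yy - center[1]) ** 2
         + (xx - center[2]) ** 2) <= radius ** 2   # spherical probe mask

profile = volumes[:, probe].mean(axis=1)  # one mean value per parameter
print(profile)                            # the probe's multiparametric profile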
    [PDF] [DOI] [Bibtex]
    @article{StormFurru-2020-VGT,
    author = {Syver Storm-Furru and Stefan Bruckner},
    title = {VA-TRAC: Geospatial Trajectory Analysis for Monitoring, Identification, and Verification in Fishing Vessel Operations},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {101--114},
    keywords = {visual analytics, fisheries, monitoring},
    doi = {10.1111/cgf.13966},
    abstract = {In order to ensure sustainability, fishing operations are governed by many rules and regulations that restrict the use of certain techniques and equipment, specify the species and size of fish that can be harvested, and regulate commercial activities based on licensing schemes. As the world's second largest exporter of fish and seafood products, Norway invests a significant amount of effort into maintaining natural ecosystem dynamics by ensuring compliance with its constantly evolving science-based regulatory body. This paper introduces VA-TRAC, a geovisual analytics application developed in collaboration with the Norwegian Directorate of Fisheries in order to address this complex task. Our approach uses automatic methods to identify possible catch operations based on fishing vessel trajectories, embedded in an interactive web-based visual interface used to explore the results, compare them with licensing information, and incorporate the analysts' domain knowledge into the decision making process. We present a data and task analysis based on a close collaboration with domain experts, and the design and implementation of VA-TRAC to address the identified requirements.},
    year = {2020},
    pdf = "pdfs/StormFurru-2020-VGT.pdf",
    thumbnails = "images/StormFurru-2020-VGT.png",
    images = "images/StormFurru-2020-VGT.jpg",
    project = "MetaVis"
    }
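VA-TRAC identifies possible catch operations automatically from vessel trajectories. As a generic heuristic sketch only (not the paper's detector), sustained low-speed runs in a speed profile can serve as candidate operations:

import numpy as np

# Hypothetical speed profile: transit, a slow stretch, transit again.
rng = np.random.default_rng(4)
speed_knots = np.concatenate([rng.normal(10, 1, 50),
                              rng.normal(2, 0.5, 30),
                              rng.normal(10, 1, 50)])

slow = speed_knots < 4.0
# Runs of consecutive slow samples: +1/-1 edges of the padded indicator.
edges = np.flatnonzero(np.diff(np.concatenate([[0], slow.astype(int), [0]])))
runs = edges.reshape(-1, 2)               # (start, end) index pairs, end exclusive
candidates = [tuple(r) for r in runs if r[1] - r[0] > 10]
print(candidates)                         # e.g. [(50, 80)]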
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2020-SunspotPlots,
    author = {Trautner, T. and Bolte, F. and Stoppel, S. and Bruckner, S.},
    title = {Sunspot Plots: Model-based Structure Enhancement for Dense Scatter Plots},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {551--563},
    keywords = {information visualization, scatterplots, kernel density estimation},
    doi = {10.1111/cgf.14001},
    abstract = {Scatter plots are a powerful and well-established technique for visualizing the relationships between two variables as a collection of discrete points. However, especially when dealing with large and dense data, scatter plots often exhibit problems such as overplotting, making the data interpretation arduous. Density plots are able to overcome these limitations in highly populated regions, but fail to provide accurate information of individual data points. This is particularly problematic in sparse regions where the density estimate may not provide a good representation of the underlying data. In this paper, we present sunspot plots, a visualization technique that communicates dense data as a continuous data distribution, while preserving the discrete nature of data samples in sparsely populated areas. We furthermore demonstrate the advantages of our approach on typical failure cases of scatter plots within synthetic and real-world data sets and validate its effectiveness in a user study.},
    year = {2020},
    pdf = "pdfs/Trautner_2020_SunspotPlots_PDF.pdf",
    thumbnails = "images/Trautner_2020_SunspotPlots_thumb.png",
    images = "images/Trautner_2020_SunspotPlots_thumb.png",
    vid = "vids/Trautner_2020_SunspotPlots_video.mp4",
    youtube = "https://youtu.be/G6l-y6YGjzQ",
    project = "MetaVis"
    }
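The core idea, a continuous density where the data is dense and discrete points where it is sparse, can be approximated in a few lines (a simplification using a Gaussian KDE and a fixed quantile threshold; the paper uses a model-based estimator):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

rng = np.random.default_rng(42)
pts = np.concatenate([rng.normal(0, 0.3, (5000, 2)),   # dense cluster
                      rng.uniform(-3, 3, (60, 2))])    # sparse samples

kde = gaussian_kde(pts.T)
dens_at_pts = kde(pts.T)
threshold = np.quantile(dens_at_pts, 0.25)             # split dense vs. sparse

xx, yy = np.mgrid[-3:3:200j, -3:3:200j]
dens = kde(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)

fig, ax = plt.subplots(figsize=(6, 5))
ax.contourf(xx, yy, np.where(dens > threshold, dens, np.nan), cmap="magma")
sparse = pts[dens_at_pts <= threshold]
ax.plot(sparse[:, 0], sparse[:, 1], "k.", ms=3)        # keep discrete samples
ax.set(title="Density where dense, points where sparse")
plt.show()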
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Bolte-2020-ONC,
    author = "Fabian Bolte and Stefan Bruckner",
    title = "Organic Narrative Charts",
    booktitle = "Proceedings of Eurographics 2020 (Short Papers)",
    year = "2020",
    pages = "93--96",
    doi = "10.2312/egs.20201026",
    month = "may",
    abstract = "Storyline visualizations display the interactions of groups and entities and their development over time. Existing approaches have successfully adopted the general layout from hand-drawn illustrations to automatically create similar depictions. Ward Shelley is the author of several diagrammatic paintings that show the timeline of art-related subjects, such as Downtown Body, a history of art scenes. His drawings include many stylistic elements that are not covered by existing storyline visualizations, like links between entities, splits and merges of streams, and tags or labels to describe the individual elements. We present a visualization method that provides a visual mapping for the complex relationships in the data, creates a layout for their display, and adopts a similar styling of elements to imitate the artistic appeal of such illustrations. We compare our results to the original drawings and provide an open-source authoring tool prototype.",
    pdf = "pdfs/Bolte-2020-ONC.pdf",
    images = "images/Bolte-2020-ONC.jpg",
    thumbnails = "images/Bolte-2020-ONC.png",
    event = "Eurographics 2020",
    keywords = "narrative charts, storylines, aesthetics",
    project = "MetaVis",
    git = "https://github.com/cadanox/orcha"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE{Palenik-2019-Splatting,
    author={J. P\'{a}lenik and J. By\v{s}ka and S. Bruckner and H. Hauser},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    title={Scale-Space Splatting: Reforming Spacetime for Cross-Scale Exploration of Integral Measures in Molecular Dynamics},
    year={2020},
    volume={26},
    number={1},
    pages={643--653},
    keywords={Data visualization;Computational modeling;Time series analysis;Atmospheric measurements;Particle measurements;Analytical models;Kernel;Scale space;time-series;scientific simulation;multi-scale analysis;space-time cube;molecular dynamics},
    doi={10.1109/TVCG.2019.2934258},
    ISSN={1077-2626},
    month={},
    pdf = "pdfs/scale-space-splatting.pdf",
    images = "images/scale-space-teaser.png",
    thumbnails = "images/scale-space-teaser-thumb.png",
    abstract = "Understanding large amounts of spatiotemporal data from particle-based simulations, such as molecular dynamics, often relies on the computation and analysis of aggregate measures. These, however, by virtue of aggregation, hide structural information about the space/time localization of the studied phenomena. This leads to degenerate cases where the measures fail to capture distinct behaviour. In order to drill into these aggregate values, we propose a multi-scale visual exploration technique. Our novel representation, based on partial domain aggregation, enables the construction of a continuous scale-space for discrete datasets and the simultaneous exploration of scales in both space and time. We link these two scale-spaces in a scale-space space-time cube and model linked views as orthogonal slices through this cube, thus enabling the rapid identification of spatio-temporal patterns at multiple scales. To demonstrate the effectiveness of our approach, we showcase an advanced exploration of a protein-ligand simulation.",
    }
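
    As a toy illustration of the cross-scale idea (the paper's partial domain aggregation and space-time cube are considerably more involved), a temporal scale-space of an aggregate measure can be built by smoothing the signal at progressively coarser scales:

        # Minimal temporal scale-space sketch: each row of the returned stack
        # is the input signal smoothed at a coarser scale (larger sigma).
        import numpy as np
        from scipy.ndimage import gaussian_filter1d

        def temporal_scale_space(signal, sigmas=(1, 2, 4, 8, 16)):
            """signal: 1D array; returns array of shape (len(sigmas), len(signal))."""
            return np.stack([gaussian_filter1d(signal, s) for s in sigmas])

    Slicing such a stack at a fixed scale or at a fixed time corresponds loosely to the orthogonal slices through the scale-space space-time cube described in the abstract.
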
    [PDF] [DOI] [Bibtex]
    @incollection {Bolte-2019-MVS,
    author = {Bolte, Fabian and Bruckner, Stefan},
    title = {Measures in Visualization Space},
    booktitle = {Foundations of Data Visualization},
    chapter = {3},
    publisher = {Springer},
    year = {2020},
    pdf = {pdfs/Bolte-2019-MVS.pdf},
    images = {images/Bolte-2019-MVS.png},
    thumbnails = {images/Bolte-2019-MVS.png},
    abstract = {Measurement is an integral part of modern science, providing the fundamental means for evaluation, comparison, and prediction. In the context of visualization, several different types of measures have been proposed, including approaches that evaluate particular aspects of individual visualization techniques, their perceptual characteristics, and even economic factors. Furthermore, there are approaches that attempt to provide means for measuring general properties of the visualization process as a whole. Measures can be quantitative or qualitative, and one of the primary goals is to provide objective means for reasoning about visualizations and their effectiveness. As such, they play a central role in the development of scientific theories for visualization. In this chapter, we provide an overview of the current state of the art, survey and classify different types of visualization measures, characterize their strengths and drawbacks, and provide an outline of open challenges for future research.},
    note = {This is a preprint of a chapter for a planned book that was initiated by participants of the Dagstuhl Seminar 18041 ("Foundations of Data Visualization") and that is expected to be published by Springer. The final book chapter will differ from this preprint.},
    url = {https://arxiv.org/abs/1909.05295},
    project = "MetaVis",
    isbn = {978-3-030-34443-6},
    doi = {10.1007/978-3-030-34444-3_3}
    }
    [PDF] [DOI] [Bibtex]
    @article{Solteszova-2019-MLT,
    author = {Solteszova, V. and Smit, N. N. and Stoppel, S. and Gr\"{u}ner, R. and Bruckner, S.},
    title = {Memento: Localized Time-Warping for Spatio-Temporal Selection},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {1},
    pages = {231--243},
    year = {2020},
    keywords = {interaction, temporal data, visualization, spatio-temporal projection},
    images = "images/Solteszova-2019-MLT.jpg",
    thumbnails = "images/Solteszova-2019-MLT-1.jpg",
    pdf = "pdfs/Solteszova-2019-MLT.pdf",
    doi = {10.1111/cgf.13763},
    abstract = {Interaction techniques for temporal data are often focused on affecting the spatial aspects of the data, for instance through the use of transfer functions, camera navigation or clipping planes. However, the temporal aspect of the data interaction is often neglected. The temporal component is either visualized as individual time steps, an animation or a static summary over the temporal domain. When dealing with streaming data, these techniques are unable to cope with the task of re-viewing an interesting local spatio-temporal event, while continuing to observe the rest of the feed. We propose a novel technique that allows users to interactively specify areas of interest in the spatio-temporal domain. By employing a time-warp function, we are able to slow down time, freeze time or even travel back in time, around spatio-temporal events of interest. The combination of such a (pre-defined) time-warp function and brushing directly in the data to select regions of interest allows for a detailed review of temporally and spatially localized events, while maintaining an overview of the global spatio-temporal data. We demonstrate the utility of our technique with several usage scenarios.},
    project = "MetaVis,ttmedvis,VIDI"
    }
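
    The time-warp function at the heart of the abstract above can be illustrated with a small sketch; the Gaussian slow-down profile and all parameter names here are assumptions for illustration, not the authors' formulation.

        # Localized time-warp sketch: playback time advances more slowly the
        # closer it gets to an event of interest at t_event.
        import math

        def warped_step(t, t_event, dt=1.0, slowdown=10.0, width=5.0):
            """Advance playback time t by a step that shrinks near t_event."""
            proximity = math.exp(-((t - t_event) ** 2) / (2 * width ** 2))
            return t + dt / (1.0 + (slowdown - 1.0) * proximity)

    Far from the event the step is close to dt; at the event itself it shrinks to dt/slowdown, effectively slowing time around the selected spatio-temporal region.
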

2019

    [PDF] [DOI] [Bibtex]
    @inproceedings {Bartsch-2019-MVA,
    booktitle = {Proceedings of VCBM 2019 (Short Papers)},
    title = {MedUse: A Visual Analysis Tool for Medication Use Data in the ABCD Study},
    author = {Bartsch, Hauke and Garrison, Laura and Bruckner, Stefan and Wang, Ariel and Tapert, Susan F. and Gr\"{u}ner, Renate},
    abstract = {The RxNorm vocabulary is a yearly-published biomedical resource providing normalized names for medications. It is used to capture medication use in the Adolescent Brain Cognitive Development (ABCD) study, an active and publicly available longitudinal research study following 11,800 children over 10 years. In this work, we present medUse, a visual tool allowing researchers to explore and analyze the relationship of drug category to cognitive or imaging derived measures using ABCD study data. Our tool provides position-based context for tree traversal and selection granularity of both study participants and drug category. Developed as part of the Data Exploration and Analysis Portal (DEAP), medUse is available to more than 600 ABCD researchers world-wide. By integrating medUse into an actively used research product we are able to reach a wide audience and increase the practical relevance of visualization for the biomedical field.},
    year = {2019},
    pages = {97--101},
    images = "images/Bartsch-2019-MVA.jpg",
    thumbnails = "images/Bartsch-2019-MVA.png",
    pdf = "pdfs/Bartsch-2019-MVA.pdf",
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-081-9},
    DOI = {10.2312/vcbm.20191236},
    project = {VIDI}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Garrison2019SM,
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {SpectraMosaic: An Exploratory Tool for the Interactive Visual Analysis of Magnetic Resonance Spectroscopy Data},
    journal = {Computer Graphics Forum},
    month = {sep},
    year = {2019},
    booktitle = {Proceedings of VCBM 2019},
    pages = {1--10},
    event = "VCBM 2019",
    proceedings = "Proceedings of the 9th Eurographics Workshop on Visual Computing in Biology and Medicine",
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = "images/garrison_VCBM19spectramosaic_full.PNG",
    thumbnails = "images/garrison_VCBM19spectramosaic_thumb.png",
    pdf = "pdfs/garrison_VCBM19spectramosaic.pdf",
    youtube = "https://www.youtube.com/watch?v=Rzl7sl4WvdQ",
    abstract = {Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics often used for early detection and treatment evaluation of brain-related pathologies. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this design study, we collaborated with domain experts to design a novel visualization tool for the exploration of tissue metabolite concentration ratios in clinical and research spectroscopy studies. We present a data and task analysis for this domain, where MRS data attributes can be categorized into tiers of visual priority. We furthermore introduce a novel set of visual encodings for these attributes. Our result is SpectraMosaic, an interactive insight-generation tool for rapid exploration and comparison of metabolite ratios. We validate our approach with two case studies from MR spectroscopy experts, providing early qualitative evidence of the efficacy of the system for visualization of spectral data and affording deeper insights into these complex heterogeneous data.},
    git = "https://git.app.uib.no/Laura.Garrison/spectramosaic",
    doi = "0.2312/vcbm.20191225",
    project = "VIDI"
    }
    [DOI] [Bibtex]
    @incollection{Smit-2019-AtlasVis,
    title={Towards Advanced Interactive Visualization for Virtual Atlases},
    author={Smit, Noeska and Bruckner, Stefan},
    booktitle={Biomedical Visualisation},
    pages={85--96},
    year={2019},
    publisher={Springer},
    doi = {10.1007/978-3-030-19385-0_6},
    url = "http://noeskasmit.com/wp-content/uploads/2019/07/Smit_AtlasVis_2019.pdf",
    images = "images/Smit-2019-AtlasVis.png",
    thumbnails = "images/Smit-2019-AtlasVis.png",
    abstract = "An atlas is generally defined as a bound collection of tables, charts or illustrations describing a phenomenon. In an anatomical atlas for example, a collection of representative illustrations and text describes anatomy for the purpose of communicating anatomical knowledge. The atlas serves as reference frame for comparing and integrating data from different sources by spatially or semantically relating collections of drawings, imaging data, and/or text. In the field of medical image processing, atlas information is often constructed from a collection of regions of interest, which are based on medical images that are annotated by domain experts. Such an atlas may be employed for example for automatic segmentation of medical imaging data. The combination of interactive visualization techniques with atlas information opens up new possibilities for content creation, curation, and navigation in virtual atlases. With interactive visualization of atlas information, students are able to inspect and explore anatomical atlases in ways that were not possible with the traditional method of presenting anatomical atlases in book format, such as viewing the illustrations from other viewpoints. With advanced interaction techniques, it becomes possible to query the data that forms the basis for the atlas, thus empowering researchers to access a wealth of information in new ways. So far, atlasbased visualization has been employed for mainly medical education, as well as biological research. In this survey, we provide an overview of current digital biomedical atlas tasks and applications and summarize relevant visualization techniques. We discuss recent approaches for providing next-generation visual interfaces to navigate atlas data that go beyond common text-based search and hierarchical lists. Finally, we reflect on open challenges and opportunities for the next steps in interactive atlas visualization. ",
    project = "ttmedvis,MetaVis,VIDI"
    }
    [PDF] [YT] [Bibtex]
    @MISC {Garrison2019SM_eurovis,
    title = {A Visual Encoding System for Comparative Exploration of Magnetic Resonance Spectroscopy Data},
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    abstract = "Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics used often for early detection and treatment evaluation of intracranial pathologies. In particular, this non-invasive technique is important in the study of metabolic changes related to brain tumors, strokes, seizure disorders, Alzheimer's disease, depression, as well as other diseases and disorders affecting the brain. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this work we present a novel, tiered visual encoding system for multi-dimensional MRS data to aid in the visual exploration of metabolite concentration ratios. Our system was developed in close collaboration with domain experts including detailed data and task analyses. This visual encoding system was subsequently realized as part of an interactive insight-generation tool for rapid exploration and comparison of metabolite ratio variation for deeper insights to these complex data.",
    booktitle = {Proceedings of the EuroVis Conference - Posters (EuroVis 2019)},
    year = {2019},
    howpublished = "Poster presented at the EuroVis conference 2019",
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = "images/garrison_eurovis2019_SM_encodings.png",
    thumbnails = "images/garrison_eurovis2019_SM_encodings.png",
    pdf = "pdfs/garrison_eurovis2019_SM.pdf",
    youtube = "https://youtu.be/Rzl7sl4WvdQ",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Smit-2019-DBP,
    booktitle = {Eurographics 2019 - Dirk Bartz Prize},
    editor = {Bruckner, Stefan and Oeltze-Jafra, Steffen},
    title = {{Model-based Visualization for Medical Education and Training}},
    author = {Smit, Noeska and Lawonn, Kai and Kraima, Annelot and deRuiter, Marco and Bruckner, Stefan and Eisemann, Elmar and Vilanova, Anna},
    year = {2019},
    publisher = {The Eurographics Association},
    ISSN = {1017-4656},
    DOI = {10.2312/egm.20191033},
    pdf = "pdfs/Smit_DBPrize_2019.pdf",
    images = "images/Smit_DBPrize_2019.png",
    thumbnails = "images/Smit_DBPrize_2019.png",
    abstract = "Anatomy, or the study of the structure of the human body, is an essential component of medical education. Certain parts of human anatomy are considered to be more complex to understand than others, due to a multitude of closely related structures. Furthermore, there are many potential variations in anatomy, e.g., different topologies of vessels, and knowledge of these variations is critical for many in medical practice.
    Some aspects of individual anatomy, such as the autonomic nerves, are not visible in individuals through medical imaging techniques or even during surgery, placing these nerves at risk for damage.
    3D models and interactive visualization techniques can be used to improve understanding of this complex anatomy, in combination with traditional medical education paradigms.
    We present a framework incorporating several advanced medical visualization techniques and applications for teaching and training purposes, which is the result of an interdisciplinary project.
    In contrast to previous approaches which focus on general anatomy visualization or direct visualization of medical imaging data, we employ model-based techniques to represent variational anatomy, as well as anatomy not visible from imaging. Our framework covers the complete spectrum including general anatomy, anatomical variations, and anatomy in individual patients.
    Applications within our framework were evaluated positively with medical users, and our educational tool for general anatomy is in use in a Massive Open Online Course (MOOC) on anatomy, which had over 17000 participants worldwide in the first run.",
    project = "ttmedvis,VIDI"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2019-LFL,
    author = "Stoppel, Sergej and Bruckner, Stefan",
    title = "LinesLab: A Flexible Low-Cost Approach for the Generation of Physical Monochrome Art",
    journal = "Computer Graphics Forum",
    year = "2019",
    abstract = "The desire for the physical generation of computer art has seen a significant body of research that has resulted in sophisticated robots and painting machines, together with specialized algorithms mimicking particular artistic techniques. The resulting setups are often expensive and complex, making them unavailable for recreational and hobbyist use. In recent years, however, a new class of affordable low-cost plotters and cutting machines has reached the market. In this paper, we present a novel system for the physical generation of line and cut-out art based on digital images, targeted at such off-the-shelf devices. Our approach uses a meta-optimization process to generate results that represent the tonal content of a digital image while conforming to the physical and mechanical constraints of home-use devices. By flexibly combining basic sets of positional and shape encodings, we are able to recreate a wide range of artistic styles. Furthermore, our system optimizes the output in terms of visual perception based on the desired viewing distance, while remaining scalable with respect to the medium size.",
    pdf = "pdfs/Stoppel-2019-LFL.pdf",
    images = "images/Stoppel-2019-LFL.jpg",
    thumbnails = "images/Stoppel-2019-LFL.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    doi = "10.1111/cgf.13609",
    youtube = "https://www.youtube.com/watch?v=WdZJmU6fOAY",
    project = "MetaVis"
    }
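
    A toy example of the tone-reproduction constraint underlying such systems (a stand-in, not the paper's meta-optimization): for equally spaced parallel lines of width w, the ink coverage is w/s, so a target darkness d suggests a line spacing of w/d, clamped to what the plotter can physically draw.

        # Tone-to-line-spacing sketch; all limits are illustrative assumptions.
        def line_spacing_for_tone(target_darkness, line_width_mm=0.5,
                                  min_spacing_mm=1.0, max_spacing_mm=20.0):
            """target_darkness in [0, 1]; returns spacing between parallel lines."""
            if target_darkness <= 0:
                return max_spacing_mm
            spacing = line_width_mm / target_darkness
            return min(max(spacing, min_spacing_mm), max_spacing_mm)
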
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2019-DVM,
    author = "Bruckner, Stefan",
    title = "Dynamic Visibility-Driven Molecular Surfaces",
    journal = "Computer Graphics Forum",
    year = "2019",
    volume = "38",
    number = "2",
    pages = "317--329",
    abstract = "Molecular surface representations are an important tool for the visual analysis of molecular structure and function. In this paper, we present a novel method for the visualization of dynamic molecular surfaces based on the Gaussian model. In contrast to previous approaches, our technique does not rely on the construction of intermediate representations such as grids or triangulated surfaces. Instead, it operates entirely in image space, which enables us to exploit visibility information to efficiently skip unnecessary computations. With this visibility-driven approach, we can visualize dynamic high-quality surfaces for molecules consisting of millions of atoms. Our approach requires no preprocessing, allows for the interactive adjustment of all properties and parameters, and is significantly faster than previous approaches, while providing superior quality.",
    pdf = "pdfs/Bruckner-2019-DVM.pdf",
    images = "images/Bruckner-2019-DVM-1.jpg",
    thumbnails = "images/Bruckner-2019-DVM.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    doi = "10.1111/cgf.13640",
    youtube = "https://www.youtube.com/watch?v=aZmDhTbJlAM",
    git = "https://github.com/sbruckner/dynamol.git",
    project = "MetaVis"
    }
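
    The Gaussian model mentioned in the abstract defines the surface as an isosurface of a sum of atom-centered Gaussians. The naive CPU evaluation below only illustrates that definition; the paper's contribution is precisely to avoid such brute-force evaluation via image-space, visibility-driven computation.

        # Gaussian density of a molecule at a point; the surface is the set of
        # points where this density equals a chosen iso-value (e.g., 1.0).
        import numpy as np

        def gaussian_density(p, centers, radii, sharpness=2.0):
            """p: (3,) query point; centers: (n, 3) atom positions; radii: (n,)."""
            d2 = np.sum((centers - p) ** 2, axis=1)
            return float(np.sum(np.exp(-sharpness * d2 / radii ** 2)))
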
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Bruckner-2018-MSD,
    author = "Stefan Bruckner and Tobias Isenberg and Timo Ropinski and Alexander Wiebel",
    title = "A Model of Spatial Directness in Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    volume = "25",
    number = "8",
    year = "2019",
    abstract = "We discuss the concept of directness in the context of spatial interaction with visualization. In particular, we propose a modelthat allows practitioners to analyze and describe the spatial directness of interaction techniques, ultimately to be able to better understandinteraction issues that may affect usability. To reach these goals, we distinguish between different types of directness. Each type ofdirectness depends on a particular mapping between different spaces, for which we consider the data space, the visualization space, theoutput space, the user space, the manipulation space, and the interaction space. In addition to the introduction of the model itself, we alsoshow how to apply it to several real-world interaction scenarios in visualization, and thus discuss the resulting types of spatial directness,without recommending either more direct or more indirect interaction techniques. In particular, we will demonstrate descriptive andevaluative usage of the proposed model, and also briefly discuss its generative usage.",
    pdf = "pdfs/Bruckner-2018-MSD.pdf",
    images = "images/Bruckner-2018-MSD.jpg",
    thumbnails = "images/Bruckner-2018-MSD.png",
    doi = "10.1109/TVCG.2018.2848906",
    project = "MetaVis"
    }
    [PDF] [DOI] [VID] [Bibtex]
    @ARTICLE {Stoppel-2019-FVI,
    author = "Sergej Stoppel and Magnus Paulson Erga and Stefan Bruckner",
    title = "Firefly: Virtual Illumination Drones for Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2019",
    volume = "25",
    pages = "1204-1213",
    abstract = "Light specification in three dimensional scenes is a complex problem and several approaches have been presented that aim to automate this process. However, there are many scenarios where a static light setup is insufficient, as the scene content and camera position may change. Simultaneous manual control over the camera and light position imposes a high cognitive load on the user. To address this challenge, we introduce a novel approach for automatic scene illumination with Fireflies. Fireflies are intelligent virtual light drones that illuminate the scene by traveling on a closed path. The Firefly path automatically adapts to changes in the scene based on an outcome-oriented energy function. To achieve interactive performance, we employ a parallel rendering pipeline for the light path evaluations. We provide a catalog of energy functions for various application scenarios and discuss the applicability of our method on several examples.",
    pdf = "pdfs/VIS2018-Firefly.pdf",
    vid = "vids/FinalVideo.mp4",
    images = "images/Teaser.png",
    thumbnails = "images/HeadRightCroppedThumbnail.png",
    doi = "10.1109/TVCG.2018.2864656",
    project = "MetaVis"
    }
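
    A hypothetical sketch of the outcome-oriented evaluation described above: candidate light positions are scored by how well they illuminate a set of visible surface samples, and the light moves toward the best-scoring candidate. The simple Lambertian energy term below is an assumption; the paper provides a whole catalog of energy functions.

        # Score candidate light positions by mean squared darkness of samples.
        import numpy as np

        def illumination_energy(light_pos, points, normals):
            """Lower is better; points/normals: (n, 3) arrays, normals unit length."""
            to_light = light_pos - points
            to_light /= np.linalg.norm(to_light, axis=1, keepdims=True)
            lambert = np.clip(np.sum(to_light * normals, axis=1), 0.0, 1.0)
            return float(np.mean((1.0 - lambert) ** 2))

        def best_candidate(candidates, points, normals):
            return min(candidates,
                       key=lambda c: illumination_energy(c, points, normals))
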

2018

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Magnus-2018-VPI,
    author = "Jens G. Magnus and Stefan Bruckner",
    title = "Interactive Dynamic Volume Illumination with Refraction and Caustics",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2018",
    volume = "24",
    number = "1",
    pages = "984--993",
    month = "jan",
    abstract = "In recent years, significant progress has been made indeveloping high-quality interactive methods for realistic volumeillumination. However, refraction -- despite being an important aspectof light propagation in participating media -- has so far only receivedlittle attention. In this paper, we present a novel approach forrefractive volume illumination including caustics capable of interactiveframe rates. By interleaving light and viewing ray propagation, ourtechnique avoids memory-intensive storage of illumination informationand does not require any precomputation. It is fully dynamic and allparameters such as light position and transfer function can be modifiedinteractively without a performance penalty.",
    pdf = "pdfs/Magnus-2018-IDV.pdf",
    images = "images/Magnus-2018-IDV.jpg",
    thumbnails = "images/Magnus-2018-IDV.png",
    youtube = "https://www.youtube.com/watch?v=3tn6sSXw4NQ",
    doi = "10.1109/TVCG.2017.2744438",
    event = "IEEE SciVis 2017",
    keywords = "interactive volume rendering, illumination, refraction, shadows, caustics",
    location = "Phoenix, USA",
    project = "MetaVis"
    }
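
    Any renderer of this kind needs a refraction step at material interfaces. The following is the standard vector form of Snell's law as used throughout computer graphics (not code from the paper); direction and normal are unit vectors and eta is the ratio of refractive indices n1/n2.

        # Refract a unit direction at a surface with unit normal; returns None
        # on total internal reflection.
        import numpy as np

        def refract(direction, normal, eta):
            cos_i = -float(np.dot(direction, normal))
            sin2_t = eta * eta * (1.0 - cos_i * cos_i)
            if sin2_t > 1.0:
                return None  # total internal reflection
            cos_t = np.sqrt(1.0 - sin2_t)
            return eta * direction + (eta * cos_i - cos_t) * normal
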
    [PDF] [Bibtex]
    @MISC {Smit18MMIV,
    author = "N. N. Smit and S. Bruckner and H. Hauser and I. Haldorsen and A. Lundervold and A. S. Lundervold and E. Hodneland and L. Oltedal and K. Specht and E. R. Gruner",
    title = "Research Agenda of the Mohn Medical Imaging and Visualization Centre in Bergen, Norway",
    howpublished = "Poster presented at the EG VCBM workshop 2018",
    month = "September",
    year = "2018",
    abstract = "The Mohn Medical Imaging and Visualization Centre (MMIV) was recently established in collaboration between the University of Bergen, Norway, and the Haukeland University Hospital in Bergen with generous financial support from the Bergen Research Foundation (BFS) to conduct cross-disciplinary research related to state-of-the-art medical imaging, including preclinical and clinical high-field MRI, CT and hybrid PET/CT/MR.The overall goal of the Centre is to research new methods in quantitative imaging and interactive visualization to predict changes in health and disease across spatial and temporal scales. This encompasses research in feature detection, feature extraction, and feature prediction, as well as on methods and techniques for the interactive visualization of spatial and abstract data related to and derived from these features.With special emphasis on the natural and medical sciences, the long-term goal of the Centre is to consolidate excellence in the interplay between medical imaging (physics, chemistry, radiography, radiology), and visualization (computer science and mathematics) and develop novel and refined imaging methods that may ultimately improve patient care. In this poster, we describe the overall research agenda of MMIV and describe the four core projects in the centre.",
    pdf = "pdfs/smit2018posterabstract.pdf",
    images = "images/MMIVPoster.png",
    thumbnails = "images/MMIVPoster.png",
    location = "Granada, Spain",
    project = "VIDI"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2018-SSW,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Smart Surrogate Widgets for Direct Volume Manipulation",
    booktitle = "Proceedings of IEEE PacificVis 2018",
    year = "2018",
    pages = "36--45",
    month = "apr",
    abstract = "Interaction is an essential aspect in volume visualization, yet commonmanipulation tools such as bounding boxes or clipping planewidgets provide rather crude tools as they neglect the complex structureof the underlying data. In this paper, we introduce a novelvolume interaction approach based on smart widgets that are automaticallyplaced directly into the data in a visibility-driven manner.By adapting to what the user actually sees, they act as proxies thatallow for goal-oriented modifications while still providing an intuitiveset of simple operations that is easy to control. In particular, ourmethod is well-suited for direct manipulation scenarios such as touchscreens, where traditional user interface elements commonly exhibitlimited utility. To evaluate out approach we conducted a qualitativeuser study with nine participants with various backgrounds.",
    pdf = "pdfs/Stoppel-2018-SSW.pdf",
    images = "images/Stoppel-2018-SSW.jpg",
    thumbnails = "images/Stoppel-2018-SSW.png",
    youtube = "https://www.youtube.com/watch?v=wMRw-W0SrLk",
    event = "IEEE PacificVis 2018",
    keywords = "smart interfaces, volume manipulation, volume visualization",
    doi = "10.1109/PacificVis.2018.00014",
    project = "MetaVis"
    }

2017

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2017-VPI,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Vol²velle: Printable Interactive Volume Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "861--870",
    month = "jan",
    abstract = "Interaction is an indispensable aspect of data visualization. The  presentation of volumetric data, in particular, often significantly  benefits from interactive manipulation of parameters such as transfer  functions, rendering styles, or clipping planes. However, when we  want to create hardcopies of such visualizations, this essential  aspect is lost. In this paper, we present a novel approach for creating  hardcopies of volume visualizations which preserves a certain degree  of interactivity. We present a method for automatically generating  Volvelles, printable tangible wheel charts that can be manipulated  to explore different parameter settings. Our interactive system allows  the flexible mapping of arbitrary visualization parameters and supports  advanced features such as linked views. The resulting designs can  be easily reproduced using a standard printer and assembled within  a few minutes.",
    pdf = "pdfs/Stoppel-2017-VPI.pdf",
    images = "images/Stoppel-2017-VPI.jpg",
    thumbnails = "images/Stoppel-2017-VPI.png",
    youtube = "https://www.youtube.com/watch?v=Z1K8t-FCiXI",
    doi = "10.1109/TVCG.2016.2599211",
    event = "IEEE SciVis 2016",
    keywords = "physical visualization, interaction, volume visualization, illustrative visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Smit-2017-PAS,
    author = "Noeska Smit and Kai Lawonn and Annelot Kraima and Marco DeRuiter and Hessam Sokooti and Stefan Bruckner and Elmar Eisemann and Anna Vilanova",
    title = "PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "741--750",
    month = "jan",
    abstract = "Due to the intricate relationship between the pelvic organs and vital  structures, such as vessels and nerves, pelvic anatomy is often considered  to be complex to comprehend. In oncological pelvic surgery, a trade-off  has to be made between complete tumor resection and preserving function  by preventing damage to the nerves. Damage to the autonomic nerves  causes undesirable post-operative side-effects such as fecal and  urinal incontinence, as well as sexual dysfunction in up to 80 percent  of the cases. Since these autonomic nerves are not visible in pre-operative  MRI scans or during surgery, avoiding nerve damage during such a  surgical procedure becomes challenging. In this work, we present  visualization methods to represent context, target, and risk structures  for surgical planning. We employ distance-based and occlusion management  techniques in an atlas-based surgical planning tool for oncological  pelvic surgery. Patient-specific pre-operative MRI scans are registered  to an atlas model that includes nerve information. Through several  interactive linked views, the spatial relationships and distances  between the organs, tumor and risk zones are visualized to improve  understanding, while avoiding occlusion. In this way, the surgeon  can examine surgically relevant structures and plan the procedure  before going into the operating theater, thus raising awareness of  the autonomic nerve zone regions and potentially reducing post-operative  complications. Furthermore, we present the results of a domain expert  evaluation with surgical oncologists that demonstrates the advantages  of our approach.",
    pdf = "pdfs/Smit-2017-PAS.pdf",
    images = "images/Smit-2017-PAS.jpg",
    thumbnails = "images/Smit-2017-PAS.png",
    youtube = "https://www.youtube.com/watch?v=vHp05I5-hp8",
    doi = "10.1109/TVCG.2016.2598826",
    event = "IEEE SciVis 2016",
    keywords = "atlas, surgical planning, medical visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Lind-2017-CCR,
    author = "Andreas Johnsen Lind and Stefan Bruckner",
    title = "Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "781--790",
    month = "jan",
    abstract = "Within the visualization community there are some well-known techniques  for visualizing 3D spatial data and some general assumptions about  how perception affects the performance of these techniques in practice.  However, there is a lack of empirical research backing up the possible  performance differences among the basic techniques for general tasks.  One such assumption is that 3D renderings are better for obtaining  an overview, whereas cross sectional visualizations such as the commonly  used Multi- Planar Reformation (MPR) are better for supporting detailed  analysis tasks. In the present study we investigated this common  assumption by examining the difference in performance between MPR  and 3D rendering for correctly identifying a known surface. We also  examined whether prior experience working with image data affects  the participant’s performance, and whether there was any difference  between interactive or static versions of the visualizations. Answering  this question is important because it can be used as part of a scientific  and empirical basis for determining when to use which of the two  techniques. An advantage of the present study compared to other studies  is that several factors were taken into account to compare the two  techniques. The problem was examined through an experiment with 45  participants, where physical objects were used as the known surface  (ground truth). Our findings showed that: 1. The 3D renderings largely  outperformed the cross sections; 2. Interactive visualizations were  partially more effective than static visualizations; and 3. The high  experience group did not generally outperform the low experience  group.",
    pdf = "pdfs/Lind-2017-CCR.pdf",
    images = "images/Lind-2017-CCR.jpg",
    thumbnails = "images/Lind-2017-CCR.png",
    doi = "10.1109/TVCG.2016.2598602",
    event = "IEEE SciVis 2016",
    keywords = "human-computer interaction, quantitative evaluation, volume visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "https://www.youtube.com/watch?v=xGPs560ttp0",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"
    }
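
    The control flow of such an output-sensitive scheme can be sketched as follows; the per-voxel contribution estimate (the technical core of the paper) is abstracted into an assumed callback, and all names are illustrative.

        # Process voxels in descending order of predicted contribution to the
        # image and stop once the remainder is below the permissible error.
        def filter_output_sensitive(voxels, contribution, apply_filter, budget):
            """contribution(v) -> predicted effect on the final image."""
            for v in sorted(voxels, key=contribution, reverse=True):
                if contribution(v) <= budget:
                    break  # remaining voxels cannot visibly change the image
                apply_filter(v)

    With the budget set to zero, every voxel whose filtering could affect the image is processed, matching the abstract's statement that zero permissible error reproduces full-volume filtering.
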
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2017-DVN,
    author = "Peter Mindek and Gabriel Mistelbauer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Data-Sensitive Visual Navigation",
    journal = "Computers \& Graphics",
    year = "2017",
    volume = "67",
    pages = "77--85",
    month = "oct",
    abstract = "In visualization systems it is often the case that thechanges of the input parameters are not proportional to the visualchange of the generated output. In this paper, we propose a model forenabling data-sensitive navigation for user-interface elements. Thismodel is applied to normalize the user input according to the visualchange, and also to visually communicate this normalization. In thisway, the exploration of heterogeneous data using common interactionelements can be performed in an efficient way. We apply our model to thefield of medical visualization and present guided navigation tools fortraversing vascular structures and for camera rotation around 3Dvolumes. The presented examples demonstrate that the model scales touser-interface elements where multiple parameters are setsimultaneously.",
    pdf = "pdfs/Mindek-2017-DVN.pdf",
    images = "images/Mindek-2017-DVN.jpg",
    thumbnails = "images/Mindek-2017-DVN.png",
    youtube = "https://www.youtube.com/watch?v=FnhbjX7BRXI",
    note = "SCCG 2017 Best Paper Award",
    doi = "10.1016/j.cag.2017.05.012",
    event = "SCCG 2017",
    keywords = "navigation, exploration, medical visualization",
    location = "Mikulov, Czech Republic",
    project = "MetaVis"
    }
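
    The normalization idea above amounts to an arc-length-style reparameterization of the control: precompute the visual change between consecutive parameter values, then remap the slider so that equal slider motion yields equal visual change. The sketch below assumes a user-supplied visual_change callback and is illustrative only.

        # Remap a [0, 1] slider so that equal motion produces equal visual change.
        import bisect

        def build_remap(param_values, visual_change):
            """param_values: ordered parameter samples; returns f(slider) -> value."""
            cum = [0.0]
            for a, b in zip(param_values, param_values[1:]):
                cum.append(cum[-1] + visual_change(a, b))
            total = cum[-1] or 1.0
            norm = [c / total for c in cum]

            def remap(s):
                i = min(bisect.bisect_left(norm, s), len(param_values) - 1)
                return param_values[i]

            return remap
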
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2017-AVA,
    author = "Alexandra Diehl and Leandro Pelorosso and Kresimir Matkovic and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Albero: A Visual Analytics Approach for Probabilistic Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "7",
    pages = "135--144",
    month = "oct",
    abstract = "Probabilistic weather forecasts are amongst the most popularways to quantify numerical forecast uncertainties. The analogregression method can quantify uncertainties and express them asprobabilities. The method comprises the analysis of errorsfrom a large database of past forecasts generated with a specificnumerical model and observational data. Current visualizationtools based on this method are essentially automated and provide limitedanalysis capabilities. In this paper, we propose a novelapproach that breaks down the automatic process using the experience andknowledge of the users and creates a new interactivevisual workflow. Our approach allows forecasters to study probabilisticforecasts, their inner analogs and observations, theirassociated spatial errors, and additional statistical information bymeans of coordinated and linked views. We designed thepresented solution following a participatory methodology together withdomain experts. Several meteorologists with differentbackgrounds validated the approach. Two case studies illustrate thecapabilities of our solution. It successfully facilitates theanalysis of uncertainty and systematic model biases for improveddecision-making and process-quality measurements.",
    pdf = "pdfs/Diehl-2017-AVA.pdf",
    images = "images/Diehl-2017-AVA.jpg",
    thumbnails = "images/Diehl-2017-AVA.png",
    youtube = "https://www.youtube.com/watch?v=-yqoeEgkz28",
    doi = "10.1111/cgf.13279",
    keywords = "visual analytics, weather forecasting, uncertainty",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Swoboda-2017-VQI,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visualization and Quantification for Interactive Analysis of Neural Connectivity in Drosophila",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "160--171",
    month = "jan",
    abstract = "Neurobiologists investigate the brain of the common fruit fly Drosophila  melanogaster to discover neural circuits and link them to complex  behavior. Formulating new hypotheses about connectivity requires  potential connectivity information between individual neurons, indicated  by overlaps of arborizations of two or more neurons. As the number  of higher order overlaps (i.e., overlaps of three or more arborizations)  increases exponentially with the number of neurons under investigation,  visualization is impeded by clutter and quantification becomes a  burden. Existing solutions are restricted to visual or quantitative  analysis of pairwise overlaps, as they rely on precomputed overlap  data. We present a novel tool that complements existing methods for  potential connectivity exploration by providing for the first time  the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in both its spatial anatomical context and on a quantitative level.  Qualitative evaluation by neuroscientists and non-experts demonstrated  the utility and usability of the tool",
    pdf = "pdfs/Swoboda-2017-VQI.pdf",
    images = "images/Swoboda-2017-VQI.jpg",
    thumbnails = "images/Swoboda-2017-VQI.png",
    youtube = "https://www.youtube.com/watch?v=bycWGQQpqks",
    doi = "10.1111/cgf.12792",
    keywords = "visual analysis, neurobiology"
    }
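
    A toy version of the overlap computation discussed above (materializing voxel sets on the CPU, which the paper's on-the-fly GPU approach deliberately avoids) makes the combinatorial growth tangible:

        # Higher-order arborization overlaps as set intersections of voxel ids.
        from itertools import combinations

        def higher_order_overlaps(arborizations, order=3):
            """arborizations: dict name -> set of voxel ids; returns overlap sizes."""
            overlaps = {}
            for group in combinations(arborizations, order):
                common = set.intersection(*(arborizations[n] for n in group))
                if common:
                    overlaps[group] = len(common)
            return overlaps

    The number of candidate groups grows as C(n, order), which is exactly why precomputing all higher-order overlaps becomes infeasible for larger neuron sets.
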
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2017-FCC,
    author = "Ivan Kolesar and Stefan Bruckner and Ivan Viola and Helwig Hauser",
    title = "A Fractional Cartesian Composition Model for Semi-spatial Comparative Visualization Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "851--860",
    month = "jan",
    abstract = "The study of spatial data ensembles leads to substantial visualization  challenges in a variety of applications. In this paper, we present  a model for comparative visualization that supports the design of  according ensemble visualization solutions by partial automation.  We focus on applications, where the user is interested in preserving  selected spatial data characteristics of the data as much as possible—even  when many ensemble members should be jointly studied using comparative  visualization. In our model, we separate the design challenge into  a minimal set of user-specified parameters and an optimization component  for the automatic configuration of the remaining design variables.  We provide an illustrated formal description of our model and exemplify  our approach in the context of several application examples from  different domains in order to demonstrate its generality within the  class of comparative visualization problems for spatial data ensembles.",
    pdf = "pdfs/Kolesar-2017-FCC.pdf",
    images = "images/Kolesar-2017-FCC.jpg",
    thumbnails = "images/Kolesar-2017-FCC.png",
    youtube = "https://www.youtube.com/watch?v=_zk67fmryok",
    doi = "10.1109/TVCG.2016.2598870",
    event = "IEEE SciVis 2016",
    keywords = "visualization models, integrating spatial and non-spatial data visualization, design methodologies",
    location = "Baltimore, USA",
    project = "physioillustration"
    }

2016

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2016-GIR,
    author = "Sergej Stoppel and Erlend Hodneland and Helwig Hauser and Stefan Bruckner",
    title = "Graxels: Information Rich Primitives for the Visualization of Time-Dependent Spatial Data",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    pages = "183--192",
    month = "sep",
    abstract = "Time-dependent volumetric data has important applications in areas  as diverse as medicine, climatology, and engineering. However, the  simultaneous quantitative assessment of spatial and temporal features  is very challenging. Common visualization techniques show either  the whole volume in one time step (for example using direct volume  rendering) or let the user select a region of interest (ROI) for  which a collection of time-intensity curves is shown. In this paper,  we propose a novel approach that dynamically embeds quantitative  detail views in a spatial layout. Inspired by the concept of small  multiples, we introduce a new primitive graxel (graph pixel). Graxels  are view dependent primitives of time-intensity graphs, generated  on-the-fly by aggregating per-ray information over time and image  regions. Our method enables the detailed feature-aligned visual analysis  of time-dependent volume data and allows interactive refinement and  filtering. Temporal behaviors like frequency relations, aperiodic  or periodic oscillations and their spatial context are easily perceived  with our method. We demonstrate the power of our approach using examples  from medicine and the natural sciences.",
    pdf = "pdfs/Stoppel-2016-GIR.pdf",
    images = "images/Stoppel-2016-GIR.jpg",
    thumbnails = "images/Stoppel-2016-GIR.png",
    youtube = "https://www.youtube.com/watch?v=UsClj3ytd0Y",
    doi = "10.2312/vcbm.20161286",
    event = "VCBM 2016",
    keywords = "time-dependent data, volume data, small multiples",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Labschuetz-2016-JJC,
    author = "Matthias Labsch{\"u}tz and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "1025--1034",
    month = "jan",
    abstract = "Abstract—Sparse volume data structures enable the efficient representation  of large but sparse volumes in GPU memory for com putation and visualization.  However, the choice of a specific data structure for a given data  set depends on several factors, such as the memory budget, the sparsity  of the data, and data access patterns. In general, there is no single  optimal sparse data structure, but a set of several candidates with  individual strengths and drawbacks. One solution to this problem  are hybrid data structures which locally adapt themselves to the  sparsity. However, they typically suffer from increased traversal  overhead which limits their utility in many applications. This paper  presents JiTTree, a novel sparse hybrid volume data structure that  uses just-in-time compilation to overcome these problems. By combining  multiple sparse data structures and reducing traversal overhead we  leverage their individual advantages. We demonstrate that hybrid  data structures adapt well to a large range of data sets. They are  especially superior to other sparse data structures for data sets  that locally vary in sparsity. Possible optimization criteria are  memory, performance and a combination thereof. Through just-in-time  (JIT) compilation, JiTTree reduces the traversal overhead of the  resulting optimal data structure. As a result, our hybrid volume  data structure enables efficient computations on the GPU, while being  superior in terms of memory usage when compared to non-hybrid data  structures.",
    pdf = "pdfs/Labschuetz-2016-JJC.pdf",
    images = "images/Labschuetz-2016-JJC.jpg",
    thumbnails = "images/Labschuetz-2016-JJC.png",
    doi = "10.1109/TVCG.2015.2467331",
    event = "IEEE SciVis 2015",
    keywords = "data transformation and representation, GPUs and multi-core architectures, volume rendering",
    location = "Chicago, USA"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Klein-2016-TIV,
    author = "Tobias Klein and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language",
    booktitle = "Proceedings of the International Workshop on OpenCL 2016",
    year = "2016",
    month = "apr",
    abstract = "The use of GPUs and the massively parallel computing paradigm have  become wide-spread. We describe a framework for the interactive visualization  and visual analysis of the run-time behavior of massively parallel  programs, especially OpenCL kernels. This facilitates understanding  a program's function and structure, finding the causes of possible  slowdowns, locating program bugs, and interactively exploring and  visually comparing different code variants in order to improve performance  and correctness. Our approach enables very specific, user-centered  analysis, both in terms of the recording of the run-time behavior  and the visualization itself. Instead of having to manually write  instrumented code to record data, simple code annotations tell the  source-to-source compiler which code instrumentation to generate  automatically. The visualization part of our framework then enables  the interactive analysis of kernel run-time behavior in a way that  can be very specific to a particular problem or optimization goal,  such as analyzing the causes of memory bank conflicts or understanding  an entire parallel algorithm.",
    pdf = "pdfs/Klein-2016-TIV.pdf",
    images = "images/Klein-2016-TIV.jpg",
    thumbnails = "images/Klein-2016-TIV.png",
    doi = "10.1145/2909437.2909459",
    event = "IWOCL 2016",
    extra = "pdfs/Klein-2016-TIV-Poster.pdf",
    keywords = "domain specific languages, GPU programming, visual exploration",
    location = "Vienna, Austria",
    owner = "bruckner"
    }

2015

    [PDF] [Bibtex]
    @ARTICLE {Angelelli-2015-PQA,
    author = "Paolo Angelelli and Stefan Bruckner",
    title = "Performance and Quality Analysis of Convolution-Based Volume Illumination",
    journal = "Journal of WSCG",
    year = "2015",
    volume = "23",
    number = "2",
    pages = "131--138",
    month = "jun",
    abstract = "Convolution-based techniques for volume rendering are among the fastest  in the on-the-fly volumetric illumination category. Such methods,  however, are still considerably slower than conventional local illumination  techniques. In this paper we describe how to adapt two commonly used  strategies for reducing aliasing artifacts, namely pre-integration  and supersampling, to such techniques. These strategies can help  reduce the sampling rate of the lighting information (thus the number  of convolutions), bringing considerable performance benefits. We  present a comparative analysis of their effectiveness in offering  performance improvements. We also analyze the (negligible) differences  they introduce when comparing their output to the reference method.  These strategies can be highly beneficial in setups where direct  volume rendering of continuously streaming data is desired and continuous  recomputation of full lighting information is too expensive, or where  memory constraints make it preferable not to keep additional precomputed  volumetric data in memory. In such situations these strategies make  single pass, convolution-based volumetric illumination models viable  for a broader range of applications, and this paper provides practical  guidelines for using and tuning such strategies to specific use cases.",
    pdf = "pdfs/Angelelli-2015-PQA.pdf",
    images = "images/Angelelli-2015-PQA.jpg",
    thumbnails = "images/Angelelli-2015-PQA.png",
    keywords = "volume rendering, global illumination, scientific visualization, medical visualization"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Mindek-2015-ASM,
    author = "Peter Mindek and Ladislav \v{C}mol{\'i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Automatized Summarization of Multiplayer Games",
    booktitle = "Proceedings of SCCG 2015",
    year = "2015",
    pages = "93--100",
    month = "apr",
    abstract = "We present a novel method for creating automatized gameplay dramatization  of multiplayer video games. The dramatization serves as a visual  form of guidance through dynamic 3D scenes with multiple foci, typical  for such games. Our goal is to convey interesting aspects of the  gameplay by animated sequences creating a summary of events which  occurred during the game. Our technique is based on processing many  cameras, which we refer to as a flock of cameras, and events captured  during the gameplay, which we organize into a so-called event graph.  Each camera has a lifespan with a certain time interval and its parameters  such as position or look-up vector are changing over time. Additionally,  during its lifespan each camera is assigned an importance function,  which is dependent on the significance of the structures that are  being captured by the camera. The images captured by the cameras  are composed into a single continuous video using a set of operators  based on cinematographic effects. The sequence of operators is selected  by traversing the event graph and looking for specific patterns corresponding  to the respective operators. In this way, a large number of cameras  can be processed to generate an informative visual story presenting  the gameplay. Our compositing approach supports insets of camera  views to account for several important cameras simultaneously. Additionally,  we create seamless transitions between individual selected camera  views in order to preserve temporal continuity, which helps the user  to follow the virtual story of the gameplay.",
    pdf = "pdfs/Mindek-2015-ASM.pdf",
    images = "images/Mindek-2015-ASM.jpg",
    thumbnails = "images/Mindek-2015-ASM.png",
    note = "SCCG 2015 Best Paper Award",
    doi = "10.1145/2788539.2788549",
    keywords = "animation, storytelling, game visualization",
    location = "Smolenice, Slovakia",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2015-VAS,
    author = "Alexandra Diehl and Leandro Pelorosso and Claudio Delrieux and Celeste Saulo and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "381--390",
    month = "may",
    abstract = "Weather conditions affect multiple aspects of human life such as economy,  safety, security, and social activities. For this reason, weather  forecast plays a major role in society. Currently weather forecasts  are based on Numerical Weather Prediction (NWP) models that generate  a representation of the atmospheric flow. Interactive visualization  of geo-spatial data has been widely used in order to facilitate the  analysis of NWP models. This paper presents a visualization system  for the analysis of spatio-temporal patterns in short-term weather  forecasts. For this purpose, we provide an interactive visualization  interface that guides users from simple visual overviews to more  advanced visualization techniques. Our solution presents multiple  views that include a timeline with geo-referenced maps, an integrated  webmap view, a forecast operation tool, a curve-pattern selector,  spatial filters, and a linked meteogram. Two key contributions of  this work are the timeline with geo-referenced maps and the curve-pattern  selector. The latter provides novel functionality that allows users  to specify and search for meaningful patterns in the data. The visual  interface of our solution allows users to detect both possible weather  trends and errors in the weather forecast model.We illustrate the  usage of our solution with a series of case studies that were designed  and validated in collaboration with domain experts.",
    pdf = "pdfs/Diehl-2015-VAS.pdf",
    images = "images/Diehl-2015-VAS.jpg",
    thumbnails = "images/Diehl-2015-VAS.png",
    youtube = "https://www.youtube.com/watch?v=hhQwsuXpHo8",
    doi = "10.1111/cgf.12650",
    event = "EuroVis 2015",
    keywords = "weather forecasting, visual analysis, spatiotemporal data",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08"
    }
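    The curve-pattern selector can be approximated by a simple sliding-window search. The sketch below (a generic stand-in, assuming plain Euclidean distance after per-window normalization; the paper's matching criterion may differ) ranks windows of a forecast time series by similarity to a query curve:

    import numpy as np

    def match_pattern(series: np.ndarray, query: np.ndarray, k: int = 3):
        """Return start indices of the k windows most similar to the query."""
        w = len(query)
        q = (query - query.mean()) / (query.std() + 1e-9)  # shape-only match
        dists = []
        for i in range(len(series) - w + 1):
            win = series[i:i + w]
            win = (win - win.mean()) / (win.std() + 1e-9)
            dists.append(np.linalg.norm(win - q))
        return np.argsort(dists)[:k]

    rng = np.random.default_rng(0)
    ts = np.sin(np.linspace(0, 20, 200)) + 0.1 * rng.standard_normal(200)
    print(match_pattern(ts, ts[40:60]))  # window 40 should rank first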
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2015-GVE,
    author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner",
    title = "Guided Volume Editing based on Histogram Dissimilarity",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "91--100",
    month = "may",
    abstract = "Segmentation of volumetric data is an important part of many analysis  pipelines, but frequently requires manual inspection and correction.  While plenty of volume editing techniques exist, it remains cumbersome  and error-prone for the user to find and select appropriate regions  for editing. We propose an approach to improve volume editing by  detecting potential segmentation defects while considering the underlying  structure of the object of interest. Our method is based on a novel  histogram dissimilarity measure between individual regions, derived  from structural information extracted from the initial segmentation.  Based on this information, our interactive system guides the user  towards potential defects, provides integrated tools for their inspection,  and automatically generates suggestions for their resolution. We  demonstrate that our approach can reduce interaction effort and supports  the user in a comprehensive investigation for high-quality segmentations.",
    pdf = "pdfs/Karimov-2015-GVE.pdf",
    images = "images/Karimov-2015-GVE.jpg",
    thumbnails = "images/Karimov-2015-GVE.png",
    youtube = "https://www.youtube.com/watch?v=zjTYkXTm_dM",
    doi = "10.1111/cgf.12621",
    event = "EuroVis 2015",
    keywords = "medical visualization, segmentation, volume editing, interaction",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/"
    }
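    As a concrete example of a histogram dissimilarity between two volume regions, the sketch below uses the symmetric chi-squared distance; the paper derives its own measure from structural information in the initial segmentation, so this is only a generic stand-in:

    import numpy as np

    def region_histogram(values: np.ndarray, bins: int = 64) -> np.ndarray:
        h, _ = np.histogram(values, bins=bins, range=(0.0, 1.0))
        return h / max(h.sum(), 1)  # normalize to a distribution

    def chi2_dissimilarity(p: np.ndarray, q: np.ndarray) -> float:
        denom = p + q
        mask = denom > 0
        return 0.5 * float(np.sum((p[mask] - q[mask]) ** 2 / denom[mask]))

    rng = np.random.default_rng(1)
    a = region_histogram(rng.beta(2, 5, 10_000))  # e.g. one region's intensities
    b = region_histogram(rng.beta(5, 2, 10_000))  # e.g. a neighboring region
    print(chi2_dissimilarity(a, b))               # larger = more dissimilar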

2014

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Schmidt-2014-YMC,
    author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "YMCA - Your Mesh Comparison Application",
    booktitle = "Proceedings of IEEE VAST 2014",
    year = "2014",
    pages = "153--62",
    month = "nov",
    abstract = "Polygonal meshes can be created in several different ways. In this  paper we focus on the reconstruction of meshes from point clouds,  which are sets of points in 3D. Several algorithms that tackle this  task already exist, but they have different benefits and drawbacks,  which leads to a large number of possible reconstruction results  (i.e., meshes). The evaluation of those techniques requires extensive  comparisons between different meshes which is up to now done by either  placing images of rendered meshes side-by-side, or by encoding differences  by heat maps. A major drawback of both approaches is that they do  not scale well with the number of meshes. This paper introduces a  new comparative visual analysis technique for 3D meshes which enables  the simultaneous comparison of several meshes and allows for the  interactive exploration of their differences. Our approach gives  an overview of the differences of the input meshes in a 2D view.  By selecting certain areas of interest, the user can switch to a  3D representation and explore the spatial differences in detail.  To inspect local variations, we provide a magic lens tool in 3D.  The location and size of the lens provide further information on  the variations of the reconstructions in the selected area. With  our comparative visualization approach, differences between several  mesh reconstruction algorithms can be easily localized and inspected.",
    pdf = "pdfs/Schmidt-2014-YMC.pdf",
    images = "images/Schmidt-2014-YMC.jpg",
    thumbnails = "images/Schmidt-2014-YMC.png",
    youtube = "https://www.youtube.com/watch?v=1s-AmFCQRzM",
    doi = "10.1109/VAST.2014.7042491",
    event = "IEEE VIS 2014",
    keywords = "visual analysis, comparative visualization, 3D data exploration, focus+context, mesh comparison",
    location = "Paris, France",
    proceedings = "Proceedings of IEEE VAST 2014",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/ymca/"
    }
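    The 2D overview of mesh differences boils down to a per-point spread measure across reconstructions. A minimal sketch, assuming all meshes have been sampled at corresponding positions (YMCA's actual pipeline is richer):

    import numpy as np
    from typing import List

    def per_point_spread(samplings: List[np.ndarray]) -> np.ndarray:
        """samplings: (n, 3) arrays, one per mesh, sampled at corresponding
        positions. Returns the per-point standard-deviation magnitude."""
        stack = np.stack(samplings)  # (meshes, n, 3)
        return np.linalg.norm(stack.std(axis=0), axis=-1)

    base = np.random.default_rng(2).random((100, 3))
    meshes = [base + 0.01 * np.random.default_rng(i).random((100, 3))
              for i in range(4)]
    spread = per_point_spread(meshes)
    print(spread.argmax(), spread.max())  # the most disputed point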
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Waldner-2014-GHI,
    author = "Manuela Waldner and Stefan Bruckner and Ivan Viola",
    title = "Graphical Histories of Information Foraging",
    booktitle = "Proceedings of NordiCHI 2014",
    year = "2014",
    pages = "295--304",
    month = "oct",
    abstract = "During information foraging, knowledge workers iteratively seek, filter,  read, and extract information. When using multiple information sources  and different applications for information processing, re-examination  of activities for validation of previous decisions or re-discovery  of previously used information sources is challenging. In this paper,  we present a novel representation of cross-application histories  to support recall of past operations and re-discovery of information  resources. Our graphical history consists of a cross-scale visualization  combining an overview node-link diagram of used desktop resources  with nested (animated) snapshot sequences, based on a recording of  the visual screen output during the users’ desktop work. This representation  makes key elements of the users’ tasks visually stand out, while  exploiting the power of visual memory to recover subtle details of  their activities. In a preliminary study, users found our graphical  history helpful to recall details of an information foraging task  and commented positively on the ability to expand overview nodes  into snapshot and video sequences.",
    pdf = "pdfs/Waldner-2014-GHI.pdf",
    images = "images/Waldner-2014-GHI.jpg",
    thumbnails = "images/Waldner-2014-GHI.png",
    doi = "10.1145/2639189.2641202",
    keywords = "interaction history, graph visualization, provenance",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/"
    }
    [PDF] [VID] [Bibtex]
    @INPROCEEDINGS {Kolesar-2014-IPT,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Illustrating Polymerization using Three-level Model Fusion",
    booktitle = "Proceedings of IEEE BioVis 2014",
    year = "2014",
    month = "aug",
    abstract = "Research in cell biology is steadily contributing new knowledge about  many different aspects of physiological processes like polymerization,  both with respect to the involved molecular structures as well as  their related function. Illustrations of the spatio-temporal development  of such processes are not only used in biomedical education, but  also can serve scientists as an additional platform for in-silico  experiments. In this paper, we contribute a new, three-level modeling  approach to illustrate physiological processes from the class of  polymerization at different time scales. We integrate physical and  empirical modeling, according to which approach suits the different  involved levels of detail best, and we additionally enable a simple  form of interactive steering while the process is illustrated. We  demonstrate the suitability of our approach in the context of several  polymerization processes and report from a first evaluation with  domain experts.",
    pdf = "pdfs/Kolesar-2014-IPT.pdf",
    vid = "vids/Kolesar14Polymers.mp4",
    images = "images/Kolesar-2014-IPT.jpg",
    thumbnails = "images/Kolesar-2014-IPT.png",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Parulek-2014-CLV,
    author = "Julius Parulek and Daniel J{\"o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola",
    title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "6",
    pages = "276--287",
    month = "sep",
    abstract = "Molecular visualization is often challenged with rendering of large  molecular structures in real time. We introduce a novel approach  that enables us to show even large protein complexes. Our method  is based on the level-of-detail concept, where we exploit three different  abstractions combined in one visualization. Firstly, molecular surface  abstraction exploits three different surfaces, solvent-excluded surface  (SES), Gaussian kernels and van der Waals spheres, combined as one  surface by linear interpolation. Secondly, we introduce three shading  abstraction levels and a method for creating seamless transitions  between these representations. The SES representation with full shading  and added contours stands in focus while on the other side a sphere  representation of a cluster of atoms with constant shading and without  contours provide the context. Thirdly, we propose a hierarchical  abstraction based on a set of clusters formed on molecular atoms.  All three abstraction models are driven by one importance function  classifying the scene into the near-, mid- and far-field. Moreover,  we introduce a methodology to render the entire molecule directly  using the A-buffer technique, which further improves the performance.  The rendering performance is evaluated on series of molecules of  varying atom counts.",
    pdf = "pdfs/Parulek-2014-CLV.pdf",
    images = "images/Parulek-2014-CLV.jpg",
    thumbnails = "images/Parulek-2014-CLV.png",
    issn = "1467-8659",
    doi = "10.1111/cgf.12349",
    keywords = "level of detail algorithms, implicit surfaces, clustering, scientific visualization",
    project = "physioillustration"
    }
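    The importance function that classifies the scene into near-, mid- and far-field can be sketched as distance-driven blend weights over the three surface representations; the thresholds and the smoothstep choice below are illustrative assumptions:

    import numpy as np

    def smoothstep(e0, e1, x):
        t = np.clip((x - e0) / (e1 - e0), 0.0, 1.0)
        return t * t * (3 - 2 * t)

    def lod_weights(dist, near=10.0, far=100.0):
        """Return (w_ses, w_gauss, w_sphere) blend weights summing to 1."""
        to_mid = smoothstep(0.0, near, dist)  # 0 in the near-field
        to_far = smoothstep(near, far, dist)  # 1 in the far-field
        w_ses = 1.0 - to_mid
        w_sphere = to_far
        w_gauss = 1.0 - w_ses - w_sphere
        return w_ses, w_gauss, w_sphere

    for d in (2.0, 30.0, 200.0):
        print(d, lod_weights(d))  # SES up close, spheres far away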
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Swoboda-2014-VQA,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visual and Quantitative Analysis of Higher Order Arborization Overlaps for Neural Circuit Research",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "107--116",
    month = "sep",
    abstract = "Neuroscientists investigate neural circuits in the brain of the common  fruit fly Drosophila melanogaster to discover how complex behavior  is generated. Hypothesis building on potential connections between  individual neurons is an essential step in the discovery of circuits  that govern a specific behavior. Overlaps of arborizations of two  or more neurons indicate a potential anatomical connection, i.e.  the presence of joint synapses responsible for signal transmission  between neurons. Obviously, the number of higher order overlaps (i.e.  overlaps of three and more arborizations) increases exponentially  with the number of neurons under investigation making it almost impossible  to precompute quantitative information for all possible combinations.  Thus, existing solutions are restricted to pairwise comparison of  overlaps as they are relying on precomputed overlap quantification.  Analyzing overlaps by visual inspection of more than two arborizations  in 2D sections or in 3D is impeded by visual clutter or occlusion.  This work contributes a novel tool that complements existing methods  for potential connectivity exploration by providing for the first  time the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in its spatial anatomical context and on a quantitative level. Qualitative  evaluation with neuroscientists and non-expert users demonstrated  the utility and usability of the tool.",
    pdf = "pdfs/Swoboda-2014-VQA.pdf",
    images = "images/Swoboda-2014-VQA.jpg",
    thumbnails = "images/Swoboda-2014-VQA.png",
    youtube = "https://www.youtube.com/watch?v=iW2iVppPnsE",
    note = "VCBM 2014 Best Paper Honorable Mention",
    doi = "10.2312/vcbm.20141189",
    event = "VCBM 2014",
    keywords = "visual analysis, neurobiology",
    location = "Vienna, Austria"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Amirkhanov-2014-HSH,
    author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Meister Eduard Gr{\"o}ller",
    title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "5",
    pages = "51--60",
    month = "sep",
    abstract = "In scientific visualization the key task of research is the provision  of insight into a problem. Finding the solution to a problem may  be seen as finding a path through some rugged terrain which contains  mountains, chasms, swamps, and few flatlands. This path - an algorithm  discovered by the researcher - helps users to easily move around  this unknown area. If this way is a wide road paved with stones it  will be used for a long time by many travelers. However, a narrow  footpath leading through deep forests and deadly swamps will attract  only a few adventure seekers. There are many different paths with  different levels of comfort, length, and stability, which are uncertain  during the research process. Finding a systematic way to deal with  this uncertainty can greatly assist the search for a safe path which  is in our case the development of a suitable visualization algorithm  for a specific problem. In this work we will analyze the sources  of uncertainty in heuristically solving visualization problems and  will propose directions to handle these uncertainties.",
    pdf = "pdfs/Amirkhanov-2014-HSH.pdf",
    images = "images/Amirkhanov-2014-HSH.jpg",
    thumbnails = "images/Amirkhanov-2014-HSH.png",
    doi = "10.1007/978-1-4471-6497-5_5",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Sedlmair-2014-VPS,
    author = "Michael Sedlmair and Christoph Heinzl and Stefan Bruckner and Harald Piringer and Torsten M{\"o}ller",
    title = "Visual Parameter Space Analysis: A Conceptual Framework",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2161--2170",
    month = "dec",
    abstract = "Various case studies in different application domains have shown the  great potential of visual parameter space analysis to support validating  and using simulation models. In order to guide and systematize research  endeavors in this area, we provide a conceptual framework for visual  parameter space analysis problems. The framework is based on our  own experience and a structured analysis of the visualization literature.  It contains three major components: (1) a data flow model that helps  to abstractly describe visual parameter space analysis problems independent  of their application domain; (2) a set of four navigation strategies  of how parameter space analysis can be supported by visualization  tools; and (3) a characterization of six analysis tasks. Based on  our framework, we analyze and classify the current body of literature,  and identify three open research gaps in visual parameter space analysis.  The framework and its discussion are meant to support visualization  designers and researchers in characterizing parameter space analysis  problems and to guide their design and evaluation processes.",
    pdf = "pdfs/Sedlmair-2014-VPS.pdf",
    images = "images/Sedlmair-2014-VPS.jpg",
    thumbnails = "images/Sedlmair-2014-VPS.png",
    doi = "10.1109/TVCG.2014.2346321",
    event = "IEEE VIS 2014",
    keywords = "parameter space analysis, input-output model, simulation, task characterization, literature analysis",
    location = "Paris, France"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2014-VSI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger",
    title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2388--2396",
    month = "dec",
    abstract = "Researchers from many domains use scientific visualization in their  daily practice. Existing implementations of algorithms usually come  with a graphical user interface (high-level interface), or as software  library or source code (low-level interface). In this paper we present  a system that integrates domain-specific languages (DSLs) and facilitates  the creation of new DSLs. DSLs provide an effective interface for  domain scientists avoiding the difficulties involved with low-level  interfaces and at the same time offering more flexibility than high-level  interfaces. We describe the design and implementation of ViSlang,  an interpreted language specifically tailored for scientific visualization.  A major contribution of our design is the extensibility of the ViSlang  language. Novel DSLs that are tailored to the problems of the domain  can be created and integrated into ViSlang. We show that our approach  can be added to existing user interfaces to increase the flexibility  for expert users on demand, but at the same time does not interfere  with the user experience of novice users. To demonstrate the flexibility  of our approach we present new DSLs for volume processing, querying  and visualization. We report the implementation effort for new DSLs  and compare our approach with Matlab and Python implementations in  terms of run-time performance.",
    pdf = "pdfs/Rautek-2014-VSI.pdf",
    images = "images/Rautek-2014-VSI.jpg",
    thumbnails = "images/Rautek-2014-VSI.png",
    youtube = "https://www.youtube.com/watch?v=DbWazwyMRNw",
    doi = "10.1109/TVCG.2014.2346318",
    event = "IEEE VIS 2014",
    keywords = "domain-specific languages, volume visualization, volume visualization framework",
    location = "Paris, France",
    url = "http://vcc.kaust.edu.sa/Pages/Pub-ViSlang-Sys-Int-Dom-Spe-Lang-SC.aspx"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Pfister-2014-VIC,
    author = "Hanspeter Pfister and Verena Kaynig and Charl P. Botha and Stefan Bruckner and Vincent J. Dercksen and Hans-Christian Hege and Jos B.T.M. Roerdink",
    title = "Visualization in Connectomics",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "21",
    pages = "221--245",
    month = "sep",
    abstract = "Connectomics is a branch of neuroscience that attempts to create a  connectome, i.e., a completemap of the neuronal system and all connections  between neuronal structures. This representation can be used to understand  how functional brain states emerge from their underlying anatomical  structures and how dysfunction and neuronal diseases arise. We review  the current state-of-the-art of visualization and image processing  techniques in the field of connectomics and describe a number of  challenges. After a brief summary of the biological background and  an overview of relevant imaging modalities, we review current techniques  to extract connectivit",
    pdf = "pdfs/Pfister-2014-VIC.pdf",
    images = "images/Pfister-2014-VIC.jpg",
    thumbnails = "images/Pfister-2014-VIC.png",
    doi = "10.1007/978-1-4471-6497-5_21",
    keywords = "connectomics, neuroscience, visualization, imaging",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Angelelli-2014-LUP,
    author = "Paolo Angelelli and Sten Roar Snare and Siri Ann Nyrnes and Stefan Bruckner and Helwig Hauser and Lasse L{\o}vstakken",
    title = "Live Ultrasound-based Particle Visualization of Blood Flow in the Heart",
    booktitle = "Proceedings of SCCG 2014",
    year = "2014",
    pages = "42--49",
    month = "may",
    abstract = "We introduce an integrated method for the acquisition, processing  and visualization of live, in-vivo blood flow in the heart. The method  is based on ultrasound imaging, using a plane wave acquisition acquisition  protocol, which produces high frame rate ensemble data that are efficiently  processed to extract directional flow information not previously  available based on conventional Doppler imaging. These data are then  visualized using a tailored pathlet-based visualization approach,  to convey the slice-contained dynamic movement of the blood in the  heart. This is especially important when imaging patients with possible  congenital heart diseases, who typically exhibit complex flow patterns  that are challenging to interpret. With this approach, it now is  possible for the first time to achieve a real-time integration-based  visualization of 2D blood flow aspects based on ultrasonic imaging.  We demonstrate our solution in the context of selected cases of congenital  heart diseases in neonates, showing how our technique allows for  a more accurate and intuitive visualization of shunt flow and vortices.",
    pdf = "pdfs/Angelelli-2014-LUP.pdf",
    images = "images/Angelelli-2014-LUP.jpg",
    thumbnails = "images/Angelelli-2014-LUP.png",
    doi = "10.1145/2643188.2643200",
    keywords = "ultrasound medical visualization, real-time visualization, blood flow visualization",
    url = "http://dx.doi.org/10.1145/2643188.2643200"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2014-IIP,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Interactively Illustrating Polymerization using Three-level Model Fusion",
    journal = "BMC Bioinformatics",
    year = "2014",
    volume = "15",
    pages = "345",
    month = "oct",
    abstract = "Research in cell biology is steadily contributing new knowledge about  many aspects of physiological processes, both with respect to the  involved molecular structures as well as their related function.  Illustrations of the spatio-temporal development of such processes  are not only used in biomedical education, but also can serve scientists  as an additional platform for in-silico experiments. Results In this  paper, we contribute a new, three-level modeling approach to illustrate  physiological processes from the class of polymerization at different  time scales. We integrate physical and empirical modeling, according  to which approach best suits the different involved levels of detail,  and we additionally enable a form of interactive steering, while  the process is illustrated. We demonstrate the suitability of our  approach in the context of several polymerization processes and report  from a first evaluation with domain experts. Conclusion We conclude  that our approach provides a new, hybrid modeling approach for illustrating  the process of emergence in physiology, embedded in a densely filled  environment. Our approach of a complementary fusion of three systems  combines the strong points from the different modeling approaches  and is capable to bridge different spatial and temporal scales.",
    pdf = "pdfs/Kolesar-2014-IIP.pdf",
    images = "images/Kolesar-2014-IIP.jpg",
    thumbnails = "images/Kolesar-2014-IIP.png",
    youtube = "https://www.youtube.com/watch?v=iMl5nDicmhg",
    doi = "10.1186/1471-2105-15-345",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29",
    url = "http://www.ii.uib.no/vis/projects/physioillustration/research/interactive-molecular-illustration.html"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2014-MSS,
    author = "Peter Mindek and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Managing Spatial Selections with Contextual Snapshots",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "8",
    pages = "132--144",
    month = "dec",
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analysed and compared in different  views. However, the semantics of such selections often depend on  specific parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can also be used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with well-defined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data, the analysis of historical  documents and the display of anatomical data.",
    pdf = "pdfs/Mindek-2014-MSS.pdf",
    images = "images/Mindek-2014-MSS.jpg",
    thumbnails = "images/Mindek-2014-MSS.png",
    youtube = "https://www.youtube.com/watch?v=rxEf-Okp8Xo",
    doi = "10.1111/cgf.12406",
    keywords = "interaction, visual analytics, spatial selections, annotations",
    url = "http://www.cg.tuwien.ac.at/downloads/csl/"
    }
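    At its core, a contextual snapshot couples a spatial selection with the parameter context that gives it meaning. A minimal sketch (field names are illustrative, not the toolkit's actual interface):

    from dataclasses import dataclass, field

    @dataclass(frozen=True)
    class ContextualSnapshot:
        selection: tuple  # e.g. voxel indices or a lasso polygon
        context: dict = field(default_factory=dict)  # camera, transfer function, ...

        def restore(self, apply_context, apply_selection):
            apply_context(self.context)      # re-establish the parameters first,
            apply_selection(self.selection)  # so the selection is meaningful

    snap = ContextualSnapshot(selection=(12, 57, 99),
                              context={"time_step": 42, "tf": "bone-preset"})
    snap.restore(lambda c: print("context:", c), lambda s: print("selection:", s))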
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Solteszova-2014-VPS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner",
    title = "Visibility-Driven Processing of Streaming Volume Data",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "127--136",
    month = "sep",
    abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw  data is challenging to visualize directly without additional processing.  Noise removal and feature detection are common operations, but many  methods are too costly to compute over the whole volume when dealing  with live streamed data. In this paper, we propose a visibility-driven  processing scheme for handling costly on-the-fly processing of volumetric  data in real-time. In contrast to the traditional visualization pipeline,  our scheme utilizes a fast computation of the potentially visible  subset of voxels which significantly reduces the amount of data required  to process. As filtering operations modify the data values which  may affect their visibility, our method for visibility-mask generation  ensures that the set of elements deemed visible does not change after  processing. Our approach also exploits the visibility information  for the storage of intermediate values when multiple operations are  performed in sequence, and can therefore significantly reduce the  memory overhead of longer filter pipelines. We provide a thorough  technical evaluation of the approach and demonstrate it on several  typical scenarios where on-the-fly processing is required.",
    pdf = "pdfs/Solteszova-2014-VPS.pdf",
    images = "images/Solteszova-2014-VPS.jpg",
    thumbnails = "images/Solteszova-2014-VPS.png",
    youtube = "https://www.youtube.com/watch?v=WJgc6BX1qig",
    note = "VCBM 2014 Best Paper Award",
    doi = "10.2312/vcbm.20141198",
    event = "VCBM 2014",
    keywords = "ultrasound, visibility-driven processing, filtering",
    location = "Vienna, Austria"
    }
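    The key saving is that costly filters only touch potentially visible voxels. The sketch below uses a deliberately crude visibility proxy (front-to-back opacity accumulation along one axis); the paper's mask construction additionally guarantees that the visible set does not change after processing:

    import numpy as np
    from scipy.ndimage import median_filter

    def visible_mask(volume: np.ndarray, kappa: float = 0.05) -> np.ndarray:
        """Voxels whose accumulated opacity along the view axis (axis 0)
        has not yet saturated; a crude stand-in for a visibility mask."""
        acc = np.cumsum(volume * kappa, axis=0)  # front-to-back accumulation
        return acc < 1.0

    vol = np.random.default_rng(3).random((64, 64, 64))
    mask = visible_mask(vol)
    filtered = vol.copy()
    # For brevity the filter still runs densely here; a real pipeline would
    # restrict the computation itself to the masked voxels.
    filtered[mask] = median_filter(vol, size=3)[mask]
    print(f"{mask.mean():.0%} of voxels kept for processing")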

2013

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Mindek-2013-CSE,
    author = "Peter Mindek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations",
    booktitle = "Proceedings of SCCG 2013",
    year = "2013",
    pages = "59--66",
    month = "may",
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analyzed and compared in different  views. However, the semantics of such selections are often dependent  on other parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can be also used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with welldefined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data and the analysis of historical  documents.",
    pdf = "pdfs/Mindek-2013-CSE.pdf",
    images = "images/Mindek-2013-CSE.jpg",
    thumbnails = "images/Mindek-2013-CSE.png",
    youtube = "https://www.youtube.com/watch?v=djuqJgixUCs",
    note = "SCCG 2013 Best Paper Award",
    doi = "10.1145/2508244.2508251",
    keywords = "spatial selections, annotations, linked views, provenance",
    location = "Smolenice, Slovakia",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Auzinger-2013-VVC,
    author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R{\"u}diger Schernthaner and Arnold K{\"o}chl and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Vessel Visualization using Curved Surface Reformation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2858--2867",
    month = "dec",
    abstract = "Visualizations of vascular structures are frequently used in radiological  investigations to detect and analyze vascular diseases. Obstructions  of the blood flow through a vessel are one of the main interests  of physicians, and several methods have been proposed to aid the  visual assessment of calcifications on vessel walls. Curved Planar  Reformation (CPR) is a wide-spread method that is designed for peripheral  arteries which exhibit one dominant direction. To analyze the lumen  of arbitrarily oriented vessels, Centerline Reformation (CR) has  been proposed. Both methods project the vascular structures into  2D image space in order to reconstruct the vessel lumen. In this  paper, we propose Curved Surface Reformation (CSR), a technique that  computes the vessel lumen fully in 3D. This offers high-quality interactive  visualizations of vessel lumina and does not suffer from problems  of earlier methods such as ambiguous visibility cues or premature  discretization of centerline data. Our method maintains exact visibility  information until the final query of the 3D lumina data. We also  present feedback from several domain experts.",
    pdf = "pdfs/Auzinger-2013-VVC.pdf",
    images = "images/Auzinger-2013-VVC.jpg",
    thumbnails = "images/Auzinger-2013-VVC.png",
    youtube = "https://www.youtube.com/watch?v=rESIFaO_-Gs",
    doi = "10.1109/TVCG.2013.215",
    event = "IEEE VIS 2013",
    keywords = "volume Rendering, reformation, vessel, surface approximation",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Patel-2013-ICS,
    author = "Daniel Patel and Veronika \v{S}olt{\'e}szov{\'a} and Jan Martin Nordbotten and Stefan Bruckner",
    title = "Instant Convolution Shadows for Volumetric Detail Mapping",
    journal = "ACM Transactions on Graphics",
    year = "2013",
    volume = "32",
    number = "5",
    pages = "154:1--154:18",
    month = "sep",
    abstract = "In this article, we present a method for rendering dynamic scenes  featuring translucent procedural volumetric detail with all-frequency  soft shadows being cast from objects residing inside the view frustum.  Our approach is based on an approximation of physically correct shadows  from distant Gaussian area light sources positioned behind the view  plane, using iterative convolution. We present a theoretical and  empirical analysis of this model and propose an efficient class of  convolution kernels which provide high quality at interactive frame  rates. Our GPU-based implementation supports arbitrary volumetric  detail maps, requires no precomputation, and therefore allows for  real-time modi?cation of all rendering parameters.",
    pdf = "pdfs/Patel-2013-ICS.pdf",
    images = "images/Patel-2013-ICS.jpg",
    thumbnails = "images/Patel-2013-ICS.png",
    youtube = "https://www.youtube.com/watch?v=lhGWgew3HXY,https://www.youtube.com/watch?v=XrhYjgQxfb0",
    doi = "10.1145/2492684",
    keywords = "shadows, volumetric effects, procedural texturing, filtering",
    project = "geoillustrator",
    url = "http://dl.acm.org/citation.cfm?id=2492684"
    }
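    The iterative-convolution idea is easy to prototype on the CPU: sweep through the volume slice by slice away from the light, attenuating a running light buffer by each slice's opacity and blurring it a little at every step. The kernel choice and attenuation model below are illustrative assumptions:

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def convolution_shadows(opacity: np.ndarray, sigma: float = 1.0) -> np.ndarray:
        """opacity: (slices, h, w); returns the light arriving per voxel."""
        light = np.ones(opacity.shape[1:])
        out = np.empty_like(opacity)
        for z in range(opacity.shape[0]):       # slice 0 is closest to the light
            out[z] = light                      # light reaching this slice
            light = light * (1.0 - opacity[z])  # attenuate by the slice...
            light = gaussian_filter(light, sigma)  # ...and soften per step
        return out

    vol = np.zeros((32, 64, 64))
    vol[8, 24:40, 24:40] = 0.8  # a single translucent blocker
    shadow = convolution_shadows(vol)
    print(shadow[20].min())     # soft shadow behind the blocker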
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mistelbauer-2013-VVC,
    author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R{\"u}diger Schernthaner and Ivan Baclija and Arnold K{\"o}chl and Armin Kanitsar and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Vessel Visualization using Curvicircular Feature Aggregation",
    journal = "Computer Graphics Forum",
    year = "2013",
    volume = "32",
    number = "3",
    pages = "231--240",
    month = "jun",
    abstract = "Radiological investigations are common medical practice for the diagnosis  of peripheral vascular diseases. Existing visualization methods such  as Curved Planar Reformation (CPR) depict calcifications on vessel  walls to determine if blood is still able to flow. While it is possible  with conventional CPR methods to examine the whole vessel lumen by  rotating around the centerline of a vessel, we propose Curvicircular  Feature Aggregation (CFA), which aggregates these rotated images  into a single view. By eliminating the need for rotation, vessels  can be investigated by inspecting only one image. This method can  be used as a guidance and visual analysis tool for treatment planning.  We present applications of this technique in the medical domain and  give feedback from radiologists.",
    pdf = "pdfs/Mistelbauer-2013-VVC.pdf",
    images = "images/Mistelbauer-2013-VVC.jpg",
    thumbnails = "images/Mistelbauer-2013-VVC.png",
    youtube = "https://www.youtube.com/watch?v=WwF5GPOs1pA",
    doi = "10.1111/cgf.12110",
    event = "EuroVis 2013",
    keywords = "medical visualization, vessel visualization, vessel reformation",
    location = "Leipzig, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2013-VSV,
    author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "ViviSection: Skeleton-based Volume Editing",
    journal = "Computer Graphics Forum",
    year = "2013",
    volume = "32",
    number = "3",
    pages = "461--470",
    month = "jun",
    abstract = "Volume segmentation is important in many applications, particularly  in the medical domain. Most segmentation techniques, however, work  fully automatically only in very restricted scenarios and cumbersome  manual editing of the results is a common task. In this paper, we  introduce a novel approach for the editing of segmentation results.  Our method exploits structural features of the segmented object to  enable intuitive and robust correction and verification. We demonstrate  that our new approach can significantly increase the segmentation  quality even in difficult cases such as in the presence of severe  pathologies.",
    pdf = "pdfs/Karimov-2013-VSV.pdf",
    images = "images/Karimov-2013-VSV.jpg",
    thumbnails = "images/Karimov-2013-VSV.png",
    youtube = "https://www.youtube.com/watch?v=4s12ZbUyHiY",
    doi = "10.1111/cgf.12133",
    event = "EuroVis 2013",
    keywords = "volume visualization, volume editing, segmentation, interaction",
    location = "Leipzig, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/"
    }
    [PDF] [YT] [Bibtex]
    @ARTICLE {Mindek-2013-VPE,
    author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Meister Eduard Gr{\"o}ller",
    title = "Visual Parameter Exploration in {GPU} Shader Space",
    journal = "Journal of WSCG",
    year = "2013",
    volume = "21",
    number = "3",
    pages = "225--234",
    month = "jun",
    abstract = "The wide availability of high-performance GPUs has made the use of  shader programs in visualization ubiquitous.Understanding shaders  is a challenging task. Frequently it is difficult to mentally reconstruct  the nature and types of transformations applied to the underlying  data during the visualization process. We propose a method for the  visual analysis of GPU shaders, which allows the flexible exploration  and investigation of algorithms, parameters, and their effects. We  introduce a method for extracting feature vectors composed of several  attributes of the shader, as well as a direct manipulation interface  for assigning semantics to them. The user interactively classifies  pixels of images which are rendered with the investigated shader.  The two resulting classes, a positive class and a negative one, are  employed to steer the visualization. Based on this information, we  can extract a wide variety of additional attributes and visualize  their relation to this classification. Our system allows an interactive  exploration of shader space and we demonstrate its utility for several  different applications.",
    pdf = "pdfs/Mindek-2013-VPE.pdf",
    images = "images/Mindek-2013-VPE.jpg",
    thumbnails = "images/Mindek-2013-VPE.png",
    youtube = "https://www.youtube.com/watch?v=Sk7EXvqCoxs",
    keywords = "parameter space exploration, shader augmentation",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Schmidt-2013-VVA,
    author = "Johanna Schmidt and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "VAICo: Visual Analysis for Image Comparison",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2090--2099",
    month = "dec",
    abstract = "Scientists, engineers, and analysts are confronted with ever larger  and more complex sets of data, whose analysis poses special challenges.  In many situations it is necessary to compare two or more datasets.  Hence there is a need for comparative visualization tools to help  analyze differences or similarities among datasets. In this paper  an approach for comparative visualization for sets of images is presented.  Well-established techniques for comparing images frequently place  them side-by-side. A major drawback of such approaches is that they  do not scale well. Other image comparison methods encode differences  in images by abstract parameters like color. In this case information  about the underlying image data gets lost. This paper introduces  a new method for visualizing differences and similarities in large  sets of images which preserves contextual information, but also allows  the detailed analysis of subtle variations. Our approach identifies  local changes and applies cluster analysis techniques to embed them  in a hierarchy. The results of this process are then presented in  an interactive web application which allows users to rapidly explore  the space of differences and drill-down on particular features. We  demonstrate the flexibility of our approach by applying it to multiple  distinct domains.",
    pdf = "pdfs/Schmidt-2013-VVA.pdf",
    images = "images/Schmidt-2013-VVA.jpg",
    thumbnails = "images/Schmidt-2013-VVA.png",
    youtube = "https://www.youtube.com/watch?v=wfBqKZLVszk",
    doi = "10.1109/TVCG.2013.213",
    event = "IEEE VIS 2013",
    keywords = "focus+context visualization, image set comparison, comparative visualization",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/schmidt-2013-vaico/"
    }

2012

    [PDF] [DOI] [VID] [YT] [Bibtex]
    @ARTICLE {Birkeland-2012-IMC,
    author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola",
    title = "Illustrative Membrane Clipping",
    journal = "Computer Graphics Forum",
    year = "2012",
    volume = "31",
    number = "3",
    pages = "905--914",
    month = "jun",
    abstract = "Clipping is a fast, common technique for resolving occlusions. It  only requires simple interaction, is easily understandable, and thus  has been very popular for volume exploration. However, a drawback  of clipping is that the technique indiscriminately cuts through features.  Illustrators, for example, consider the structures in the vicinity  of the cut when visualizing complex spatial data and make sure that  smaller structures near the clipping plane are kept in the image  and not cut into fragments. In this paper we present a new technique,  which combines the simple clipping interaction with automated selective  feature preservation using an elastic membrane. In order to prevent  cutting objects near the clipping plane, the deformable membrane  uses underlying data properties to adjust itself to salient structures.  To achieve this behaviour, we translate data attributes into a potential  field which acts on the membrane, thus moving the problem of deformation  into the soft-body dynamics domain. This allows us to exploit existing  GPU-based physics libraries which achieve interactive frame rates.  For manual adjustment, the user can insert additional potential fields,  as well as pinning the membrane to interesting areas. We demonstrate  that our method can act as a flexible and non-invasive replacement  of traditional clipping planes.",
    pdf = "pdfs/Birkeland-2012-IMC.pdf",
    vid = "vids/Birkeland12Illustrative.avi",
    images = "images/Birkeland12Illustrative01.png, images/Birkeland12Illustrative02.png, images/Birkeland12Illustrative03.png",
    thumbnails = "images/Birkeland-2012-IMC.png",
    youtube = "https://www.youtube.com/watch?v=I89_--zul6c",
    note = "presented at EuroVis 2012",
    doi = "10.1111/j.1467-8659.2012.03083.x",
    event = "EuroVis 2012",
    keywords = "clipping, volume rendering, illustrative visualization",
    location = "Vienna, Austria",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/"
    }
    [PDF] [Bibtex]
    @MISC {Bruckner-2012-VEA-Thesis,
    author = "Stefan Bruckner",
    title = "Visual Exploration and Analysis of Volumetric Data",
    howpublished = "Habilitation Thesis",
    month = "mar",
    year = "2012",
    abstract = "Information technology has led to a rapid increase in the amount of  data that arise in areas such as biology, medicine, climate science,  and engineering. In many cases, these data are volumetric in nature,  i.e., they describe the distribution of one or several quantities  over a region in space. Volume visualization is the field of research  which investigates the transformation of such data sets into images  for purposes such as understanding structure or identifying features.  This thesis presents work to aid this process by improving the interactive  depiction, analysis, and exploration of volumetric data.",
    pdf = "pdfs/Bruckner-2012-VEA-Thesis.pdf",
    images = "images/Bruckner-2012-VEA-Thesis.jpg",
    thumbnails = "images/Bruckner-2012-VEA-Thesis.png",
    affiliation = "tuwien",
    keywords = "volume visualization, visual analysis, visual exploration",
    school = "Vienna University of Technology, Austria",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Bruckner-2012-VEA/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Herghelegiu-2012-BPV,
    author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy",
    journal = "Computer Graphics Forum",
    year = "2012",
    volume = "31",
    number = "3",
    pages = "1085--1094",
    month = "jun",
    abstract = "Biopsies involve taking samples from living tissue using a biopsy  needle. In current clinical practice they are a first mandatory step  before any further medical actions are planned. Performing a biopsy  on a deep seated brain tumor requires considerable time for establishing  and validating the desired biopsy needle pathway to avoid damage.  In this paper, we present a system for the visualization, analysis,  and validation of biopsy needle pathways. Our system uses a multi-level  approach for identifying stable needle placements which minimize  the risk of hitting blood vessels. This is one of the major dangers  in this type of intervention. Our approach helps in identifying and  visualizing the point on the pathway that is closest to a surrounding  blood vessel, requiring a closer inspection by the neurosurgeon.  An evaluation by medical experts is performed to demonstrate the  utility of our system.",
    pdf = "pdfs/Herghelegiu-2012-BPV.pdf",
    images = "images/Herghelegiu-2012-BPV.jpg",
    thumbnails = "images/Herghelegiu-2012-BPV.png",
    youtube = "https://www.youtube.com/watch?v=PBEv-D_0Zm8",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2012.03101.x",
    event = "EuroVis 2012",
    keywords = "biopsy planning, medical visualization, visual analysis",
    location = "Vienna, Austria",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/"
    }
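    The central safety query, which point of a candidate needle pathway comes closest to a blood vessel, can be sketched with a distance transform over a vessel mask (a stand-in for the paper's multi-level pipeline):

    import numpy as np
    from scipy.ndimage import distance_transform_edt

    def closest_approach(vessel_mask: np.ndarray, entry, target, samples=100):
        """Return (worst_point, distance) along the segment entry->target."""
        dist_field = distance_transform_edt(~vessel_mask)  # distance to vessels
        pts = np.linspace(entry, target, samples)
        d = [dist_field[tuple(np.round(p).astype(int))] for p in pts]
        i = int(np.argmin(d))
        return pts[i], d[i]

    mask = np.zeros((64, 64, 64), dtype=bool)
    mask[30:34, 30:34, :] = True  # a synthetic vessel
    point, dist = closest_approach(mask, np.array([0, 0, 32]), np.array([63, 63, 32]))
    print(point, dist)  # where a closer inspection is warranted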
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Ropinski-2012-UBT,
    author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Meister Eduard Gr{\"o}ller",
    title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2012",
    volume = "18",
    number = "11",
    pages = "1942--1955",
    month = "nov",
    abstract = "In this paper, we describe a novel approach for applying texture mapping  to volumetric data sets. In contrast to previous approaches, the  presented technique enables a unified integration of 2D and 3D textures  and thus allows to emphasize material boundaries as well as volumetric  regions within a volumetric data set at the same time. One key contribution  of this paper is a parametrization technique for volumetric data  sets, which takes into account material boundaries and volumetric  regions. Using this technique, the resulting parametrizations of  volumetric data sets enable texturing effects which create a higher  degree of realism in volume rendered images. We evaluate the quality  of the parametrization and demonstrate the usefulness of the proposed  concepts by combining volumetric texturing with volumetric lighting  models to generate photorealistic volume renderings. Furthermore,  we show the applicability in the area of illustrative visualization.",
    pdf = "pdfs/Ropinski-2012-UBT.pdf",
    images = "images/Ropinski-2012-UBT.jpg",
    thumbnails = "images/Ropinski-2012-UBT.png",
    youtube = "https://www.youtube.com/watch?v=kieFLOz22Dg",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2011.285",
    keywords = "interactive volume rendering, volumetric texturing",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Csebfalvi-2012-IOM,
    author = "Bal{\'a}zs Csebfalvi and Bal{\'a}zs T{\'o}th and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering",
    booktitle = "Proceedings of VMV 2012",
    year = "2012",
    pages = "103--109",
    month = "nov",
    abstract = "Using classical volume visualization, typically a couple of isosurface  layers are rendered semi-transparently to show the internal structures  contained in the data. However, the opacity transfer function is  often difficult to specify such that all the isosurfaces are of high  contrast and sufficiently perceivable. In this paper, we propose  a volumerendering technique which ensures that the different layers  contribute to fairly different regions of the image space. Since  the overlapping between the effected regions is reduced, an outer  translucent isosurface does not decrease significantly the contrast  of a partially hidden inner isosurface. Therefore, the layers of  the data become visually well separated. Traditional transfer functions  assign color and opacity values to the voxels depending on the density  and the gradient. In contrast, we assign also different illumination  directions to different materials, and modulate the opacities view-dependently  based on the surface normals and the directions of the light sources,  which are fixed to the viewing angle. We will demonstrate that this  model allows an expressive visualization of volumetric data.",
    pdf = "pdfs/Csebfalvi-2012-IOM.pdf",
    images = "images/Csebfalvi-2012-IOM.jpg",
    thumbnails = "images/Csebfalvi-2012-IOM.png",
    youtube = "https://www.youtube.com/watch?v=ZvB-Vb7aa4o",
    affiliation = "tuwien",
    doi = "10.2312/PE/VMV/VMV12/103-109",
    event = "VMV 2012",
    keywords = "illustrative visualization, illumination, volume rendering",
    location = "Magdeburg, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/"
    }
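    The view-dependent opacity modulation reduces to scaling a voxel's opacity by how strongly its normal faces the light direction assigned to its material; the exponent below is an illustrative sharpness parameter:

    import numpy as np

    def modulated_opacity(alpha, normal, material_light, sharpness=4.0):
        """Scale opacity by alignment of the surface normal with the
        (view-fixed) light direction assigned to the voxel's material."""
        facing = max(0.0, float(np.dot(normal, material_light)))
        return alpha * facing ** sharpness

    n = np.array([0.0, 0.0, 1.0])
    print(modulated_opacity(0.5, n, np.array([0.0, 0.0, 1.0])))  # fully kept
    print(modulated_opacity(0.5, n, np.array([1.0, 0.0, 0.0])))  # suppressed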
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Mistelbauer-2012-SSV,
    author = "Gabriel Mistelbauer and Hamed Bouzari and R{\"u}diger Schernthaner and Ivan Baclija and Arnold K{\"o}chl and Stefan Bruckner and Milos Sr{\'a}mek and Meister Eduard Gr{\"o}ller",
    title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization",
    booktitle = "Proceedings of IEEE VAST 2012",
    year = "2012",
    pages = "163--172",
    month = "oct",
    publisher = "IEEE Computer Society",
    abstract = "Due to the ever growing volume of acquired data and information, users  have to be constantly aware of the methods for their exploration  and for interaction. Of these, not each might be applicable to the  data at hand or might reveal the desired result. Owing to this, innovations  may be used inappropriately and users may become skeptical. In this  paper we propose a knowledge-assisted interface for medical visualization,  which reduces the necessary effort to use new visualization methods,  by providing only the most relevant ones in a smart way. Consequently,  we are able to expand such a system with innovations without the  users to worry about when, where, and especially how they may or  should use them. We present an application of our system in the medical  domain and give qualitative feedback from domain experts.",
    pdf = "pdfs/Mistelbauer-2012-SSV.pdf",
    images = "images/Mistelbauer-2012-SSV.jpg",
    thumbnails = "images/Mistelbauer-2012-SSV.png",
    youtube = "https://www.youtube.com/watch?v=cZREOedW7c4",
    affiliation = "tuwien",
    doi = "10.1109/VAST.2012.6400555",
    keywords = "knowledge-based visualization, medical visualization, integrated views",
    location = "Seattle, WA, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Ford-2012-HRV,
    author = "Steven Ford and Ivan Viola and Stefan Bruckner and Hans Torp and Gabriel Kiss",
    title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound",
    booktitle = "Proceedings of WASA 2012",
    year = "2012",
    pages = "169--176",
    month = "nov",
    abstract = "Medical ultrasound is a challenging modality when it comes to image  interpretation. The goal we address in this work is to assist the  ultrasound examiner and partially alleviate the burden of interpretation.  We propose to address this goal with visualization that provides  clear cues on the orientation and the correspondence between anatomy  and the data being imaged. Our system analyzes the stream of 3D ultrasound  data and in real-time identifies distinct features that are basis  for a dynamically deformed mesh model of the heart. The heart mesh  is composited with the original ultrasound data to create the data-to-anatomy  correspondence. The visualization is broadcasted over the internet  allowing, among other opportunities, a direct visualization on the  patient on a tablet computer. The examiner interacts with the transducer  and with the visualization parameters on the tablet. Our system has  been characterized by domain specialist as useful in medical training  and for navigating occasional ultrasound users.",
    pdf = "pdfs/Ford-2012-HRV.pdf",
    images = "images/Ford-2012-HRV.jpg",
    thumbnails = "images/Ford-2012-HRV.png",
    youtube = "https://www.youtube.com/watch?v=2d3G7ig-yiQ",
    affiliation = "tuwien",
    doi = "10.1145/2425296.2425326",
    keywords = "medical visualization, ultrasound",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/"
    }

2011

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Haidacher-2011-VAM,
    author = "Martin Haidacher and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Volume Analysis Using Multimodal Surface Similarity",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2011",
    volume = "17",
    number = "12",
    pages = "1969--1978",
    month = "oct",
    abstract = "The combination of volume data acquired by multiple modalities has  been recognized as an important but challenging task. Modalities  often differ in the structures they can delineate and their joint  information can be used to extend the classification space. However,  they frequently exhibit differing types of artifacts which makes  the process of exploiting the additional information non-trivial.  In this paper, we present a framework based on an information-theoretic  measure of isosurface similarity between different modalities to  overcome these problems. The resulting similarity space provides  a concise overview of the differences between the two modalities,  and also serves as the basis for an improved selection of features.  Multimodal classification is expressed in terms of similarities and  dissimilarities between the isosurfaces of individual modalities,  instead of data value combinations. We demonstrate that our approach  can be used to robustly extract features in applications such as  dual energy computed tomography of parts in industrial manufacturing.",
    pdf = "pdfs/Haidacher-2011-VAM.pdf",
    images = "images/Haidacher-2011-VAM.jpg",
    thumbnails = "images/Haidacher-2011-VAM.png",
    youtube = "https://www.youtube.com/watch?v=x9ZTUssg8Fk",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2011.258",
    event = "IEEE Visualization 2011",
    keywords = "surface similarity, volume visualization, multimodal data",
    location = "Providence, Rhode Island, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-VAM/"
    }
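    A heavily simplified way to make the information-theoretic similarity concrete is the mutual information between the interior/exterior labelings induced by one isosurface per modality (the paper instead builds similarity maps from distance fields):

    import numpy as np

    def mutual_information(a: np.ndarray, b: np.ndarray) -> float:
        joint = np.histogram2d(a.ravel(), b.ravel(), bins=2)[0]
        pxy = joint / joint.sum()
        px = pxy.sum(axis=1, keepdims=True)
        py = pxy.sum(axis=0, keepdims=True)
        nz = pxy > 0
        return float(np.sum(pxy[nz] * np.log(pxy[nz] / (px @ py)[nz])))

    x, y, z = np.mgrid[-1:1:64j, -1:1:64j, -1:1:64j]
    iso_a = (x**2 + y**2 + z**2) < 0.50  # isosurface interior, modality A
    iso_b = (x**2 + y**2 + z**2) < 0.55  # slightly different in modality B
    print(mutual_information(iso_a, iso_b))  # similar surfaces -> high MI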
    [PDF] [Bibtex]
    @INPROCEEDINGS {Patel-2011-PEA,
    author = "Daniel Patel and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "PhD Education Through Apprenticeship",
    booktitle = "Proceedings of Eurographics 2011 (Education Papers)",
    year = "2011",
    editor = "S. Maddock, J. Jorge",
    pages = "23--28",
    month = "apr",
    abstract = "We describe and analyze the PhD education in the visualization group  at the Vienna University of Technology and set the education in a  larger perspective. Four central mechanisms drive the PhD education  in Vienna. They are: to require an article-based PhD; to give the  student freedom to choose research direction; to let students work  in shared offices towards joint deadlines; and to involve students  in reviewing articles. This paper describes these mechanisms in detail  and illustrates their effect.",
    pdf = "pdfs/Patel-2011-PEA.pdf",
    images = "images/Patel-2011-PEA.jpg",
    thumbnails = "images/Patel-2011-PEA.png",
    keywords = "M., education, visualization, apprenticeship",
    location = "Llandudno, United Kingdom",
    url = "http://www.cg.tuwien.ac.at/research/publications/2011/patel-2011-PEA/"
    }

2010

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-HVC,
    author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Meister Eduard Gr{\"o}ller",
    title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering",
    journal = "Computers \& Graphics",
    year = "2010",
    volume = "34",
    number = "4",
    pages = "361--369",
    month = "aug",
    abstract = "In this paper, we introduce a novel framework for the compositing  of interactively rendered 3D layers tailored to the needs of scientific  illustration. Currently, traditional scientific illustrations are  produced in a series of composition stages, combining different pictorial  elements using 2D digital layering. Our approach extends the layer  metaphor into 3D without giving up the advantages of 2D methods.  The new compositing approach allows for effects such as selective  transparency, occlusion overrides, and soft depth buffering. Furthermore,  we show how common manipulation techniques such as masking can be  integrated into this concept. These tools behave just like in 2D,  but their influence extends beyond a single viewpoint. Since the  presented approach makes no assumptions about the underlying rendering  algorithms, layers can be generated based on polygonal geometry,  volumetric data, pointbased representations, or others. Our implementation  exploits current graphics hardware and permits real-time interaction  and rendering.",
    pdf = "pdfs/Bruckner-2010-HVC.pdf",
    images = "images/Bruckner-2010-HVC.jpg",
    thumbnails = "images/Bruckner-2010-HVC.png",
    youtube = "https://www.youtube.com/watch?v=V-Jbgpd9OjU,https://www.youtube.com/watch?v=Tsc30U4x3ic,https://www.youtube.com/watch?v=I4x5QtG25Tc",
    doi = "10.1016/j.cag.2010.04.003",
    keywords = "compositing, masking, illustration",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Sikachev-2010-DFC,
    author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Dynamic Focus+Context for Volume Rendering",
    booktitle = "Proceedings of VMV 2010",
    year = "2010",
    pages = "331--338",
    address = "University of Siegen, Siegen, Germany",
    month = "nov",
    abstract = "Interactive visualization is widely used in many applications for  efficient representation of complex data. Many techniques make use  of the focus+context approach in a static manner. These techniques  do not fully make use of the interaction semantics. In this paper  we present a dynamic focus+context approach that highlights salient  features during user interaction. We explore rotation, panning, and  zooming interaction semantics and propose several methods of changing  visual representations, based on a suggested engagement-estimation  method. We use DVR-MIP interpolation and a radial opacity-change  approach, exploring rotation, panning, and zooming semantics. Our  approach adds short animations during user interaction that help  to explore the data efficiently and aid the user in the detection  of unknown features.",
    pdf = "pdfs/Sikachev-2010-DFC.pdf",
    images = "images/Sikachev-2010-DFC.jpg",
    thumbnails = "images/Sikachev-2010-DFC.png",
    youtube = "https://www.youtube.com/watch?v=6x-gVBHYAcA,https://www.youtube.com/watch?v=TgotxmoepB8,https://www.youtube.com/watch?v=8K67zA8pbAo",
    affiliation = "tuwien",
    doi = "10.2312/PE/VMV/VMV10/331-338",
    keywords = "focus+contex, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction",
    location = "Siegen, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Bruckner-2010-IFC,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller and Klaus Mueller and Bernhard Preim and Deborah Silver",
    title = "Illustrative Focus+Context Approaches in Interactive Volume Visualization",
    booktitle = "Scientific Visualization: Advanced Concepts",
    publisher = "Schloss Dagstuhl -- Leibniz-Zentrum fuer Informatik",
    year = "2010",
    editor = "Hans Hagen",
    series = "Dagstuhl Follow-Ups",
    chapter = "10",
    pages = "136--162",
    month = "aug",
    abstract = "Illustrative techniques are a new and exciting direction in visualization  research. Traditional techniques which have been used by scientific  illustrators for centuries are re-examined under the light of modern  computer technology. In this paper, we discuss the use of the focus+context  concept for the illustrative visualization of volumetric data. We  give an overview of the state-of-the-art and discuss recent approaches  which employ this concept in novel ways.",
    pdf = "pdfs/Bruckner-2010-IFC.pdf",
    images = "images/Bruckner-2010-IFC.jpg",
    thumbnails = "images/Bruckner-2010-IFC.png",
    affiliation = "tuwien",
    doi = "10.4230/DFU.SciViz.2010.136",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-IFC/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Patel-2010-SVV,
    author = "Daniel Patel and Stefan Bruckner and Ivan Viola and Meister Eduard Gr{\"o}ller",
    title = "Seismic Volume Visualization for Horizon Extraction",
    booktitle = "Proceedings of IEEE Pacific Visualization 2010",
    year = "2010",
    pages = "73--80",
    month = "mar",
    abstract = "Seismic horizons indicate change in rock properties and are central  in geoscience interpretation. Traditional interpretation systems  involve time consuming and repetitive manual volumetric seeding for  horizon growing. We present a novel system for rapidly interpreting  and visualizing seismic volumetric data. First we extract horizon  surface-parts by preprocessing the seismic data. Then during interaction  the user can assemble in realtime the horizon parts into horizons.  Traditional interpretation systems use gradient-based illumination  models in the rendering of the seismic volume and polygon rendering  of horizon surfaces. We employ realtime gradientfree forward-scattering  in the rendering of seismic volumes yielding results similar to high-quality  global illumination. We use an implicit surface representation of  horizons allowing for a seamless integration of horizon rendering  and volume rendering. We present a collection of novel techniques  constituting an interpretation and visualization system highly tailored  to seismic data interpretation.",
    pdf = "pdfs/Patel-2010-SVV.pdf",
    images = "images/Patel-2010-SVV.jpg",
    thumbnails = "images/Patel-2010-SVV.png",
    youtube = "https://www.youtube.com/watch?v=YXg4LZsTQdc",
    doi = "10.1109/PACIFICVIS.2010.5429605",
    keywords = "volume visualization, horizon extraction, seismic data",
    location = "Taipei, Taiwan",
    project = "geoillustrator,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/patel-2010-SVV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-ISM,
    author = "Stefan Bruckner and Torsten M{\"o}ller",
    title = "Isosurface Similarity Maps",
    journal = "Computer Graphics Forum",
    year = "2010",
    volume = "29",
    number = "3",
    pages = "773--782",
    month = "jun",
    abstract = "In this paper, we introduce the concept of isosurface similarity maps  for the visualization of volume data. Isosurface similarity maps  present structural information of a volume data set by depicting  similarities between individual isosurfaces quantified by a robust  information-theoretic measure. Unlike conventional histograms, they  are not based on the frequency of isovalues and/or derivatives and  therefore provide complementary information. We demonstrate that  this new representation can be used to guide transfer function design  and visualization parameter specification. Furthermore, we use isosurface  similarity to develop an automatic parameter-free method for identifying  representative isovalues. Using real-world data sets, we show that  isosurface similarity maps can be a useful addition to conventional  classification techniques.",
    pdf = "pdfs/Bruckner-2010-ISM.pdf",
    images = "images/Bruckner-2010-ISM.jpg",
    thumbnails = "images/Bruckner-2010-ISM.png",
    youtube = "https://www.youtube.com/watch?v=NZFqx4QceCA,https://www.youtube.com/watch?v=kQO8fTJJxVg,https://www.youtube.com/watch?v=KDIbmfOAW00",
    note = "EuroVis 2010 Best Paper Award",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2009.01689.x",
    event = "EuroVis 2010",
    keywords = "isosurfaces, volume visualization, mutual information, histograms",
    location = "Bordeaux, France",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-ISM/"
    }
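    The similarity measure behind these maps admits a compact prototype. Below is a minimal sketch, assuming the measure is mutual information between the distance transforms of two isosurfaces, normalized by their joint entropy; the paper's exact measure and distance-field construction may differ, and all names are illustrative.

        import numpy as np
        from scipy import ndimage

        def isosurface_distance(volume, isovalue):
            # Approximate unsigned distance to the isosurface via the boundary
            # of the thresholded region.
            inside = volume >= isovalue
            return np.where(inside,
                            ndimage.distance_transform_edt(inside),
                            ndimage.distance_transform_edt(~inside))

        def isosurface_similarity(volume, l1, l2, bins=64):
            # Joint histogram of the two distance fields, then normalized
            # mutual information in [0, 1].
            d1 = isosurface_distance(volume, l1).ravel()
            d2 = isosurface_distance(volume, l2).ravel()
            joint, _, _ = np.histogram2d(d1, d2, bins=bins)
            p = joint / joint.sum()
            px, py = p.sum(axis=1), p.sum(axis=0)
            nz = p > 0
            h_joint = -np.sum(p[nz] * np.log(p[nz]))
            mi = np.sum(p[nz] * np.log(p[nz] / (px[:, None] * py[None, :])[nz]))
            return mi / h_joint if h_joint > 0 else 1.0

    A similarity map is this quantity evaluated for all pairs of a sampled set of isovalues; bright off-diagonal blocks then reveal redundant isovalue ranges and guide the choice of representative isovalues.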
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Haidacher-2010-VVS,
    author = "Martin Haidacher and Daniel Patel and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Volume Visualization based on Statistical Transfer-Function Spaces",
    booktitle = "Proceedings of IEEE Pacific Visualization 2010",
    year = "2010",
    pages = "17--24",
    month = "mar",
    abstract = "It is a difficult task to design transfer functions for noisy data.  In traditional transfer-function spaces, data values of different  materials overlap. In this paper we introduce a novel statistical  transfer-function space which in the presence of noise, separates  different materials in volume data sets. Our method adaptively estimates  statistical properties, i.e. the mean value and the standard deviation,  of the data values in the neighborhood of each sample point. These  properties are used to define a transfer-function space which enables  the distinction of different materials. Additionally, we present  a novel approach for interacting with our new transfer-function space  which enables the design of transfer functions based on statistical  properties. Furthermore, we demonstrate that statistical information  can be applied to enhance visual appearance in the rendering process.  We compare the new method with 1D, 2D, and LH transfer functions  to demonstrate its usefulness.",
    pdf = "pdfs/Haidacher-2010-VVS.pdf",
    images = "images/Haidacher-2010-VVS.jpg",
    thumbnails = "images/Haidacher-2010-VVS.png",
    youtube = "https://www.youtube.com/watch?v=firkkbHdZ5o",
    doi = "10.1109/PACIFICVIS.2010.5429615",
    keywords = "transfer function, statistics, shading, noisy data, classification",
    location = "Taipei, Taiwan",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/haidacher_2010_statTF/"
    }
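    The statistical transfer-function space lends itself to a simple prototype. A minimal sketch, assuming a fixed-size uniform neighborhood in place of the paper's adaptive estimation; names are illustrative.

        import numpy as np
        from scipy import ndimage

        def statistical_tf_coordinates(volume, radius=2):
            # Local mean and standard deviation per voxel; these two values
            # replace the raw sample value as coordinates into a 2D transfer
            # function, separating materials that overlap in value alone.
            v = volume.astype(np.float64)
            size = 2 * radius + 1
            mean = ndimage.uniform_filter(v, size=size)
            mean_sq = ndimage.uniform_filter(v * v, size=size)
            std = np.sqrt(np.maximum(mean_sq - mean * mean, 0.0))
            return mean, std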
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-RES,
    author = "Stefan Bruckner and Torsten M{\"o}ller",
    title = "Result-Driven Exploration of Simulation Parameter Spaces for Visual Effects Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2010",
    volume = "16",
    number = "6",
    pages = "1467--1475",
    month = "oct",
    abstract = "Graphics artists commonly employ physically-based simulation for the  generation of effects such as smoke, explosions, and similar phenomena.  The task of finding the correct parameters for a desired result,  however, is difficult and time-consuming as current tools provide  little to no guidance. In this paper, we present a new approach for  the visual exploration of such parameter spaces. Given a three-dimensional  scene description, we utilize sampling and spatio-temporal clustering  techniques to generate a concise overview of the achievable variations  and their temporal evolution. Our visualization system then allows  the user to explore the simulation space in a goal-oriented manner.  Animation sequences with a set of desired characteristics can be  composed using a novel search-by-example approach and interactive  direct volume rendering is employed to provide instant visual feedback.  A user study was performed to evaluate the applicability of our system  in production use.",
    pdf = "pdfs/Bruckner-2010-RES.pdf",
    images = "images/Bruckner-2010-RES.jpg",
    thumbnails = "images/Bruckner-2010-RES.png",
    youtube = "https://www.youtube.com/watch?v=JunXyxULCpo",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2010.190",
    event = "IEEE Visualization 2010",
    keywords = "visual exploration, visual effects, clustering, time-dependent volume data",
    location = "Salt Lake City, Utah, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/brucker-2010-RES/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2010-MOS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Daniel Patel and Stefan Bruckner and Ivan Viola",
    title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2010",
    volume = "29",
    number = "3",
    pages = "883--891",
    month = "jun",
    abstract = "In this paper, we present a novel technique which simulates directional  light scattering for more realistic interactive visualization of  volume data. Our method extends the recent directional occlusion  shading model by enabling light source positioning with practically  no performance penalty. Light transport is approximated using a tilted  cone-shaped function which leaves elliptic footprints in the opacity  buffer during slice-based volume rendering. We perform an incremental  blurring operation on the opacity buffer for each slice in front-to-back  order. This buffer is then used to define the degree of occlusion  for the subsequent slice. Our method is capable of generating high-quality  soft shadowing effects, allows interactive modification of all illumination  and rendering parameters, and requires no pre-computation.",
    pdf = "pdfs/Solteszova-2010-MOS.pdf",
    images = "images/Solteszova-2010-MOS.jpg",
    thumbnails = "images/Solteszova-2010-MOS.png",
    youtube = "https://www.youtube.com/watch?v=V4y0BVKV_bw",
    doi = "10.1111/j.1467-8659.2009.01695.x",
    event = "EuroVis 2010",
    keywords = "global illumination, volume rendering, shadows, optical model",
    location = "Bordeaux, France",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/"
    }
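    The shading model reduces to an opacity buffer that is incrementally blurred between slices, with the kernel shifted so the occlusion cone tilts toward the light. A minimal sketch, using a shifted Gaussian as a stand-in for the paper's tilted elliptic cone footprint; names and defaults are illustrative.

        import numpy as np
        from scipy import ndimage

        def shade_slices(slices_rgba, light_offset=(2.0, 0.0), sigma=1.5):
            # Front-to-back slice compositing; 'occlusion' is the blurred
            # opacity buffer that darkens samples behind dense material.
            h, w = slices_rgba[0].shape[:2]
            occlusion = np.zeros((h, w))
            out_rgb, out_a = np.zeros((h, w, 3)), np.zeros((h, w))
            for s in slices_rgba:  # ordered front to back
                rgb, a = s[..., :3], s[..., 3]
                lit = rgb * (1.0 - occlusion[..., None])
                out_rgb += (1.0 - out_a[..., None]) * lit * a[..., None]
                out_a += (1.0 - out_a) * a
                # Accumulate this slice's opacity, blur, and shift the buffer
                # toward the light to tilt the occlusion cone.
                occlusion = np.clip(occlusion + a, 0.0, 1.0)
                occlusion = ndimage.shift(
                    ndimage.gaussian_filter(occlusion, sigma),
                    light_offset, order=1, mode="nearest")
            return out_rgb, out_a

    Because the light direction only changes the per-slice shift, repositioning the light costs practically nothing, which is the selling point noted in the abstract.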

2009

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2009-BVQ,
    author = "Stefan Bruckner and Veronika \v{S}olt{\'e}szov{\'a} and Meister Eduard Gr{\"o}ller and Ji\v{r}{\'i} Hlad\r{u}vka and Katja B{\"u}hler and Jai Yu and Barry Dickson",
    title = "BrainGazer - Visual Queries for Neurobiology Research",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2009",
    volume = "15",
    number = "6",
    pages = "1497--1504",
    month = "nov",
    abstract = "Neurobiology investigates how anatomical and physiological relationships  in the nervous system mediate behavior. Molecular genetic techniques,  applied to species such as the common fruit fly Drosophila melanogaster,  have proven to be an important tool in this research. Large databases  of transgenic specimens are being built and need to be analyzed to  establish models of neural information processing. In this paper  we present an approach for the exploration and analysis of neural  circuits based on such a database. We have designed and implemented  BrainGazer, a system which integrates visualization techniques for  volume data acquired through confocal microscopy as well as annotated  anatomical structures with an intuitive approach for accessing the  available information. We focus on the ability to visually query  the data based on semantic as well as spatial relationships. Additionally,  we present visualization techniques for the concurrent depiction  of neurobiological volume data and geometric objects which aim to  reduce visual clutter. The described system is the result of an ongoing  interdisciplinary collaboration between neurobiologists and visualization  researchers.",
    pdf = "pdfs/Bruckner-2009-BVQ.pdf",
    images = "images/Bruckner-2009-BVQ.jpg",
    thumbnails = "images/Bruckner-2009-BVQ.png",
    youtube = "https://www.youtube.com/watch?v=LB5t3RtLifk",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2009.121",
    event = "IEEE Visualization 2009",
    keywords = "biomedical visualization, neurobiology, visual queries, volume visualization",
    location = "Atlantic City, New Jersey, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2009-IVV,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Instant Volume Visualization using Maximum Intensity Difference Accumulation",
    journal = "Computer Graphics Forum",
    year = "2009",
    volume = "28",
    number = "3",
    pages = "775--782",
    month = "jun",
    abstract = "It has long been recognized that transfer function setup for Direct  Volume Rendering (DVR) is crucial to its usability. However, the  task of finding an appropriate transfer function is complex and time-consuming  even for experts. Thus, in many practical applications simpler techniques  which do not rely on complex transfer functions are employed. One  common example is Maximum Intensity Projection (MIP) which depicts  the maximum value along each viewing ray. In this paper, we introduce  Maximum Intensity Difference Accumulation (MIDA), a new approach  which combines the advantages of DVR and MIP. Like MIP, MIDA exploits  common data characteristics and hence does not require complex transfer  functions to generate good visualization results. It does, however,  feature occlusion and shape cues similar to DVR. Furthermore, we  show that MIDA - in addition to being a useful technique in its own  right- can be used to smoothly transition between DVR and MIP in  an intuitive manner. MIDA can be easily implemented using volume  raycasting and achieves real-time performance on current graphics  hardware.",
    pdf = "pdfs/Bruckner-2009-IVV.pdf",
    images = "images/Bruckner-2009-IVV.jpg",
    thumbnails = "images/Bruckner-2009-IVV.png",
    youtube = "https://www.youtube.com/watch?v=lNwZJXxoLTg,https://www.youtube.com/watch?v=AR-Zp3S35hs,https://www.youtube.com/watch?v=xk4J8bkI2-Y,https://www.youtube.com/watch?v=XApq2rGKMR8",
    issn = "0167-7055",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2009.01474.x",
    event = "EuroVis 2009",
    keywords = "illustrative visualization, maximum intensity projection, direct volume rendering",
    location = "Berlin, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-IVV/"
    }
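    The MIDA operator is easy to state per ray: a sample contributes fully only to the extent that it raises the running maximum. A minimal sketch of the compositing recurrence as described, assuming data values normalized to [0, 1]; the transfer-function callback is illustrative.

        import numpy as np

        def mida_ray(samples, tf):
            # tf(v) -> (r, g, b, a); samples in front-to-back order.
            v_max, acc_rgb, acc_a = 0.0, np.zeros(3), 0.0
            for v in samples:
                delta = max(v - v_max, 0.0)  # amount this sample exceeds the max
                v_max = max(v_max, v)
                beta = 1.0 - delta           # beta < 1 lets new maxima show through
                r, g, b, a = tf(v)
                acc_rgb = beta * acc_rgb + (1.0 - beta * acc_a) * a * np.array([r, g, b])
                acc_a = beta * acc_a + (1.0 - beta * acc_a) * a
            return acc_rgb, acc_a

    With delta forced to zero the recurrence reduces to standard front-to-back DVR compositing, which is the basis of the smooth DVR-MIP transition mentioned above.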
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Kohlmann-2009-CPV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Contextual Picking of Volumetric Structures",
    booktitle = "Proceedings of the IEEE Pacific Visualization 2009",
    year = "2009",
    editor = "Peter Eades, Thomas Ertl, Han-Wei Shen",
    pages = "185--192",
    month = "may",
    abstract = "This paper presents a novel method for the interactive identification  of contextual interest points within volumetric data by picking on  a direct volume rendered image. In clinical diagnostics the points  of interest are often located in the center of anatomical structures.  In order to derive the volumetric position which allows a convenient  examination of the intended structure, the system automatically extracts  contextual meta information from the DICOM (Digital Imaging and Communications  in Medicine) images and the setup of the medical workstation. Along  a viewing ray for a volumetric picking, the ray profile is analyzed  for structures which are similar to predefined templates from a knowledge  base. We demonstrate with our results that the obtained position  in 3D can be utilized to highlight a structure in 2D slice views,  to interactively calculate centerlines of tubular objects, or to  place labels at contextually-defined volumetric positions.",
    pdf = "pdfs/Kohlmann-2009-CPV.pdf",
    images = "images/Kohlmann-2009-CPV.jpg",
    thumbnails = "images/Kohlmann-2009-CPV.png",
    youtube = "https://www.youtube.com/watch?v=SgyGwePAE7o",
    affiliation = "tuwien",
    doi = "10.1109/PACIFICVIS.2009.4906855",
    isbn = "978-1-4244-4404-5",
    keywords = "picking, interaction, selection, volume visualization",
    location = "Peking, China",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-cp/"
    }
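    At its core, contextual picking matches the data profile along the picking ray against templates from a knowledge base. A minimal sketch using normalized cross-correlation as the match score; the paper's template matching and DICOM-driven context handling are richer, and all names are illustrative.

        import numpy as np

        def pick_along_ray(profile, template):
            # Slide the template over the ray profile and return the center
            # of the best-matching window as the picked depth.
            profile = np.asarray(profile, dtype=float)
            t = np.asarray(template, dtype=float)
            t = (t - t.mean()) / (t.std() + 1e-9)
            m = len(t)
            best_i, best_score = 0, -np.inf
            for i in range(len(profile) - m + 1):
                w = profile[i:i + m]
                w = (w - w.mean()) / (w.std() + 1e-9)
                score = float(np.dot(w, t)) / m
                if score > best_score:
                    best_i, best_score = i, score
            return best_i + m // 2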

2008

    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Ruiz-2008-SEV,
    author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert",
    title = "Similarity-based Exploded Views",
    booktitle = "Proceedings of Smart Graphics 2008",
    year = "2008",
    pages = "154--165",
    month = "aug",
    abstract = "Exploded views are often used in illustration to overcome the problem  of occlusion when depicting complex structures. In this paper, we  propose a volume visualization technique inspired by exploded views  that partitions the volume into a number of parallel slabs and shows  them apart from each other. The thickness of slabs is driven by the  similarity between partitions. We use an information-theoretic technique  for the generation of exploded views. First, the algorithm identifies  the viewpoint from which the structure is the highest. Then, the  partition of the volume into the most informative slabs for exploding  is obtained using two complementary similarity-based strategies.  The number of slabs and the similarity parameter are freely adjustable  by the user.",
    pdf = "pdfs/Ruiz-2008-SEV.pdf",
    images = "images/Ruiz-2008-SEV.jpg",
    thumbnails = "images/Ruiz-2008-SEV.png",
    doi = "10.1007/978-3-540-85412-8_14",
    keywords = "volume visualization, illustrative visualization, exploded views",
    location = "Rennes, France",
    project = "illvis,medviz",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/"
    }
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Kohlmann-2008-LEI,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "LiveSync++: Enhancements of an Interaction Metaphor",
    booktitle = "Proceedings of Graphics Interface 2008",
    year = "2008",
    pages = "81--88",
    month = "may",
    abstract = "The LiveSync interaction metaphor allows an efficient and non-intrusive  integration of 2D and 3D visualizations in medical workstations.  This is achieved by synchronizing the 2D slice view with the volumetric  view. The synchronization is initiated by a simple picking on a structure  of interest in the slice view. In this paper we present substantial  enhancements of the existing concept to improve its usability. First,  an efficient parametrization for the derived parameters is presented,  which allows hierarchical refinement of the search space for good  views. Second, the extraction of the feature of interest is performed  in a way, which is adapting to the volumetric extent of the feature.  The properties of the extracted features are utilized to adjust a  predefined transfer function in a feature-enhancing manner. Third,  a new interaction mode is presented, which allows the integration  of more knowledge about the user-intended visualization, without  increasing the interaction effort. Finally, a new clipping technique  is integrated, which guarantees an unoccluded view on the structure  of interest while keeping important contextual information.",
    pdf = "pdfs/Kohlmann-2008-LEI.pdf",
    images = "images/Kohlmann-2008-LEI.jpg",
    thumbnails = "images/Kohlmann-2008-LEI.png",
    youtube = "https://www.youtube.com/watch?v=_Jt8ezi7yjs",
    affiliation = "tuwien",
    keywords = "viewpoint selection, linked views, medical visualization, smart interaction",
    location = "Windsor, Ontario, Canada",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/kohlmann-2008-lse/"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Rautek-2008-IVN,
    author = "Peter Rautek and Stefan Bruckner and Ivan Viola and Meister Eduard Gr{\"o}ller",
    title = "Illustrative visualization: new technology or useless tautology?",
    journal = "ACM SIGGRAPH Computer Graphics",
    year = "2008",
    volume = "42",
    number = "3",
    month = "aug",
    abstract = "The computer graphics group at TU Vienna has created some of most  beautiful and effective illustrative visualizations. In this article,  they share with us their unique perspective on illustrative visualization.",
    pdf = "pdfs/Rautek-2008-IVN.pdf",
    images = "images/Rautek-2008-IVN.jpg",
    thumbnails = "images/Rautek-2008-IVN.png",
    doi = "10.1145/1408626.1408633",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-VF/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Bruckner-2008-IVV,
    author = "Stefan Bruckner and Peter Kohlmann and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Integrating Volume Visualization Techniques Into Medical Applications",
    booktitle = "Proceedings of ISBI 2008",
    year = "2008",
    pages = "820--823",
    month = "may",
    abstract = "One of the main obstacles in integrating 3D volume visualization in  the clinical workflow is the time-consuming process of adjusting  parameters such as viewpoint, transfer functions, and clipping planes  required to generate a diagnostically relevant image. Current applications  therefore make scarce use of volume rendering and instead primarily  employ 2D views generated through standard techniques such as multi-planar  reconstruction (MPR). However, in many cases 3D renditions can supply  additional useful information. This paper discusses ongoing work  which aims to improve the integration of 3D visualization into the  diagnostic workflow by automatically generating meaningful renditions  based on minimal user interaction. A method for automatically generating  3D views for structures in 2D slices based on a single picking interaction  is presented.",
    pdf = "pdfs/Bruckner-2008-IVV.pdf",
    images = "images/Bruckner-2008-IVV.jpg",
    thumbnails = "images/Bruckner-2008-IVV.png",
    affiliation = "tuwien",
    doi = "10.1109/ISBI.2008.4541122",
    isbn = "978-1-4244-2002-5",
    keywords = "viewpoint selection, medical visualization, volume rendering",
    location = "Paris, France",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IVV/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Haidacher-2008-ITF,
    author = "Martin Haidacher and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Information-based Transfer Functions for Multimodal Visualization",
    booktitle = "Proceedings of VCBM 2008",
    year = "2008",
    editor = "C.P Botha, G. Kindlmann, W.J. Niessen, and B. Preim",
    pages = "101--108",
    month = "oct",
    publisher = "Eurographics Association",
    abstract = "Transfer functions are an essential part of volume visualization.  In multimodal visualization at least two values exist at every sample  point. Additionally, other parameters, such as gradient magnitude,  are often retrieved for each sample point. To find a good transfer  function for this high number of parameters is challenging because  of the complexity of this task. In this paper we present a general  information-based approach for transfer function design in multimodal  visualization which is independent of the used modality types. Based  on information theory, the complex multi-dimensional transfer function  space is fused to allow utilization of a well-known 2D transfer function  with a single value and gradient magnitude as parameters. Additionally,  a quantity is introduced which enables better separation of regions  with complementary information. The benefit of the new method in  contrast to other techniques is a transfer function space which is  easy to understand and which provides a better separation of different  tissues. The usability of the new approach is shown on examples of  different modalities.",
    pdf = "pdfs/Haidacher-2008-ITF.pdf",
    images = "images/Haidacher-2008-ITF.jpg",
    thumbnails = "images/Haidacher-2008-ITF.png",
    affiliation = "tuwien",
    doi = "10.2312/VCBM/VCBM08/101-108",
    isbn = "978-3-905674-13-2",
    issn = "2070-5778",
    keywords = "multimodal visualization, transfer functions, information theory",
    location = "Delft",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/haidacher-2008-vcbm/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2008-ISI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Interaction-Dependent Semantics for Illustrative Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2008",
    volume = "27",
    number = "3",
    pages = "847--854",
    month = "may",
    abstract = "In traditional illustration the choice of appropriate styles and rendering  techniques is guided by the intention of the artist. For illustrative  volume visualizations it is difficult to specify the mapping between  the 3D data and the visual representation that preserves the intention  of the user. The semantic layers concept establishes this mapping  with a linguistic formulation of rules that directly map data features  to rendering styles. With semantic layers fuzzy logic is used to  evaluate the user defined illustration rules in a preprocessing step.  In this paper we introduce interaction-dependent rules that are evaluated  for each frame and are therefore computationally more expensive.  Enabling interaction-dependent rules, however, allows the use of  a new class of semantics, resulting in more expressive interactive  illustrations. We show that the evaluation of the fuzzy logic can  be done on the graphics hardware enabling the efficient use of interaction-dependent  semantics. Further we introduce the flat rendering mode and discuss  how different rendering parameters are influenced by the rule base.  Our approach provides high quality illustrative volume renderings  at interactive frame rates, guided by the specification of illustration  rules.",
    pdf = "pdfs/Rautek-2008-ISI.pdf",
    images = "images/Rautek-2008-ISI.jpg",
    thumbnails = "images/Rautek-2008-ISI.png",
    youtube = "https://www.youtube.com/watch?v=fHIl2A50Ico",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2008.01216.x",
    event = "Eurographics/ IEEE-VGTC Symposium on Visualization",
    keywords = "volume visualization, illustrative visualization, semantics, interaction",
    location = "Eindhoven, The Netherlands",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Ruiz-2008-OVR,
    author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert",
    title = "Obscurance-based Volume Rendering Framework",
    booktitle = "Proceedings of Volume Graphics 2008",
    year = "2008",
    pages = "113--120",
    month = "aug",
    abstract = "Obscurances, from which ambient occlusion is a particular case, is  a technology that produces natural-looking lighting effects in a  faster way than global illumination. Its application in volume visualization  is of special interest since it permits us to generate a high quality  rendering at a low cost. In this paper, we propose an obscurance-based  framework that allows us to obtain realistic and illustrative volume  visualizations in an interactive manner. Obscurances can include  color bleeding effects without additional cost. Moreover, we obtain  a saliency map from the gradient of obscurances and we show its application  to enhance volume visualization and to select the most salient views.",
    pdf = "pdfs/Ruiz-2008-OVR.pdf",
    images = "images/Ruiz-2008-OVR.jpg",
    thumbnails = "images/Ruiz-2008-OVR.png",
    doi = "10.2312/VG/VG-PBG08/113-120",
    keywords = "volume rendering, illustrative visualization, ambient occlusion",
    location = "Los Angeles, CA, USA",
    project = "illvis,medviz",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/"
    }
    [PDF] [Bibtex]
    @PHDTHESIS {Bruckner-2008-IIV-Thesis,
    author = "Stefan Bruckner",
    title = "Interactive Illustrative Volume Visualization",
    school = "Vienna University of Technology, Austria",
    year = "2008",
    month = "apr",
    abstract = "Illustrations are essential for the effective communication of complex  subjects. Their production, however, is a difficult and expensive  task. In recent years, three-dimensional imaging has become a vital  tool not only in medical diagnosis and treatment planning, but also  in many technical disciplines (e.g., material inspection), biology,  and archeology. Modalities such as X-Ray Computed Tomography (CT)  and Magnetic Resonance Imaging (MRI) produce high-resolution volumetric  scans on a daily basis. It seems counter-intuitive that even though  such a wealth of data is available, the production of an illustration  should still require a mainly manual and time-consuming process.  This thesis is devoted to the computer-assisted generation of illustrations  directly from volumetric data using advanced visualization techniques.  The concept of a direct volume illustration system is introduced  for this purpose. Instead of requiring an additional modeling step,  this system allows the designer of an illustration to work directly  on the measured data. Abstraction, a key component of traditional  illustrations, is used in order to reduce visual clutter, emphasize  important structures, and reveal hidden detail. Low-level abstraction  techniques are concerned with the appearance of objects and allow  flexible artistic shading of structures in volumetric data sets.  High-level abstraction techniques control which objects are visible.  For this purpose, novel methods for the generation of ghosted and  exploded views are introduced. The visualization techniques presented  in this thesis employ the features of current graphics hardware to  achieve interactive performance. The resulting system allows the  generation of expressive illustrations directly from volumetric data  with applications in medical training, patient education, and scientific  communication.",
    pdf = "pdfs/Bruckner-2008-IIV-Thesis.pdf",
    images = "images/Bruckner-2008-IIV-Thesis.jpg",
    thumbnails = "images/Bruckner-2008-IIV-Thesis.png",
    affiliation = "tuwien",
    keywords = "visual analysis, visual exploration, volume data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IIV/"
    }

2007

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2007-EDF,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Enhancing Depth-Perception with Flexible Volumetric Halos",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1344--1351",
    month = "oct",
    abstract = "Volumetric data commonly has high depth complexity which makes it  difficult to judge spatial relationships accurately. There are many  different ways to enhance depth perception, such as shading, contours,  and shadows. Artists and illustrators frequently employ halos for  this purpose. In this technique, regions surrounding the edges of  certain structures are darkened or brightened which makes it easier  to judge occlusion. Based on this concept, we present a flexible  method for enhancing and highlighting structures of interest using  GPU-based direct volume rendering. Our approach uses an interactively  defined halo transfer function to classify structures of interest  based on data value, direction, and position. A feature-preserving  spreading algorithm is applied to distribute seed values to neighboring  locations, generating a controllably smooth field of halo intensities.  These halo intensities are then mapped to colors and opacities using  a halo profile function. Our method can be used to annotate features  at interactive frame rates.",
    pdf = "pdfs/Bruckner-2007-EDF.pdf",
    images = "images/Bruckner-2007-EDF.jpg",
    thumbnails = "images/Bruckner-2007-EDF.png",
    youtube = "https://www.youtube.com/watch?v=NvHfxX8wjE8",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70555",
    event = "IEEE Visualization 2007",
    keywords = "volume rendering, illustrative visualization, halos",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-EDF/"
    }
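    The halo pipeline has three stages: classify per-voxel seed intensities with the halo transfer function, spread them into a smooth field, and map the field to color and opacity with the halo profile function. The spreading stage can be mimicked with a blur-and-max iteration; this is a crude stand-in for the paper's feature-preserving spreading, with illustrative names.

        import numpy as np
        from scipy import ndimage

        def spread_halos(seeds, iterations=8, sigma=1.0):
            # Blurring spreads seed values outward; taking the maximum with
            # the previous field keeps the seeds themselves at full strength.
            field = seeds.astype(np.float64)
            for _ in range(iterations):
                field = np.maximum(field, ndimage.gaussian_filter(field, sigma))
            return field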
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kohlmann-2007-LDV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "LiveSync: Deformed Viewing Spheres for Knowledge-Based Navigation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1544--1551",
    month = "oct",
    abstract = "Although real-time interactive volume rendering is available even  for very large data sets, this visualization method is used quite  rarely in the clinical practice. We suspect this is because it is  very complicated and time consuming to adjust the parameters to achieve  meaningful results. The clinician has to take care of the appropriate  viewpoint, zooming, transfer function setup, clipping planes and  other parameters. Because of this, most often only 2D slices of the  data set are examined. Our work introduces LiveSync, a new concept  to synchronize 2D slice views and volumetric views of medical data  sets. Through intuitive picking actions on the slice, the users define  the anatomical structures they are interested in. The 3D volumetric  view is updated automatically with the goal that the users are provided  with expressive result images. To achieve this live synchronization  we use a minimal set of derived information without the need for  segmented data sets or data-specific pre-computations. The components  we consider are the picked point, slice view zoom, patient orientation,  viewpoint history, local object shape and visibility. We introduce  deformed viewing spheres which encode the viewpoint quality for the  components. A combination of these deformed viewing spheres is used  to estimate a good viewpoint. Our system provides the physician with  synchronized views which help to gain deeper insight into the medical  data with minimal user interaction.",
    pdf = "pdfs/Kohlmann-2007-LDV.pdf",
    images = "images/Kohlmann-2007-LDV.jpg",
    thumbnails = "images/Kohlmann-2007-LDV.png",
    youtube = "https://www.youtube.com/watch?v=vzoS6plGxzQ",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70576",
    event = "IEEE Visualization 2007",
    keywords = "linked views, interaction, medical visualization, navigation, viewpoint selection",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/kohlmann-2007-livesync/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2007-STF,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Style Transfer Functions for Illustrative Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2007",
    volume = "26",
    number = "3",
    pages = "715--724",
    month = "sep",
    abstract = "Illustrative volume visualization frequently employs non-photorealistic  rendering techniques to enhance important features or to suppress  unwanted details. However, it is difficult to integrate multiple  non-photorealistic rendering approaches into a single framework due  to great differences in the individual methods and their parameters.  In this paper, we present the concept of style transfer functions.  Our approach enables flexible data-driven illumination which goes  beyond using the transfer function to just assign colors and opacities.  An image-based lighting model uses sphere maps to represent non-photorealistic  rendering styles. Style transfer functions allow us to combine a  multitude of different shading styles in a single rendering. We extend  this concept with a technique for curvature-controlled style contours  and an illustrative transparency model. Our implementation of the  presented methods allows interactive generation of high-quality volumetric  illustrations.",
    pdf = "pdfs/Bruckner-2007-STF.pdf",
    images = "images/Bruckner-2007-STF.jpg",
    thumbnails = "images/Bruckner-2007-STF.png",
    youtube = "https://www.youtube.com/watch?v=40SdXa7aAjI",
    note = "Eurographics 2007 3rd Best Paper Award",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2007.01095.x",
    event = "Eurographics 2007",
    keywords = "illustrative visualization, transfer functions, volume rendering",
    location = "Prague, Czech Republic",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-STF/"
    }
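    Style transfer functions replace the usual color lookup with image-based lit-sphere shading: a sphere map is indexed by the eye-space normal, and several maps are blended along the data-value axis. A minimal sketch with nearest-texel lookup and linear blending; names are illustrative and the interpolation details differ from the paper.

        import numpy as np

        def lit_sphere(sphere_map, normal_eye):
            # Index the sphere map by the normal's xy components in [-1, 1].
            h, w = sphere_map.shape[:2]
            u = int((normal_eye[0] * 0.5 + 0.5) * (w - 1))
            v = int((0.5 - normal_eye[1] * 0.5) * (h - 1))  # flip y for image rows
            return sphere_map[v, u]

        def style_tf(styles, value, normal_eye):
            # 'styles': list of (isovalue, sphere_map) pairs sorted by isovalue;
            # blend the two styles bracketing the sample's data value.
            if value <= styles[0][0]:
                return lit_sphere(styles[0][1], normal_eye)
            for (v0, m0), (v1, m1) in zip(styles, styles[1:]):
                if v0 <= value <= v1:
                    t = (value - v0) / (v1 - v0 + 1e-9)
                    return ((1 - t) * lit_sphere(m0, normal_eye)
                            + t * lit_sphere(m1, normal_eye))
            return lit_sphere(styles[-1][1], normal_eye)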
    [PDF] [Bibtex]
    @ARTICLE {Kohlmann-2007-EBV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Evaluation of a Bricked Volume Layout for a Medical Workstation based on Java",
    journal = "Journal of WSCG",
    year = "2007",
    volume = "15",
    number = "1-3",
    pages = "83--90",
    month = "jan",
    abstract = "Volumes acquired for medical examination purposes are constantly increasing  in size. For this reason, the computer’s memory is the limiting  factor for visualizing the data. Bricking is a well-known concept  used for rendering large data sets. The volume data is subdivided  into smaller blocks to achieve better memory utilization. Until now,  the vast majority of medical workstations use a linear volume layout.  We implemented a bricked volume layout for such a workstation based  on Java as required by our collaborative company partner to evaluate  different common access patterns to the volume data. For rendering,  we were mainly interested to see how the performance will differ  from the traditional linear volume layout if we generate images of  arbitrarily oriented slices via Multi-Planar Reformatting (MPR).  Furthermore, we tested access patterns which are crucial for segmentation  issues like a random access to data values and a simulated region  growing. Our goal was to find out if it makes sense to change the  volume layout of a medical workstation to benefit from bricking.  We were also interested to identify the tasks where problems might  occur if bricking is applied. Overall, our results show that it is  feasible to use a bricked volume layout in the stringent context  of a medical workstation implemented in Java.",
    pdf = "pdfs/Kohlmann-2007-EBV.pdf",
    images = "images/Kohlmann-2007-EBV.jpg",
    thumbnails = "images/Kohlmann-2007-EBV.png",
    issn = "1213-6972",
    affiliation = "tuwien",
    event = "WSCG 2007",
    keywords = "MPR, bricked volume layout, medical visualization, medical workstation",
    location = "Plzen, Czech Republic",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/Kohlmann-2007-EBV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2007-SLI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Semantic Layers for Illustrative Volume Rendering",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1336--1343",
    month = "oct",
    abstract = "Direct volume rendering techniques map volumetric attributes (e.g.,  density, gradient magnitude, etc.) to visual styles. Commonly this  mapping is specified by a transfer function. The specification of  transfer functions is a complex task and requires expert knowledge  about the underlying rendering technique. In the case of multiple  volumetric attributes and multiple visual styles the specification  of the multi-dimensional transfer function becomes more challenging  and non-intuitive. We present a novel methodology for the specification  of a mapping from several volumetric attributes to multiple illustrative  visual styles. We introduce semantic layers that allow a domain expert  to specify the mapping in the natural language of the domain. A semantic  layer defines the mapping of volumetric attributes to one visual  style. Volumetric attributes and visual styles are represented as  fuzzy sets. The mapping is specified by rules that are evaluated  with fuzzy logic arithmetics. The user specifies the fuzzy sets and  the rules without special knowledge about the underlying rendering  technique. Semantic layers allow for a linguistic specification of  the mapping from attributes to visual styles replacing the traditional  transfer function specification.",
    pdf = "pdfs/Rautek-2007-SLI.pdf",
    images = "images/Rautek-2007-SLI.jpg",
    thumbnails = "images/Rautek-2007-SLI.png",
    youtube = "https://www.youtube.com/watch?v=c91m6ru5m0g",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70591",
    event = "IEEE Visualization 2007",
    keywords = "focus+context techniques, volume visualization, illustrative visualization",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/"
    }

2006

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2006-ICE,
    author = "Stefan Bruckner and S{\"o}ren Grimm and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Illustrative Context-Preserving Exploration of Volume Data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2006",
    volume = "12",
    number = "6",
    pages = "1559--1569",
    month = "nov",
    abstract = "In volume rendering it is very difficult to simultaneously visualize  interior and exterior structures while preserving clear shape cues.  Highly transparent transfer functions produce cluttered images with  many overlapping structures, while clipping techniques completely  remove possibly important context information. In this paper we present  a new model for volume rendering, inspired by techniques from illustration.  It provides a means of interactively inspecting the interior of a  volumetric data set in a feature-driven way which retains context  information. The context-preserving volume rendering model uses a  function of shading intensity, gradient magnitude, distance to the  eye point, and previously accumulated opacity to selectively reduce  the opacity in less important data regions. It is controlled by two  user-specified parameters. This new method represents an alternative  to conventional clipping techniques, shares their easy and intuitive  user control, but does not suffer from the drawback of missing context  information.",
    pdf = "pdfs/Bruckner-2006-ICE.pdf",
    images = "images/Bruckner-2006-ICE.jpg",
    thumbnails = "images/Bruckner-2006-ICE.png",
    youtube = "https://www.youtube.com/watch?v=a92NXYtJeT0,https://www.youtube.com/watch?v=OLEr5-O1jmY,https://www.youtube.com/watch?v=RSet7-n6Mc4,https://www.youtube.com/watch?v=w0U8lteEMOM,https://www.youtube.com/watch?v=csYsfKrQxN8,https://www.youtube.com/watch?v=3xduvvU6IAw",
    issn = "1077-2626",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2006.96",
    keywords = "focus+context techniques, volume rendering, illustrative visualization",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-ICE/"
    }
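    The context-preserving model modulates the transfer-function opacity by gradient magnitude raised to a power that grows with shading intensity, proximity to the eye, and how little opacity has accumulated along the ray so far. A sketch of an opacity rule in this spirit, assuming all inputs are normalized to [0, 1]; the exact form of the exponent in the paper may differ, and kappa_t/kappa_s stand for the two user parameters.

        import numpy as np

        def context_preserving_alpha(a_tf, grad_mag, shading, depth, acc_a,
                                     kappa_t=2.0, kappa_s=1.0):
            # Flat (low-gradient), well-lit, nearby regions with little opacity
            # accumulated in front of them fade out; strong edges stay opaque.
            exponent = (kappa_t * shading * (1.0 - depth) * (1.0 - acc_a)) ** kappa_s
            return a_tf * np.clip(grad_mag, 0.0, 1.0) ** exponent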
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2006-EVV,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Exploded Views for Volume Data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2006",
    volume = "12",
    number = "5",
    pages = "1077--1084",
    month = "sep",
    abstract = "Exploded views are an illustration technique where an object is partitioned  into several segments. These segments are displaced to reveal otherwise  hidden detail. In this paper we apply the concept of exploded views  to volumetric data in order to solve the general problem of occlusion.  In many cases an object of interest is occluded by other structures.  While transparency or cutaways can be used to reveal a focus object,  these techniques remove parts of the context information. Exploded  views, on the other hand, do not suffer from this drawback. Our approach  employs a force-based model: the volume is divided into a part configuration  controlled by a number of forces and constraints. The focus object  exerts an explosion force causing the parts to arrange according  to the given constraints. We show that this novel and flexible approach  allows for a wide variety of explosion-based visualizations including  view-dependent explosions. Furthermore, we present a high-quality  GPU-based volume ray casting algorithm for exploded views which allows  rendering and interaction at several frames per second.",
    pdf = "pdfs/Bruckner-2006-EVV.pdf",
    images = "images/Bruckner-2006-EVV.jpg",
    thumbnails = "images/Bruckner-2006-EVV.png",
    youtube = "https://www.youtube.com/watch?v=6jEqVrjaM3M",
    issn = "1077-2626",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2006.140",
    event = "IEEE Visualization 2006",
    keywords = "exploded views, illustrative visualization, volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-EVV/"
    }
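    The force-based part arrangement can be illustrated with a toy integration: the focus object pushes the parts away and damping brings the system to rest; the constraints that keep parts on their tracks are omitted here. Purely illustrative names and dynamics, far simpler than the paper's model.

        import numpy as np

        def explode(part_centers, focus_center, strength=1.0,
                    damping=0.9, dt=0.05, steps=200):
            # Integrate a simple repulsive "explosion force" from the focus.
            pos = np.array(part_centers, dtype=float)
            vel = np.zeros_like(pos)
            focus = np.asarray(focus_center, dtype=float)
            for _ in range(steps):
                away = pos - focus
                dist = np.linalg.norm(away, axis=1, keepdims=True) + 1e-6
                vel = damping * (vel + dt * strength * away / dist)
                pos += dt * vel
            return pos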
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Rautek-2006-DHQ,
    author = "Peter Rautek and Bal{\'a}zs Csebfalvi and S{\"o}ren Grimm and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "D2VR: High Quality Volume Rendering of Projection-based Volumetric Data",
    booktitle = "Proceedings of EuroVis 2006",
    year = "2006",
    pages = "211--218",
    month = "may",
    publisher = "IEEE CS",
    abstract = "Volume rendering techniques are conventionally classified as either  direct or indirect methods. Indirect methods require to transform  the initial volumetric model into an intermediate geometrical model  in order to efficiently visualize it. In contrast, direct volume  rendering (DVR) methods can directly process the volumetric data.  Modern CT scanners usually provide data as a set of samples on a  rectilinear grid, which is computed from the measured projections  by discrete tomographic reconstruction. Therefore the rectilinear  grid can already be considered as an intermediate volume representation.  In this paper we introduce direct direct volume rendering (D²VR).  D2VR does not require a rectilinear grid, since it is based on an  immediate processing of the measured projections. Arbitrary samples  for ray casting are reconstructed from the projections by using the  Filtered Back-Projection algorithm. Our method removes a lossy resampling  step from the classical volume rendering pipeline. It provides much  higher accuracy than traditional grid-based resampling techniques  do. Furthermore we also present a novel high-quality gradient estimation  scheme, which is also based on the Filtered Back-Projection algorithm.",
    pdf = "pdfs/Rautek-2006-DHQ.pdf",
    images = "images/Rautek-2006-DHQ.jpg",
    thumbnails = "images/Rautek-2006-DHQ.png",
    number = "In Proceedings of EuroVis",
    affiliation = "tuwien",
    doi = "10.2312/VisSym/EuroVis06/211-218",
    keywords = "volume rendering, filtered back-projection, reconstruction",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/RAUTEK06/"
    }
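    The key step in D²VR is evaluating the reconstructed function at arbitrary ray-sample positions directly from the filtered projections, skipping the intermediate grid. A minimal 2D parallel-beam sketch of such a point evaluation; names are illustrative and the sinogram is assumed to be ramp-filtered already.

        import numpy as np

        def fbp_sample(filtered_sinogram, angles, x, y):
            # Back-project all filtered projections at the single point (x, y).
            n_det = filtered_sinogram.shape[1]
            center = (n_det - 1) / 2.0
            value = 0.0
            for proj, theta in zip(filtered_sinogram, angles):
                t = np.clip(x * np.cos(theta) + y * np.sin(theta) + center,
                            0, n_det - 1)          # detector coordinate
                i = min(int(t), n_det - 2)
                frac = t - i
                value += (1 - frac) * proj[i] + frac * proj[i + 1]
            return value * np.pi / len(angles)

    Estimating gradients by back-projecting derivative-filtered projections follows the same pattern, which is in the spirit of the paper's gradient scheme.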

2005

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2005-ICV,
    author = "Stefan Bruckner and S{\"o}ren Grimm and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Illustrative Context-Preserving Volume Rendering",
    booktitle = "Proceedings of EuroVis 2005",
    year = "2005",
    pages = "69--76",
    month = "may",
    abstract = "In volume rendering it is very difficult to simultaneously visualize  interior and exterior structures while preserving clear shape cues.  Very transparent transfer functions produce cluttered images with  many overlapping structures, while clipping techniques completely  remove possibly important context information. In this paper we present  a new model for volume rendering, inspired by techniques from illustration  that provides a means of interactively inspecting the interior of  a volumetric data set in a feature-driven way which retains context  information. The context-preserving volume rendering model uses a  function of shading intensity, gradient magnitude, distance to the  eye point, and previously accumulated opacity to selectively reduce  the opacity in less important data regions. It is controlled by two  user-specified parameters. This new method represents an alternative  to conventional clipping techniques, shares their easy and intuitive  user control, but does not suffer from the drawback of missing context  information. ",
    pdf = "pdfs/Bruckner-2005-ICV.pdf",
    images = "images/Bruckner-2005-ICV.jpg",
    thumbnails = "images/Bruckner-2005-ICV.png",
    youtube = "https://www.youtube.com/watch?v=Tc4E2oOD8Zg,https://www.youtube.com/watch?v=_8P_hVBoFeU,https://www.youtube.com/watch?v=0yxNoPjT6Ig,https://www.youtube.com/watch?v=EjG6E2WEO30",
    affiliation = "tuwien",
    doi = "10.2312/VisSym/EuroVis05/069-076",
    keywords = "non-photorealistic techniques, focus+context techniques, volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ICV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2005-VIS,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "VolumeShop: An Interactive System for Direct Volume Illustration",
    booktitle = "Proceedings of IEEE Visualization 2005",
    year = "2005",
    editor = "C. T. Silva, E. Gr{\"o}ller, H. Rushmeier",
    pages = "671--678",
    month = "oct",
    abstract = "Illustrations play a major role in the education process. Whether  used to teach a surgical or radiologic procedure, to illustrate normal  or aberrant anatomy, or to explain the functioning of a technical  device, illustration significantly impacts learning. Although many  specimens are readily available as volumetric data sets, particularly  in medicine, illustrations are commonly produced manually as static  images in a time-consuming process. Our goal is to create a fully  dynamic three-dimensional illustration environment which directly  operates on volume data. Single images have the aesthetic appeal  of traditional illustrations, but can be interactively altered and  explored. In this paper we present methods to realize such a system  which combines artistic visual styles and expressive visualization  techniques. We introduce a novel concept for direct multi-object  volume visualization which allows control of the appearance of inter-penetrating  objects via two-dimensional transfer functions. Furthermore, a unifying  approach to efficiently integrate many non-photorealistic rendering  models is presented. We discuss several illustrative concepts which  can be realized by combining cutaways, ghosting, and selective deformation.  Finally, we also propose a simple interface to specify objects of  interest through three-dimensional volumetric painting. All presented  methods are integrated into VolumeShop, an interactive hardware-accelerated  application for direct volume illustration.",
    pdf = "pdfs/Bruckner-2005-VIS.pdf",
    images = "images/Bruckner-2005-VIS.jpg",
    thumbnails = "images/Bruckner-2005-VIS.png",
    youtube = "https://www.youtube.com/watch?v=1FZausY8dFw,https://www.youtube.com/watch?v=WB-4NHKSM4k,https://www.youtube.com/watch?v=Rzi6q6n5lRs,https://www.youtube.com/watch?v=0B_fVsBibZk",
    affiliation = "tuwien",
    doi = "10.1109/VISUAL.2005.1532856",
    isbn = "0780394623",
    keywords = "focus+context techniques, illustrative visualization, volume rendering",
    location = "Minneapolis, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-VIS/"
    }
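    The "two-dimensional transfer functions" for inter-penetrating objects suggest a lookup table indexed by the scalar values of both objects at a sample. The sketch below shows one plausible reading of that idea; the table layout and color assignment are hypothetical, chosen only to illustrate the concept.

        import numpy as np

        # Hypothetical 2D transfer function: rows index object A's normalized
        # scalar value, columns index object B's; each cell stores an RGBA tuple
        # that defines the appearance where the two objects inter-penetrate.
        tf2d = np.zeros((256, 256, 4), dtype=np.float32)
        tf2d[..., 0] = np.linspace(0.0, 1.0, 256)[:, None]  # more of A -> redder
        tf2d[..., 2] = np.linspace(0.0, 1.0, 256)[None, :]  # more of B -> bluer
        tf2d[..., 3] = 0.5                                  # constant opacity here

        def intersection_appearance(sample_a, sample_b):
            """Look up RGBA for a sample inside the intersection of two objects."""
            ia = int(np.clip(sample_a, 0.0, 1.0) * 255)
            ib = int(np.clip(sample_b, 0.0, 1.0) * 255)
            return tf2d[ia, ib]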
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Coto-2005-MAC,
    author = "Ernesto Coto and S{\"o}ren Grimm and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Armin Kanitsar and Omaira Rodriguez",
    title = "MammoExplorer: An Advanced {CAD} Application for Breast {DC}E-{MRI}",
    booktitle = "Proceedings of VMV 2005",
    year = "2005",
    editor = "G. Greiner, J. Hornegger, H. Niemann, M. Stamminger",
    pages = "91--98",
    month = "nov",
    abstract = "Currently X-ray mammography is the most widely used method for early  detection of breast cancer. However, the use of Dynamic Contrast  Enhanced MRI (DCE-MRI) has gained wider attention, since it considerably  improves tumor detection and classification by analyzing the flow  of contrast agent within the breast tissue. In this paper we present  MammoExplorer, a CAD application that combines advanced interaction,  segmentation and visualization techniques to explore Breast DCE-MRI  data. Our application uses Brushing and Linking, Two-level Volume  Rendering, Importance-driven Volume Rendering, and False Color Maps.  In addition, we present Enhancement Scatterplots, a novel graphical  representation of DCE-MRI data, novel segmentation approaches, and  a new way to explore time-varying CE-MRI data.",
    pdf = "pdfs/Coto-2005-MAC.pdf",
    images = "images/Coto-2005-MAC.jpg",
    thumbnails = "images/Coto-2005-MAC.png",
    youtube = "https://www.youtube.com/watch?v=6XBD1f1y2xs",
    affiliation = "tuwien",
    isbn = "3898380688",
    keywords = "CAD, breast cancer, contrast enhanced MRI",
    location = "Erlangen, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/coto-2005-MAC/"
    }
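    Since the paper analyzes the flow of contrast agent over time, a worked example of the underlying measurement may help. Relative enhancement of post-contrast versus pre-contrast signal is a standard DCE-MRI quantity; pairing early wash-in against later change is one plausible choice of axes for an enhancement scatterplot, though the paper's exact definition may differ.

        import numpy as np

        def relative_enhancement(before, after):
            """Per-voxel relative signal enhancement, a standard DCE-MRI measure."""
            return (after - before) / np.maximum(before, 1e-6)  # avoid division by zero

        # Toy time series: one pre-contrast volume and two post-contrast volumes.
        rng = np.random.default_rng(0)
        pre = rng.uniform(50.0, 100.0, size=(64, 64, 16))
        early = pre * rng.uniform(1.0, 2.0, size=pre.shape)    # wash-in
        late = early * rng.uniform(0.7, 1.1, size=pre.shape)   # plateau or wash-out

        # One point per voxel: early enhancement vs. subsequent change -- the kind
        # of 2D embedding that brushing and linking could select tissue classes in.
        x = relative_enhancement(pre, early).ravel()
        y = relative_enhancement(early, late).ravel()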

2004

    [PDF] [DOI] [Bibtex]
    @ARTICLE {Grimm-2004-VVD,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "VOTS: VOlume doTS as a Point-Based Representation of Volumetric Data",
    journal = "Computer Graphics Forum",
    year = "2004",
    volume = "23",
    number = "3",
    pages = "668--661",
    month = "sep",
    abstract = "We present Volume dots (Vots), a new primitive for volumetric data  modelling, processing, and rendering. Vots are a point-based representation  of volumetric data. An individual Vot is specified by the coefficients  of a Taylor series expansion, i.e. the function value and higher  order derivatives at a specific point. A Vot does not only represent  a single sample point, it represents the underlying function within  a region. With the Vots representation we have a more intuitive and  high-level description of the volume data. This allows direct analytical  examination and manipulation of volumetric datasets. Vots enable  the representation of the underlying scalar function with specified  precision. User-centric importance sampling is also possible, i.e.,  unimportant volume parts are still present but represented with just  very few Vots. As proof of concept, we show Maximum Intensity Projection  based on Vots.",
    pdf = "pdfs/Grimm-2004-VVD.pdf",
    images = "images/Grimm-2004-VVD.jpg",
    thumbnails = "images/Grimm-2004-VVD.png",
    issn = "0167-7055",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659..00798.x",
    keywords = "point-based data, volume data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-volume/"
    }
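    Because a Vot stores Taylor coefficients, evaluating the represented function inside a Vot's region is just a truncated Taylor expansion. The sketch below shows a second-order evaluation; the data layout is hypothetical, but the math follows directly from the abstract.

        import numpy as np

        def eval_vot(x, center, value, gradient, hessian):
            """Evaluate a second-order Vot: the truncated Taylor expansion
            f(x) ~ f(c) + g . (x - c) + 0.5 (x - c)^T H (x - c)."""
            d = np.asarray(x, dtype=float) - np.asarray(center, dtype=float)
            return value + gradient @ d + 0.5 * d @ hessian @ d

        # A single Vot at the origin for f(x, y) = x^2 + y; since f is quadratic,
        # the second-order expansion reproduces it exactly.
        center = np.array([0.0, 0.0])
        value = 0.0
        gradient = np.array([0.0, 1.0])      # (df/dx, df/dy) at the origin
        hessian = np.array([[2.0, 0.0],
                            [0.0, 0.0]])     # only d2f/dx2 is nonzero

        print(eval_vot([0.5, 0.25], center, value, gradient, hessian))  # 0.5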
    [PDF] [Bibtex]
    @MASTERSTHESIS {Bruckner-2004-EVV-Thesis,
    author = "Stefan Bruckner",
    title = "Efficient Volume Visualization of Large Medical Datasets",
    school = "Vienna University of Technology, Austria",
    year = "2004",
    month = "may",
    abstract = "The size of volumetric datasets used in medical environments is increasing  at a rapid pace. Due to excessive pre-computation and memory demanding  data structures, most current approaches for volume visualization  do not meet the requirements of daily clinical routine. In this diploma  thesis, an approach for interactive high-quality rendering of large  medical data is presented. It is based on image-order raycasting  with object-order data traversal, using an optimized cache coherent  memory layout. New techniques and parallelization strategies for  direct volume rendering of large data on commodity hardware are presented.  By using new memory efficient acceleration data structures, high-quality  direct volume rendering of several hundred megabyte sized datasets  at sub-second frame rates on a commodity notebook is achieved.",
    pdf = "pdfs/Bruckner-2004-EVV-Thesis.pdf",
    images = "images/Bruckner-2004-EVV-Thesis.jpg",
    thumbnails = "images/Bruckner-2004-EVV-Thesis.png",
    affiliation = "tuwien",
    keywords = "volume rendering, large data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/bruckner-2004-EVV/"
    }
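    The "optimized cache coherent memory layout" refers to storing the volume in small bricks so that nearby samples stay close in memory. A naive version of the address computation is sketched below; the thesis's actual scheme is specifically designed to avoid such per-sample arithmetic, e.g. by using power-of-two brick sizes so divisions and modulos become shifts and masks.

        B = 32  # brick edge length in voxels (assumed; a power of two)

        def brick_address(x, y, z, bricks_x, bricks_y):
            """Map global voxel coordinates to (brick index, offset in brick)
            for a bricked volume layout. Naive form, for illustration only."""
            bx, by, bz = x // B, y // B, z // B   # which brick
            ox, oy, oz = x % B, y % B, z % B      # where inside it
            brick = (bz * bricks_y + by) * bricks_x + bx
            offset = (oz * B + oy) * B + ox
            return brick, offset

        # With B a power of two, x // B == x >> 5 and x % B == x & 31, which is
        # how a real implementation would keep the addressing overhead minimal.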
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Grimm-2004-MEA,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Memory Efficient Acceleration Structures and Techniques for {CPU}-based Volume Raycasting of Large Data",
    booktitle = "Proceedings of IEEE VolVis 2004",
    year = "2004",
    editor = "D. Silver, T. Ertl, C. Silva",
    pages = "1--8",
    month = "oct",
    abstract = "Most CPU-based volume raycasting approaches achieve high performance  by advanced memory layouts, space subdivision, and excessive pre-computing.  Such approaches typically need an enormous amount of memory. They  are limited to sizes which do not satisfy the medical data used in  daily clinical routine. We present a new volume raycasting approach  based on image-ordered raycasting with object-ordered processing,  which is able to perform high-quality rendering of very large medical  data in real-time on commodity computers. For large medical data  such as computed tomographic (CT) angiography run-offs (512x512x1202)  we achieve rendering times up to 2.5 fps on a commodity notebook.  We achieve this by introducing a memory efficient acceleration technique  for on-the-fly gradient estimation and a memory efficient hybrid  removal and skipping technique of transparent regions. We employ  quantized binary histograms, granular resolution octrees, and a cell  invisibility cache. These acceleration structures require just a  small extra storage of approximately 10%.",
    pdf = "pdfs/Grimm-2004-MEA.pdf",
    images = "images/Grimm-2004-MEA.jpg",
    thumbnails = "images/Grimm-2004-MEA.png",
    youtube = "https://www.youtube.com/watch?v=WK9DJ6Dyrx4,https://www.youtube.com/watch?v=iYz5VYHMd9U,https://www.youtube.com/watch?v=UdtaaENWs7M",
    affiliation = "tuwien",
    doi = "10.1109/SVVG.2004.8",
    isbn = "0-7803-8781-3",
    keywords = "volume rendering, acceleration, large data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-memory/"
    }
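    Of the acceleration structures listed in the abstract, the cell invisibility cache is the easiest to illustrate: it flags cells that the current transfer function maps entirely to zero opacity, so the raycaster can skip them. The eager, array-based version below is an assumption made for clarity; the paper's cache is more compact and tied to the traversal itself.

        import numpy as np

        def transparent_cells(volume, opacity_tf):
            """Mark cells (2x2x2 voxel neighborhoods) whose eight corner voxels
            are all mapped to zero opacity, i.e. cells a ray can skip."""
            v = opacity_tf[volume] > 0.0  # per-voxel visibility
            cell_visible = (v[:-1, :-1, :-1] | v[1:, :-1, :-1] | v[:-1, 1:, :-1]
                            | v[:-1, :-1, 1:] | v[1:, 1:, :-1] | v[1:, :-1, 1:]
                            | v[:-1, 1:, 1:] | v[1:, 1:, 1:])
            return ~cell_visible

        volume = np.random.randint(0, 256, size=(64, 64, 64))
        opacity_tf = np.where(np.arange(256) > 100, 1.0, 0.0)  # <= 100 transparent
        skippable = transparent_cells(volume, opacity_tf)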
    [PDF] [Bibtex]
    @INPROCEEDINGS {Bruckner-2004-EVV,
    author = "Stefan Bruckner",
    title = "Efficient Volume Visualization of Large Medical Datasets",
    booktitle = "Proceedings of CESCG 2004",
    year = "2004",
    month = "apr",
    abstract = "In volume visualization, huge amounts of data have to be processed.  While modern hardware is quite capable of this task in terms of processing  power, the gap between CPU performance and memory bandwidth further  increases with every new generation of CPUs. It is therefore essential  to efficiently use the limited memory bandwidth. In this paper, we  present novel approaches to optimize CPU-based volume raycasting  of large datasets on commodity hardware. A new addressing scheme  is introduced, which permits the use of a bricked volume layout with  minimal overhead. We further present an extended parallelization  strategy for Simultaneous Multithreading. Finally, we introduce memory  efficient acceleration data structures which enable us to render  large medical datasets, such as the Visible Male (587x341x1878),  at up to 2.5 frames/second on a commodity notebook.",
    pdf = "pdfs/Bruckner-2004-EVV.pdf",
    images = "images/Bruckner-2004-EVV.jpg",
    thumbnails = "images/Bruckner-2004-EVV.png",
    note = "CESCG 2004 Best Paper Award and Best Presentation Award",
    affiliation = "tuwien",
    url = "http://www.cescg.org/CESCG-2004/web/Bruckner-Stefan/html/"
    }
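    The Simultaneous Multithreading strategy in the paper is more refined than a plain thread pool (threads are coordinated so they cooperate in the cache rather than fight over it), but the basic shape of thread-parallel raycasting is easy to show: partition the image into tiles and hand them to worker threads. The tile size and the two workers below are illustrative assumptions.

        from concurrent.futures import ThreadPoolExecutor

        import numpy as np

        WIDTH, HEIGHT, TILE = 512, 512, 64

        def render_tile(tile_origin):
            """Stand-in for casting all rays of one image tile."""
            tx, ty = tile_origin
            return tx, ty, np.zeros((TILE, TILE, 3), dtype=np.float32)

        tiles = [(tx, ty) for ty in range(0, HEIGHT, TILE)
                          for tx in range(0, WIDTH, TILE)]
        image = np.zeros((HEIGHT, WIDTH, 3), dtype=np.float32)

        # Two workers mirror the two logical CPUs of a Hyper-Threading-era core.
        with ThreadPoolExecutor(max_workers=2) as pool:
            for tx, ty, pixels in pool.map(render_tile, tiles):
                image[ty:ty + TILE, tx:tx + TILE] = pixels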
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Grimm-2004-FDM,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Flexible Direct Multi-Volume Rendering in Interactive Scenes",
    booktitle = "Proceedings of VMV 2004",
    year = "2004",
    pages = "386--379",
    month = "oct",
    abstract = "In this paper we describe methods to efficiently visualize multiple  ntersecting volumetric objects. We introduce the concept of V-Objects.  V-Objects represent abstract properties of an object connected to  a volumetric data source. We present a method to perform direct volume  rendering of a scene comprised of an arbitrary number of possibly  intersecting V-Objects. The idea of our approach is to distinguish  between regions of intersection, which need costly multi-volume processing,  and regions containing only one V-Object, which can be processed  using a highly efficient brick-wise volume traversal scheme. Using  this method, we achieve significant performance gains for multi-volume  rendering. We show possible medical applications, such as surgical  planning, diagnosis, and education.",
    pdf = "pdfs/Grimm-2004-FDM.pdf",
    images = "images/Grimm-2004-FDM.jpg",
    thumbnails = "images/Grimm-2004-FDM.png",
    youtube = "https://www.youtube.com/watch?v=pDskLE6cnFw,https://www.youtube.com/watch?v=VYKaSpsZd2s,https://www.youtube.com/watch?v=BGE640_Tw2U,https://www.youtube.com/watch?v=p-I0HWBv4Jc,https://www.youtube.com/watch?v=6zlprE38GGo",
    affiliation = "tuwien",
    keywords = "multi volume rendering, medical visualization, volume raycasting",
    location = "Stanford, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/GRIMM-2004-FDMX-P/"
    }
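    The key idea, separating cheap single-object regions from costly intersection regions, can be sketched by classifying bricks against the V-Objects' bounds. The axis-aligned-box test below is an assumed simplification of whatever region test the paper actually uses.

        def bricks_by_complexity(brick_boxes, vobject_boxes):
            """Partition bricks into single-object bricks (fast path) and
            multi-object bricks (costly multi-volume processing).
            Boxes are ((xmin, xmax), (ymin, ymax), (zmin, zmax)) tuples."""
            def overlaps(a, b):
                return all(lo1 < hi2 and lo2 < hi1
                           for (lo1, hi1), (lo2, hi2) in zip(a, b))

            single, multi = [], []
            for i, brick in enumerate(brick_boxes):
                n = sum(overlaps(brick, obj) for obj in vobject_boxes)
                (multi if n > 1 else single).append(i)
            return single, multi

        # Two overlapping V-Objects; only the middle brick needs the slow path.
        objs = [((0, 6), (0, 4), (0, 4)), ((4, 10), (0, 4), (0, 4))]
        bricks = [((0, 4), (0, 4), (0, 4)),
                  ((4, 6), (0, 4), (0, 4)),
                  ((6, 10), (0, 4), (0, 4))]
        print(bricks_by_complexity(bricks, objs))  # ([0, 2], [1])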
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Grimm-2004-RDA,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "A Refined Data Addressing and Processing Scheme to Accelerate Volume Raycasting",
    journal = "Computers \& Graphics",
    year = "2004",
    volume = "28",
    number = "5",
    pages = "719--729",
    month = "oct",
    abstract = "Most volume rendering systems based on CPU volume raycasting still  suffer from inefficient CPU utilization and high memory usage. To  target these issues we present a new technique for efficient data  addressing. Furthermore, we introduce a new processing scheme for  volume raycasting which exploits thread-level parallelism - a technology  now supported by commodity computer architectures.",
    pdf = "pdfs/Grimm-2004-RDA.pdf",
    images = "images/Grimm-2004-RDA.jpg",
    thumbnails = "images/Grimm-2004-RDA.png",
    issn = "0097-8493",
    affiliation = "tuwien",
    doi = "10.1016/j.cag.2004.06.010",
    isbn = "0097-8493",
    keywords = "volume raycasting, bricking, parallel computing",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-arefined/"
    }

2003

    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2003-IWN,
    author = "Stefan Bruckner and Dieter Schmalstieg and Helwig Hauser and Meister Eduard Gr{\"o}ller",
    title = "The Inverse Warp: Non-Invasive Integration of Shear-Warp Volume Rendering into Polygon Rendering Pipelines",
    booktitle = "Proceedings of VMV 2003",
    year = "2003",
    editor = "T. Ertl, B. Girod, G. Greiner, H. Niemann, H.-P. Seidel, E. Steinbach, R. Westermann",
    pages = "529--536",
    month = "nov",
    publisher = "infix",
    abstract = "In this paper, a simple and efficient solution for combining shear-warp  volume rendering and the hardware graphics pipeline is presented.  The approach applies an inverse warp transformation to the Z-Buffer,  containing the rendered geometry. This information is used for combining  geometry and volume data during compositing. We present applications  of this concept which include hybrid volume rendering, i.e., concurrent  rendering of polygonal objects and volume data, and volume clipping  on convex clipping regions. Furthermore, it can be used to efficiently  define regions with different rendering modes and transfer functions  for focus+context volume rendering. Empirical results show that the  approach has very low impact on performance.",
    pdf = "pdfs/Bruckner-2003-IWN.pdf",
    images = "images/Bruckner-2003-IWN.jpg",
    thumbnails = "images/Bruckner-2003-IWN.png",
    youtube = "https://www.youtube.com/watch?v=l_49gLBUO3E,https://www.youtube.com/watch?v=zmWQfUs3Bmc,https://www.youtube.com/watch?v=qFwv-Ru8Ftc",
    affiliation = "tuwien",
    isbn = "3898380483",
    keywords = "focus+context techniques, clipping, hybrid volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2003/Bruckner-2003-The/"
    }
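    To make the mechanism concrete: shear-warp composites into an intermediate image that a 2D warp later maps to the final image, while the geometry's Z-buffer lives in final-image space. Resampling that Z-buffer through the warp (the "inverse warp") gives every intermediate-image pixel the depth at which its ray meets geometry. The matrix convention and nearest-neighbor sampling below are assumptions chosen for brevity.

        import numpy as np

        def inverse_warp_zbuffer(zbuffer, warp_to_final):
            """Resample a final-image Z-buffer into intermediate-image space.
            warp_to_final is the 3x3 homogeneous 2D warp matrix mapping
            intermediate-image pixels to final-image positions."""
            h, w = zbuffer.shape
            ys, xs = np.mgrid[0:h, 0:w]
            pts = np.stack([xs.ravel(), ys.ravel(), np.ones(h * w)])
            fx, fy, fw = warp_to_final @ pts
            fx = np.clip(np.round(fx / fw).astype(int), 0, w - 1)
            fy = np.clip(np.round(fy / fw).astype(int), 0, h - 1)
            return zbuffer[fy, fx].reshape(h, w)

        # During front-to-back compositing, the ray at intermediate pixel (x, y)
        # then blends in the geometry and terminates once the current slice's
        # depth passes the warped Z value at (x, y).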