@ARTICLE{LawonnSmit-2017-Survey,
  author = {Lawonn, K. and Smit, N.N. and B{\"u}hler, K. and Preim, B.},
  title = {A Survey on Multimodal Medical Data Visualization},
  journal = {Computer Graphics Forum},
  year = {2017},
  issn = {1467-8659},
  url = {http://dx.doi.org/10.1111/cgf.13306},
  doi = {10.1111/cgf.13306},
  keywords = {medical imaging, scientific visualization, volume visualization, Medical Imaging [Visualization], Scientific Visualization [Visualization], Volume Visualization [Visualization], Multimodal Medical Data},
  abstract = {Multi-modal data of the complex human anatomy contain a wealth of information. To visualize and explore such data, techniques for emphasizing important structures and controlling visibility are essential. Such fused overview visualizations guide physicians to suspicious regions to be analysed in detail, e.g. with slice-based viewing. We give an overview of the state of the art in multi-modal medical data visualization techniques. Multi-modal medical data consist of multiple scans of the same subject using various acquisition methods, often combining multiple complementary types of information. Three-dimensional visualization techniques for multi-modal medical data can be used in diagnosis, treatment planning, doctor–patient communication, as well as interdisciplinary communication. Over the years, multiple techniques have been developed to cope with the various associated challenges and present the relevant information from multiple sources in an insightful way. We present an overview of these techniques, analyse the specific challenges that arise in multi-modal data visualization, and discuss how recent works aim to solve these, often using smart visibility techniques. We provide a taxonomy of these multi-modal visualization applications based on the modalities used and the visualization techniques employed. Additionally, we identify unsolved problems as potential future research directions.},
  note = {CGF Early View},
  images = {images/LawonnSmit-2017-MULTI.JPG},
  pdf = {pdfs/LawonnSmit-2017-MULTI.pdf},
  thumbnails = {images/LawonnSmit-2017-MULTI.PNG}
}
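The fused overview renderings this survey categorizes typically classify each modality separately and then combine the results per voxel. The following is a minimal sketch of such a fusion step, not a method from the survey itself; the function name fuse_multimodal, the transfer-function callables tf_a/tf_b, and the global 50/50 default weight are illustrative assumptions (smart-visibility techniques would vary the weighting locally instead).

    import numpy as np

    def fuse_multimodal(vol_a, vol_b, tf_a, tf_b, weight=0.5):
        # Classify each co-registered modality independently: tf_* maps a
        # scalar volume of shape (X, Y, Z) to RGBA of shape (X, Y, Z, 4).
        rgba_a, rgba_b = tf_a(vol_a), tf_b(vol_b)
        # Opacity-weighted fusion: the modality that is more opaque at a
        # voxel contributes more to the fused color there.
        w_a = weight * rgba_a[..., 3:]
        w_b = (1.0 - weight) * rgba_b[..., 3:]
        alpha = w_a + w_b
        color = (w_a * rgba_a[..., :3] + w_b * rgba_b[..., :3]) / np.maximum(alpha, 1e-8)
        return np.concatenate([color, alpha], axis=-1)

The fused RGBA volume can then be rendered with any standard ray caster or sampled for slice-based viewing.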
@misc{Hauser2017Eurovis,
  author = {Helwig Hauser},
  title = {From One to Many in Visualization},
  year = {2017},
  month = {June},
  day = {16},
  howpublished = {Capstone Talk at EuroVis 2017},
  location = {Barcelona, Spain},
  images = {images/one-to-many.png},
  thumbnails = {images/one-to-many.png},
  pdf = {pdfs/2017-06-16--EuroVis2017--FromOneToMany--HH--print2up.pdf}
}

@misc{Hauser2017IntroToVis,
  author = {Helwig Hauser},
  title = {An Introduction to Visualization},
  year = {2017},
  month = {March},
  day = {19},
  howpublished = {Invited Talk at OO17},
  location = {Bergen, Norway},
  images = {images/IntroToVis.png},
  thumbnails = {images/IntroToVis.png},
  pdf = {pdfs/2017-04-19--OO17-Vis--HH--print2up.pdf}
}

@misc{Hauser2017VisForMed,
  author = {Helwig Hauser},
  title = {Visualization for Medical Data Science},
  year = {2017},
  month = {February},
  day = {23},
  howpublished = {Invited Talk at the BUS1 Inauguration},
  location = {Bergen, Norway},
  images = {images/2017-02-23--BUS1inaug--Vis4MedDataSci--HH--print2up.png},
  thumbnails = {images/2017-02-23--BUS1inaug--Vis4MedDataSci--HH--print2up.png},
  pdf = {pdfs/2017-02-23--BUS1inaug--Vis4MedDataSci--HH--print2up.pdf}
}

@misc{Hauser2017FutureBig,
  author = {Helwig Hauser},
  title = {The Future of Big Data Visualization},
  year = {2017},
  month = {January},
  day = {26},
  howpublished = {Invited Talk at VCT 2017},
  location = {Vienna, Austria},
  images = {images/2017-01-26--VCT2017--BigDataVisFuture--HH--print2up.png},
  thumbnails = {images/2017-01-26--VCT2017--BigDataVisFuture--HH--print2up.png},
  pdf = {pdfs/2017-01-26--VCT2017--BigDataVisFuture--HH--print2up.pdf}
}

@ARTICLE{Lind-2017-CCR,
  author = {Andreas Johnsen Lind and Stefan Bruckner},
  title = {Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = {2017},
  volume = {23},
  number = {1},
  month = jan,
  note = {Presented at IEEE SciVis 2016},
  abstract = {Within the visualization community there are some well-known techniques for visualizing 3D spatial data and some general assumptions about how perception affects the performance of these techniques in practice. However, there is a lack of empirical research backing up the possible performance differences among the basic techniques for general tasks. One such assumption is that 3D renderings are better for obtaining an overview, whereas cross-sectional visualizations such as the commonly used Multi-Planar Reformation (MPR) are better for supporting detailed analysis tasks. In the present study we investigated this common assumption by examining the difference in performance between MPR and 3D rendering for correctly identifying a known surface. We also examined whether prior experience working with image data affects the participants' performance, and whether there was any difference between interactive and static versions of the visualizations. Answering this question is important because it can be used as part of a scientific and empirical basis for determining when to use which of the two techniques. An advantage of the present study compared to other studies is that several factors were taken into account to compare the two techniques. The problem was examined through an experiment with 45 participants, where physical objects were used as the known surface (ground truth). Our findings showed that: (1) the 3D renderings largely outperformed the cross sections; (2) interactive visualizations were partially more effective than static visualizations; and (3) the high-experience group did not generally outperform the low-experience group.},
  event = {IEEE SciVis 2016},
  doi = {10.1109/TVCG.2016.2598602},
  images = {images/Lind-2017-CCR.jpg},
  keywords = {human-computer interaction, quantitative evaluation, volume visualization},
  location = {Baltimore, USA},
  thumbnails = {images/Lind-2017-CCR.png}
}
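Since the study hinges on Multi-Planar Reformation, a minimal sketch of the textbook construction may help: resample the volume on an arbitrarily oriented plane via trilinear interpolation. This is not code from the paper; mpr_slice, its parameters, and the order=1 (trilinear) choice are illustrative assumptions.

    import numpy as np
    from scipy.ndimage import map_coordinates

    def mpr_slice(volume, origin, u_dir, v_dir, size=(256, 256), spacing=1.0):
        # Normalized in-plane axes spanning the reformation plane.
        u = np.asarray(u_dir, float)
        u /= np.linalg.norm(u)
        v = np.asarray(v_dir, float)
        v /= np.linalg.norm(v)
        rows, cols = size
        i, j = np.mgrid[0:rows, 0:cols]
        # Voxel coordinates of every pixel on the plane, centered on origin;
        # result has shape (3, rows, cols) as map_coordinates expects.
        pts = (np.asarray(origin, float)[:, None, None]
               + (i - rows / 2.0) * spacing * u[:, None, None]
               + (j - cols / 2.0) * spacing * v[:, None, None])
        # Trilinear resampling of the volume at the plane positions.
        return map_coordinates(volume, pts, order=1, mode='nearest')

Axis-aligned axial, coronal, and sagittal views fall out as special cases of u_dir/v_dir.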
@ARTICLE{Kolesar-2017-FCP,
  author = {I. Kolesar and S. Bruckner and I. Viola and H. Hauser},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  title = {A Fractional Cartesian Composition Model for Semi-spatial Comparative Visualization Design},
  year = {2017},
  volume = {23},
  number = {1},
  pages = {1-1},
  publisher = {IEEE},
  note = {Presented at IEEE SciVis 2016},
  abstract = {The study of spatial data ensembles leads to substantial visualization challenges in a variety of applications. In this paper, we present a model for comparative visualization that supports the design of corresponding ensemble visualization solutions by partial automation. We focus on applications where the user is interested in preserving selected spatial characteristics of the data as much as possible, even when many ensemble members should be jointly studied using comparative visualization. In our model, we separate the design challenge into a minimal set of user-specified parameters and an optimization component for the automatic configuration of the remaining design variables. We provide an illustrated formal description of our model and exemplify our approach in the context of several application examples from different domains in order to demonstrate its generality within the class of comparative visualization problems for spatial data ensembles.},
  keywords = {Computational modeling; Data models; Data visualization; Encoding; Spatial databases; Three-dimensional displays; Visualization; Design Methodologies; Integrating Spatial and Non-Spatial Data Visualization; Visualization Models},
  doi = {10.1109/TVCG.2016.2598870},
  issn = {1077-2626},
  month = jan,
  project = {physioillustration},
  images = {images/Kolesar-2017-FCC.jpg},
  thumbnails = {images/Kolesar-2017-FCC.png}
}

@ARTICLE{Smit-2017-PAS,
  author = {Noeska Smit and Kai Lawonn and Annelot Kraima and Marco DeRuiter and Hessam Sokooti and Stefan Bruckner and Elmar Eisemann and Anna Vilanova},
  title = {PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = {2017},
  volume = {23},
  number = {1},
  month = jan,
  note = {Presented at IEEE SciVis 2016},
  abstract = {Due to the intricate relationship between the pelvic organs and vital structures, such as vessels and nerves, pelvic anatomy is often considered to be complex to comprehend. In oncological pelvic surgery, a trade-off has to be made between complete tumor resection and preserving function by preventing damage to the nerves. Damage to the autonomic nerves causes undesirable post-operative side-effects such as fecal and urinary incontinence, as well as sexual dysfunction in up to 80 percent of the cases. Since these autonomic nerves are not visible in pre-operative MRI scans or during surgery, avoiding nerve damage during such a surgical procedure becomes challenging. In this work, we present visualization methods to represent context, target, and risk structures for surgical planning. We employ distance-based and occlusion management techniques in an atlas-based surgical planning tool for oncological pelvic surgery. Patient-specific pre-operative MRI scans are registered to an atlas model that includes nerve information. Through several interactive linked views, the spatial relationships and distances between the organs, tumor, and risk zones are visualized to improve understanding, while avoiding occlusion. In this way, the surgeon can examine surgically relevant structures and plan the procedure before going into the operating theater, thus raising awareness of the autonomic nerve zone regions and potentially reducing post-operative complications. Furthermore, we present the results of a domain expert evaluation with surgical oncologists that demonstrates the advantages of our approach.},
  event = {IEEE SciVis 2016},
  images = {images/Smit-2017-PAS.jpg},
  keywords = {atlas, surgical planning, medical visualization},
  location = {Baltimore, USA},
  pdf = {pdfs/Smit-2016-PAS.pdf},
  thumbnails = {images/Smit-2017-PAS.png}
}
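The distance-based techniques mentioned in the PelVis abstract boil down to evaluating, for every point of interest, how close it lies to a risk structure. Below is a small sketch of that idea under stated assumptions: binary masks on a common voxel grid (i.e., after the atlas registration step), a Euclidean distance transform, and a hypothetical 10 mm safety margin; risk_map and all parameter names are illustrative, not taken from the PelVis tool.

    import numpy as np
    from scipy.ndimage import distance_transform_edt

    def risk_map(tumor_mask, nerve_mask, voxel_size=(1.0, 1.0, 1.0), margin_mm=10.0):
        # Distance (mm) from every voxel to the nearest nerve-zone voxel:
        # the EDT measures distance to the nearest zero, so nerve voxels
        # must be the zeros of the input.
        dist = distance_transform_edt(~nerve_mask.astype(bool), sampling=voxel_size)
        # Normalize to a risk value: 1.0 on a nerve zone, falling to 0.0
        # at the safety margin and beyond.
        risk = 1.0 - np.clip(dist / margin_mm, 0.0, 1.0)
        # Restrict to the tumor region, which is what resection planning inspects.
        return np.where(tumor_mask.astype(bool), risk, np.nan)

Mapping this scalar through a color scale on the tumor surface gives the kind of at-a-glance awareness of nerve risk zones the abstract describes.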
@article{Stoppel-2017-VPI,
  author = {Sergej Stoppel and Stefan Bruckner},
  title = {Volvelle: Printable Interactive Volume Visualization},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = {2017},
  volume = {23},
  number = {1},
  month = jan,
  note = {Presented at IEEE SciVis 2016},
  abstract = {Interaction is an indispensable aspect of data visualization. The presentation of volumetric data, in particular, often significantly benefits from interactive manipulation of parameters such as transfer functions, rendering styles, or clipping planes. However, when we want to create hardcopies of such visualizations, this essential aspect is lost. In this paper, we present a novel approach for creating hardcopies of volume visualizations which preserves a certain degree of interactivity. We present a method for automatically generating volvelles, printable tangible wheel charts that can be manipulated to explore different parameter settings. Our interactive system allows the flexible mapping of arbitrary visualization parameters and supports advanced features such as linked views. The resulting designs can be easily reproduced using a standard printer and assembled within a few minutes.},
  doi = {10.1109/TVCG.2016.2599211},
  images = {images/Stoppel-2017-VPI.jpg},
  pdf = {pdfs/Stoppel_VIS2017_Volvelle.pdf},
  vid = {vids/Stoppel_VIS2017_Volvelle.mp4},
  thumbnails = {images/Stoppel-2017-VPI.png}
}
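The core trick of a volvelle is purely geometric: each discrete parameter setting owns an angular sector of the wheel, and rotating the wheel brings one sector under a cut-out window. A tiny sketch of that bookkeeping follows, with equal sectors assumed and all names (volvelle_layout, rotation_for) invented for illustration; the paper's system automates far more, including rendering a precomputed visualization into each sector.

    def volvelle_layout(param_values):
        # Assign each discrete parameter value an equal angular sector;
        # returns (value, sector_start_angle_in_degrees) pairs.
        sector = 360.0 / len(param_values)
        return [(value, i * sector) for i, value in enumerate(param_values)]

    def rotation_for(index, n_values):
        # Wheel rotation (degrees) that brings sector `index` under a
        # window cut at angle 0 of the cover sheet.
        return -(360.0 / n_values) * index

For example, volvelle_layout([0.1, 0.2, 0.4, 0.8]) splits the wheel into four 90-degree sectors, and rotation_for(2, 4) returns the -180 degree turn that exposes the third setting.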
@article{Turkay-2017-VIS,
  author = {C. Turkay and E. Kaya and S. Balcisoy and H. Hauser},
  title = {Designing Progressive and Interactive Analytics Processes for High-Dimensional Data Analysis},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = {2017},
  volume = {PP},
  number = {99},
  pages = {1-1},
  month = jan,
  abstract = {In interactive data analysis processes, the dialogue between the human and the computer is the enabling mechanism that can lead to actionable observations about the phenomena being investigated. It is of paramount importance that this dialogue is not interrupted by slow computational mechanisms that ignore the known temporal characteristics of human-computer interaction, i.e., the perceptual and cognitive capabilities of the users. In cases where the analysis involves an integrated computational method, for instance to reduce the dimensionality of the data or to perform clustering, such interruptions are likely. To remedy this, progressive computations, where results are iteratively improved, are receiving increasing interest in visual analytics. In this paper, we present techniques and design considerations for incorporating progressive methods within interactive analysis processes that involve high-dimensional data. We define methodologies to facilitate processes that adhere to the perceptual characteristics of users and describe how online algorithms can be incorporated within these. A set of design recommendations and corresponding methods to support analysts in accomplishing high-dimensional data analysis tasks are then presented. Our arguments and decisions are informed by observations gathered over a series of analysis sessions with analysts from finance. We document observations and recommendations from this study and present evidence on how our approach contributes to the efficiency and productivity of interactive visual analysis sessions involving high-dimensional data.},
  doi = {10.1109/TVCG.2016.2598470},
  issn = {1077-2626},
  pdf = {pdfs/2016-11-04--Turkay-2017-VIS.pdf},
  images = {images/Turkay-2017-VIS.png},
  thumbnails = {images/Turkay-2017-VIS.png}
}
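As a concrete flavor of the progressive computations discussed in this abstract, the sketch below streams a high-dimensional data set through scikit-learn's IncrementalPCA and yields a refined 2D projection after every chunk, so a linked view could redraw between updates. The function name, the chunk size, and the choice of PCA as the integrated method are illustrative assumptions, not the paper's design; the sketch also assumes each chunk holds at least n_components rows, as partial_fit requires.

    import numpy as np
    from sklearn.decomposition import IncrementalPCA

    def progressive_projection(data, chunk_size=1000, n_components=2):
        # Online PCA: each partial_fit refines the component estimate
        # using only the next chunk, so partial results arrive early.
        ipca = IncrementalPCA(n_components=n_components)
        for end in range(chunk_size, len(data) + chunk_size, chunk_size):
            ipca.partial_fit(data[end - chunk_size:end])
            # Project everything seen so far with the current estimate.
            yield ipca.transform(data[:end])

A consuming loop such as "for proj in progressive_projection(X): update_scatterplot(proj)" keeps the human-computer dialogue alive during the computation; update_scatterplot stands in for whatever redraw hook the surrounding application provides.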