---
---
@string{aps = {American Physical Society}}
@article{ChavesDePlaza2025,
abbr = {},
bibtex_show = {true},
title = {Implementation of Delineation Error Detection Systems in Time-Critical Radiotherapy: Do AI-Supported Optimization and Human Preferences Meet?},
author = {Chaves-de-Plaza, Nicolas F. and Mody, Prerak and Hildebrandt, Klaus and Staring, Marius and Astreinidou, Eleftheria and de Ridder, Mischa and de Ridder, Huib and Vilanova, Anna and van Egmond, Rene},
journal = {Cognition, Technology & Work},
volume = {},
pages = {},
month = {},
year = {2025},
pdf = {2025_j_CTW.pdf},
html = {https://doi.org/10.1007/s10111-024-00784-4},
arxiv = {},
code = {},
abstract = {Artificial Intelligence (AI)-based auto-delineation technologies rapidly delineate multiple structures of interest like organs-at-risk and tumors in 3D medical images, reducing personnel load and facilitating time-critical therapies. Despite its accuracy, the AI may produce flawed delineations, requiring clinician attention. Quality assessment (QA) of these delineations is laborious and demanding. Delineation error detection systems (DEDS) aim to aid QA, yet questions linger about potential challenges to their adoption and time-saving potential. To address these queries, we first conducted a user study with two clinicians from Holland Proton Therapy Center, a Dutch cancer treatment center. Based on the study's findings about the clinicians' error detection workflows with and without DEDS assistance, we developed a simulation model of the QA process, which we used to assess different error detection workflows on a retrospective cohort of 42 head and neck cancer patients. Results suggest possible time savings, provided the per-slice analysis time stays close to the current baseline and trading off delineation quality is acceptable. Our findings encourage the development of user-centric delineation error detection systems and provide a new way to model and evaluate these systems' potential clinical value.},
}
@article{Malimban2024,
abbr = {},
bibtex_show = {true},
title = {A simulation framework for preclinical proton irradiation workflow},
author = {Malimban, Justin and Ludwig, Felix and Lathouwers, Danny and Staring, Marius and Verhaegen, Frank and Brandenburg, Sytze},
journal = {Physics in Medicine and Biology},
volume = {69},
pages = {215040},
month = {},
year = {2024},
pdf = {2024_j_PMB.pdf},
html = {https://doi.org/10.1088/1361-6560/ad897f},
arxiv = {},
code = {},
abstract = {<b>Objective:</b> The integration of proton beamlines with X-ray imaging/irradiation platforms has opened up possibilities for image-guided Bragg peak irradiations in small animals. Such irradiations allow selective targeting of normal tissue substructures and tumours. However, their small size and location pose challenges in designing experiments. This work presents a simulation framework useful for optimizing beamlines, imaging protocols, and design of animal experiments. The usage of the framework is demonstrated, mainly focusing on the imaging part.<br><b>Approach:</b> The fastCAT toolkit was modified with Monte Carlo (MC)-calculated primary and scatter data of a small animal imager for the simulation of micro-CT scans. The simulated CT of a mini-calibration phantom from fastCAT was validated against a full MC TOPAS CT simulation. A realistic beam model of a preclinical proton facility was obtained from beam transport simulations to create irradiation plans in matRad. Simulated CT images of a digital mouse phantom were generated using single-energy CT (SECT) and dual-energy CT (DECT) protocols and their accuracy in proton stopping power ratio (SPR) estimation and their impact on calculated proton dose distributions in a mouse were evaluated.<br><b>Main Results:</b> The CT numbers from fastCAT agree within 11 HU with TOPAS except for materials at the centre of the phantom. Discrepancies for central inserts are caused by beam hardening issues. The root mean square deviation in the SPR for the best SECT (90kV/Cu) and DECT (50kV/Al-90kV/Al) protocols are 3.7% and 1.0%, respectively. Dose distributions calculated for SECT and DECT datasets revealed range shifts <0.1 mm, gamma pass rates (3%/0.1mm) greater than 99%, and no substantial dosimetric differences for all structures. The outcomes suggest that SECT is sufficient for proton treatment planning in animals.<br><b>Significance:</b> The framework is a useful tool for the development of an optimized experimental configuration without using animals and beam time.},
}
@article{Jia2024b,
abbr = {},
bibtex_show = {true},
title = {Explainable fully automated CT scoring of interstitial lung disease for patients suspected of systemic sclerosis by cascaded regression neural networks and its comparison with experts},
author = {Jia, Jingnan and Hern{\'a}ndez Gir{\'o}n, Irene and Schouffoer, Anne A. and De Vries-Bouwstra, Jeska K. and Ninaber, Maarten K. and Korving, Julie C. and Staring, Marius and Kroft, Lucia J.M. and Stoel, Berend C.},
journal = {Scientific Reports},
volume = {14},
pages = {26666},
month = {},
year = {2024},
pdf = {2024_j_SR.pdf},
html = {https://doi.org/10.1038/s41598-024-78393-4},
arxiv = {},
code = {},
abstract = {Visual scoring of interstitial lung disease in systemic sclerosis (SSc-ILD) from CT scans is laborious, subjective and time-consuming. This study aims to develop a deep learning framework to automate SSc-ILD scoring. The automated framework is a cascade of two neural networks. The first network selects the craniocaudal positions of the five scoring levels. Subsequently, for each level, the second network estimates the ratio of three patterns to the total lung area: the total extent of disease (TOT), ground glass (GG) and reticulation (RET). To overcome the score imbalance in the second network, we propose a method to augment the training dataset with synthetic data. To explain the network's output, a heat map method is introduced to highlight the candidate interstitial lung disease regions. The explainability of heat maps was evaluated by two human experts and a quantitative method that uses the heat map to produce the score. The results show that our framework achieved a κ of 0.66, 0.58, and 0.65, for the TOT, GG and RET scoring, respectively. Both experts agreed with the heat maps in 91%, 90% and 80% of cases, respectively. Therefore, it is feasible to develop a framework for automated SSc-ILD scoring, which performs competitively with human experts and provides high-quality explanations using heat maps. Confirming the model's generalizability is needed in future studies.},
}
@article{Jia2024a,
abbr = {},
bibtex_show = {true},
title = {Using 3D point cloud and graph-based neural networks to improve the estimation of pulmonary function tests from chest CT},
author = {Jia, Jingnan and Yu, Bo and Mody, Prerak and Ninaber, Maarten K. and Schouffoer, Anne A. and Kroft, Lucia J.M. and Staring, Marius and Stoel, Berend C.},
journal = {Computers in Biology and Medicine},
volume = {182},
pages = {109192},
month = {November},
year = {2024},
pdf = {2024_j_CMB.pdf},
html = {https://doi.org/10.1016/j.compbiomed.2024.109192},
arxiv = {},
code = {https://github.com/Jingnan-Jia/PFT_regression},
abstract = {Pulmonary function tests (PFTs) are important clinical metrics to measure the severity of interstitial lung disease for systemic sclerosis patients. However, PFTs cannot always be performed by spirometry if there is a risk of disease transmission or other contraindications. In addition, it is unclear how lung function is affected by changes in lung vessels. Convolutional neural networks (CNNs) have been previously proposed to estimate PFTs from chest CT scans (CNN-CT) and extracted vessels (CNN-Vessel). Due to GPU memory constraints, however, these networks used down-sampled images, which causes a loss of information on small vessels. Previous work based on CNNs has indicated that detailed vessel information from CT scans can be helpful for PFT estimation. Therefore, this paper proposes to use a point cloud neural network (PNN-Vessel) and graph neural network (GNN-Vessel) to estimate PFTs from point cloud and graph-based representations of pulmonary vessel centerlines, respectively. After that, we perform multiple variable step-wise regression analysis to explore if vessel-based networks can contribute to the PFT estimation, in addition to CNN-CT. Results showed that both PNN-Vessel and GNN-Vessel outperformed CNN-Vessel, by 14% and 4%, respectively, when averaged across the ICC scores of four PFT metrics. In addition, compared to CNN-Vessel, PNN-Vessel used 30% of the training time (1.1 hours) and 7% of the parameters (2.1 M) and GNN-Vessel used only 7% of the training time (0.25 hours) and 0.7% of the parameters (0.2 M). Our multiple variable regression analysis still verified that more detailed vessel information could provide further explanation for PFT estimation from anatomical imaging.},
}
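
The step-wise regression mentioned in the abstract above can be illustrated with a small forward-selection loop. This is a hedged sketch on synthetic data, not the paper's analysis; the predictor names (cnn_ct, pnn_vessel, gnn_vessel), the synthetic PFT values, and the stopping rule are assumptions.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

rng = np.random.default_rng(0)
n = 200
pft_true = rng.normal(100, 15, n)                   # e.g. FVC, in percent predicted
preds = {
    "cnn_ct": pft_true + rng.normal(0, 8, n),       # CT-based estimate (assumed noise)
    "pnn_vessel": pft_true + rng.normal(0, 10, n),  # point-cloud-based estimate
    "gnn_vessel": pft_true + rng.normal(0, 12, n),  # graph-based estimate
}

selected, remaining = [], list(preds)
best_r2 = -np.inf
while remaining:
    # Try each remaining predictor on top of the current selection.
    scores = {}
    for name in remaining:
        X = np.column_stack([preds[p] for p in selected + [name]])
        model = LinearRegression().fit(X, pft_true)
        scores[name] = r2_score(pft_true, model.predict(X))
    name = max(scores, key=scores.get)
    if scores[name] - best_r2 < 1e-3:               # stop when the gain is negligible
        break
    best_r2 = scores[name]
    selected.append(name)
    remaining.remove(name)

print("selected predictors:", selected, "R^2 = %.3f" % best_r2)
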
@article{Mody:2024b,
abbr = {},
bibtex_show = {true},
title = {Improving Uncertainty-Error Correspondence in Deep Bayesian Medical Image Segmentation},
author = {Mody, Prerak and Chaves-de-Plaza, Nicolas and Rao, Chinmay and Astreinidou, Eleftheria and de Ridder, Mischa and Hoekstra, Nienke and Hildebrandt, Klaus and Staring, Marius},
journal = {The Journal of Machine Learning for Biomedical Imaging},
volume = {2},
pages = {1048 -- 1082},
month = {August},
year = {2024},
pdf = {2024_j_MELBAb.pdf},
html = {https://doi.org/10.59275/j.melba.2024-5gc8},
arxiv = {},
code = {https://github.com/prerakmody/bayesuncertainty-error-correspondence},
abstract = {Increased usage of automated tools like deep learning in medical image segmentation has alleviated the bottleneck of manual contouring. This has shifted manual labour to quality assessment (QA) of automated contours, which involves detecting errors and correcting them. A potential solution to semi-automated QA is to use deep Bayesian uncertainty to recommend potentially erroneous regions, thus reducing time spent on error detection. Previous work has investigated the correspondence between uncertainty and error; however, no work has been done on improving the ``utility'' of Bayesian uncertainty maps such that uncertainty is only present in inaccurate regions and not in accurate ones. Our work trains the FlipOut model with the Accuracy-vs-Uncertainty (AvU) loss, which promotes uncertainty to be present only in inaccurate regions. We apply this method on datasets of two radiotherapy body sites, i.e. head-and-neck CT and prostate MR scans. Uncertainty heatmaps (i.e. predictive entropy) are evaluated against voxel inaccuracies using Receiver Operating Characteristic (ROC) and Precision-Recall (PR) curves. Numerical results show that when compared to the Bayesian baseline the proposed method successfully suppresses uncertainty for accurate voxels, with similar presence of uncertainty for inaccurate voxels. Code to reproduce experiments is available at <a href="https://github.com/prerakmody/bayesuncertainty-error-correspondence">https://github.com/prerakmody/bayesuncertainty-error-correspondence</a>.}
}
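
The Accuracy-vs-Uncertainty (AvU) statistic that the AvU loss promotes can be written down compactly: uncertainty should sit on inaccurate voxels only. Below is a minimal numpy sketch assuming a hard uncertainty threshold; during training the loss uses a differentiable relaxation of these counts, which this sketch does not reproduce.

import numpy as np

def avu(pred, label, uncertainty, u_thresh):
    accurate = pred == label
    uncertain = uncertainty > u_thresh
    n_ac = np.sum(accurate & ~uncertain)   # accurate and certain   (desired)
    n_au = np.sum(accurate & uncertain)    # accurate and uncertain (penalized)
    n_ic = np.sum(~accurate & ~uncertain)  # inaccurate and certain (penalized)
    n_iu = np.sum(~accurate & uncertain)   # inaccurate and uncertain (desired)
    return (n_ac + n_iu) / max(n_ac + n_au + n_ic + n_iu, 1)

rng = np.random.default_rng(0)
label = rng.integers(0, 2, (64, 64))
pred = label.copy(); pred[:8] = 1 - pred[:8]         # inject errors in the top rows
entropy = rng.random((64, 64)); entropy[:8] += 0.5   # uncertainty raised on the errors
print("AvU = %.3f" % avu(pred, label, entropy, u_thresh=0.7))
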
@article{Chaves-de-Plaza:2024b,
abbr = {CGF},
bibtex_show = {true},
title = {Depth for Multi-Modal Contour Ensembles},
author = {Chaves-de-Plaza, N.F. and Molenaar, M. and Mody, P. and Staring, M. and van Egmond, R. and Eisemann, E. and Vilanova, A. and Hildebrandt, K.},
journal = {Computer Graphics Forum},
volume = {43},
number = {3},
pages = {e15083},
year = {2024},
pdf = {2024_j_CGF.pdf},
html = {https://doi.org/10.1111/cgf.15083},
code = {https://github.com/chadepl/paper-multimodal-contour-depth},
abstract = {The contour depth methodology enables non-parametric summarization of contour ensembles by extracting their representatives, confidence bands, and outliers for visualization (via contour boxplots) and robust downstream procedures. We address two shortcomings of these methods. Firstly, we significantly expedite the computation and recomputation of Inclusion Depth (ID), introducing a linear-time algorithm for epsilon ID, a variant used for handling ensembles with contours with multiple intersections. We also present the inclusion matrix, which contains the pairwise inclusion relationships between contours, and leverage it to accelerate the recomputation of ID. Secondly, extending beyond the single distribution assumption, we present the Relative Depth (ReD), a generalization of contour depth for ensembles with multiple modes. Building upon the linear-time eID, we introduce CDclust, a clustering algorithm that untangles ensemble modes of variation by optimizing ReD. Synthetic and real datasets from medical image segmentation and meteorological forecasting showcase the speed advantages, illustrate the use case of progressive depth computation and enable non-parametric multimodal analysis. To promote research and adoption, we offer the contour-depth Python package.},
}
@article{Mody:2024a,
abbr = {PhIRO},
bibtex_show = {true},
title = {Large-scale dose evaluation of deep learning organ contours in head-and-neck radiotherapy by leveraging existing plans},
author = {Mody, Prerak and Huiskes, Merle and Chaves-de-Plaza, Nicolas and Onderwater, Alice and Lamsma, Rense and Hildebrandt, Klaus and Hoekstra, Nienke and Astreinidou, Eleftheria and Staring, Marius and Dankers, Frank},
journal = {Physics and Imaging in Radiation Oncology},
volume = {30},
pages = {100572},
month = {April},
year = {2024},
pdf = {2024_j_PHIRO.pdf},
html = {https://doi.org/10.1016/j.phro.2024.100572},
code = {https://github.com/prerakmody/dose-eval-via-existing-plan-parameters},
abstract = {<b>Background and Purpose:</b> Retrospective dose evaluation for organ-at-risk auto-contours has previously used small cohorts due to additional manual effort required for treatment planning on auto-contours. We aimed to do this at large scale, by a) proposing and assessing an automated plan optimization workflow that used existing clinical plan parameters and b) using it for head-and-neck auto-contour dose evaluation.<br><b>Materials and Methods:</b> Our automated workflow emulated our clinic's treatment planning protocol and reused existing clinical plan optimization parameters. This workflow recreated the original clinical plan (P<sub>OG</sub>) with manual contours (P<sub>MC</sub>) and evaluated the dose effect (P<sub>OG</sub> - P<sub>MC</sub>) on 70 photon and 30 proton plans of head-and-neck patients. As a use-case, the same workflow (and parameters) created a plan using auto-contours (P<sub>AC</sub>) of eight head-and-neck organs-at-risk from a commercial tool and evaluated their dose effect (P<sub>MC</sub> - P<sub>AC</sub>).<br><b>Results:</b> For plan recreation (P<sub>OG</sub> - P<sub>MC</sub>), our workflow had a median impact of 1.0% and 1.5% across dose metrics of auto-contours, for photon and proton respectively. Computer time of automated planning was 25% (photon) and 42% (proton) of manual planning time. For auto-contour evaluation (P<sub>MC</sub> - P<sub>AC</sub>), we noticed an impact of 2.0% and 2.6% for photon and proton radiotherapy. All evaluations had a median ΔNTCP (Normal Tissue Complication Probability) less than 0.3%.<br><b>Conclusions:</b> The plan replication capability of our automated program provides a blueprint for other clinics to perform auto-contour dose evaluation with large patient cohorts. Finally, despite geometric differences, auto-contours had a minimal median dose impact, hence inspiring confidence in their utility and facilitating their clinical adoption.}
}
@article{Stoel:2024,
abbr = {Nat. Rev. Rheumatol.},
bibtex_show = {true},
title = {Deep Learning in Rheumatologic Image Interpretation},
author = {Stoel, Berend C. and Staring, Marius and Reijnierse, Monique and van der Helm-van Mil, Annette H.M.},
journal = {Nature Reviews Rheumatology},
volume = {20},
pages = {182 -- 195},
month = {March},
year = {2024},
pdf = {2024_j_NRR.pdf},
html = {https://doi.org/10.1038/s41584-023-01074-5},
abstract = {Artificial intelligence techniques, specifically deep learning, have already affected daily life in a wide range of areas. Likewise, initial applications have been explored in rheumatology. Deep learning might not easily surpass the accuracy of classic techniques when performing classification or regression on low-dimensional numerical data. With images as input, however, deep learning has become so successful that it has already outperformed the majority of conventional image-processing techniques developed during the past 50 years. As with any new imaging technology, rheumatologists and radiologists need to consider adapting their arsenal of diagnostic, prognostic and monitoring tools, and even their clinical role and collaborations. This adaptation requires a basic understanding of the technical background of deep learning, to efficiently utilize its benefits but also to recognize its drawbacks and pitfalls, as blindly relying on deep learning might be at odds with its capabilities. To facilitate such an understanding, it is necessary to provide an overview of deep-learning techniques for automatic image analysis in detecting, quantifying, predicting and monitoring rheumatic diseases, and of currently published deep-learning applications in radiological imaging for rheumatology, with critical assessment of possible limitations, errors and confounders, and conceivable consequences for rheumatologists and radiologists in clinical practice.}
}
@article{Chen:2024,
abbr = {Melba},
bibtex_show = {true},
author = {Chen, Yunjie and Staring, Marius and Neve, Olaf M. and Romeijn, Stephan R. and Hensen, Erik F. and Verbist, Berit M. and Wolterink, Jelmer M. and Tao, Qian},
title = {CoNeS: Conditional neural fields with shift modulation for multi-sequence MRI translation},
journal = {The Journal of Machine Learning for Biomedical Imaging},
volume = {2},
pages = {657 -- 685},
year = {2024},
pdf = {2024_j_MELBAa.pdf},
html = {https://doi.org/10.59275/j.melba.2024-d61g},
arxiv = {2309.03320},
code = {https://github.com/cyjdswx/CoNeS.git},
abstract = {Multi-sequence magnetic resonance imaging (MRI) has found wide applications in both modern clinical studies and deep learning research. However, in clinical practice, it frequently occurs that one or more of the MRI sequences are missing due to different image acquisition protocols or contrast agent contraindications of patients, limiting the utilization of deep learning models trained on multi-sequence data. One promising approach is to leverage generative models to synthesize the missing sequences, which can serve as a surrogate acquisition. State-of-the-art methods tackling this problem are based on convolutional neural networks (CNN) which usually suffer from spectral biases, resulting in poor reconstruction of high-frequency fine details. In this paper, we propose Conditional Neural fields with Shift modulation (CoNeS), a model that takes voxel coordinates as input and learns a representation of the target images for multi-sequence MRI translation. The proposed model uses a multi-layer perceptron (MLP) instead of a CNN as the decoder for pixel-to-pixel mapping. Hence, each target image is represented as a neural field that is conditioned on the source image via shift modulation with a learned latent code. Experiments on BraTS 2018 and an in-house clinical dataset of vestibular schwannoma patients showed that the proposed method outperformed state-of-the-art methods for multi-sequence MRI translation both visually and quantitatively. Moreover, we conducted spectral analysis, showing that CoNeS was able to overcome the spectral bias issue common in conventional CNN models. To further evaluate the usage of synthesized images in clinical downstream tasks, we tested a segmentation network using the synthesized images at inference. The results showed that CoNeS improved the segmentation performance when some MRI sequences were missing and outperformed other synthesis models. We concluded that neural fields are a promising technique for multi-sequence MRI translation.},
}
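
Shift modulation as described in the abstract can be sketched with a toy coordinate MLP whose hidden activations receive per-layer additive shifts computed from a latent code. The layer sizes, latent dimension, and the omitted image encoder that would produce the latent code are illustrative assumptions, not the CoNeS architecture.

import torch
import torch.nn as nn

class ShiftModulatedMLP(nn.Module):
    def __init__(self, latent_dim=32, hidden=64, n_layers=3):
        super().__init__()
        self.layers = nn.ModuleList(
            [nn.Linear(3 if i == 0 else hidden, hidden) for i in range(n_layers)])
        # One additive shift vector per hidden layer, predicted from the latent code.
        self.to_shifts = nn.Linear(latent_dim, n_layers * hidden)
        self.head = nn.Linear(hidden, 1)
        self.hidden, self.n_layers = hidden, n_layers

    def forward(self, coords, latent):
        # coords: (N, 3) voxel coordinates; latent: (latent_dim,) code from the source image.
        shifts = self.to_shifts(latent).view(self.n_layers, self.hidden)
        h = coords
        for i, layer in enumerate(self.layers):
            h = torch.relu(layer(h) + shifts[i])    # shift modulation of the activations
        return self.head(h)                         # predicted target intensity

net = ShiftModulatedMLP()
out = net(torch.rand(10, 3), torch.rand(32))
print(out.shape)  # torch.Size([10, 1])
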
@article{Chaves-de-Plaza:2024a,
abbr = {TVCG},
bibtex_show = {true},
author = {Chaves-de-Plaza, Nicolas and Mody, Prerak P. and Staring, Marius and van Egmond, Ren{\'e} and Vilanova, Anna and Hildebrandt, Klaus},
title = {Inclusion Depth for Contour Ensembles},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {},
number = {},
pages = {},
year = {2024},
pdf = {2024_j_TVCG.pdf},
html = {https://doi.org/10.1109/TVCG.2024.3350076},
arxiv = {},
code = {},
abstract = {Ensembles of contours arise in various applications like simulation, computer-aided design, and semantic segmentation. Uncovering ensemble patterns and analyzing individual members is a challenging task that suffers from clutter. Ensemble statistical summarization can alleviate this issue by permitting analysis of ensembles' distributional components like the mean and median, confidence intervals, and outliers. Contour boxplots, powered by Contour Band Depth (CBD), are a popular nonparametric ensemble summarization method that benefits from CBD's generality, robustness, and theoretical properties. In this work, we introduce Inclusion Depth (ID), a new notion of contour depth with three defining characteristics. First, ID is a generalization of functional Half-Region Depth, which offers several theoretical guarantees. Second, ID relies on a simple principle: the inside/outside relationships between contours. This facilitates implementing ID and understanding its results. Third, the computational complexity of ID scales quadratically in the number of members of the ensemble, improving CBD's cubic complexity. In practice, this also speeds up the computation, enabling the use of ID for exploring large contour ensembles or in contexts requiring multiple depth evaluations like clustering. In a series of experiments on synthetic data and case studies with meteorological and segmentation data, we evaluate ID's performance and demonstrate its capabilities for the visual analysis of contour ensembles.},
}
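
The inside/outside principle behind Inclusion Depth can be illustrated for contours represented as binary masks; note the quadratic number of pairwise checks, matching the complexity stated above. The normalization used here is an assumption; consult the paper or the contour-depth package for the reference definition.

import numpy as np

def inclusion_depth(masks):
    n = len(masks)
    depth = np.zeros(n)
    for i in range(n):
        # Members whose region fully contains member i.
        n_covered = sum(np.all(masks[i] <= m) for j, m in enumerate(masks) if j != i)
        # Members fully contained inside member i.
        n_covering = sum(np.all(m <= masks[i]) for j, m in enumerate(masks) if j != i)
        # Depth is driven by the weaker of the two inclusion counts (assumed normalization).
        depth[i] = min(n_covered, n_covering) / (n - 1)
    return depth

# Nested circles: the middle contour should be deepest.
yy, xx = np.mgrid[:64, :64]
masks = [((yy - 32) ** 2 + (xx - 32) ** 2 < r ** 2) for r in (10, 15, 20, 25, 30)]
print(inclusion_depth(masks))  # peaks for the middle radius
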
@article{Beljaards:2024,
abbr = {},
bibtex_show = {true},
author = {Beljaards, Laurens and Pezzotti, Nicola and Rao, Chinmay and Doneva, Mariya and van Osch, Matthias J.P. and Staring, Marius},
title = {AI-Based Motion Artifact Severity Estimation in Undersampled MRI Allowing for Selection of Appropriate Reconstruction Models},
journal = {Medical Physics},
volume = {51},
number = {5},
pages = {3555 -- 3565},
year = {2024},
pdf = {2024_j_MP.pdf},
html = {https://doi.org/10.1002/mp.16918},
arxiv = {},
code = {},
abstract = {<b>Background:</b> MR acquisition is a time-consuming process, making it susceptible to patient motion during scanning. Even motion on the order of a millimeter can introduce severe blurring and ghosting artifacts, potentially necessitating re-acquisition. MRI can be accelerated by acquiring only a fraction of k-space, combined with advanced reconstruction techniques leveraging coil sensitivity profiles and prior knowledge. AI-based reconstruction techniques have recently been popularized, but generally assume an ideal setting without intra-scan motion.<br><b>Purpose:</b> To retrospectively detect and quantify the severity of motion artifacts in undersampled MRI data. This may prove valuable as a safety mechanism for AI-based approaches, provide useful information to the reconstruction method, or prompt for re-acquisition while the patient is still in the scanner.<br><b>Methods:</b> We developed a deep learning approach that detects and quantifies motion artifacts in undersampled brain MRI. We demonstrate that synthetically motion-corrupted data can be leveraged to train the CNN-based motion artifact estimator, generalizing well to real-world data. Additionally, we leverage the motion artifact estimator by using it as a selector for a motion-robust reconstruction model in case a considerable amount of motion was detected, and a high data consistency model otherwise.<br><b>Results:</b> Training and validation were performed on 4387 and 1304 synthetically motion-corrupted images and their uncorrupted counterparts, respectively. Testing was performed on undersampled in vivo motion-corrupted data from 28 volunteers, where our model distinguished head motion from motion-free scans with 91% and 96% accuracy when trained on synthetic and on real data, respectively. It predicted a manually defined quality label (`Good', `Medium' or `Bad' quality) correctly 76% and 85% of the time when trained on synthetic and real data, respectively. When used as a selector, it selected the appropriate reconstruction network 93% of the time, achieving near-optimal SSIM values.<br><b>Conclusions:</b> The proposed method quantified motion artifact severity in undersampled MRI data with high accuracy, enabling real-time motion artifact detection that can help improve the safety and quality of AI-based reconstructions.},
}
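
The selector use-case in the abstract reduces to a simple routing rule. A minimal sketch, where the threshold and the two model names are hypothetical placeholders:

def select_reconstruction(severity_score, threshold=0.5):
    """severity_score in [0, 1] from the CNN-based artifact estimator (assumed scale)."""
    # Route heavily corrupted scans to the motion-robust model,
    # clean scans to the high data-consistency model.
    return "motion_robust_model" if severity_score > threshold else "high_dc_model"

for s in (0.1, 0.8):
    print(s, "->", select_reconstruction(s))
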
@article{Jia:2023,
abbr = {},
bibtex_show = {true},
author = {Jia, Jingnan and Marges, Emiel R. and Ninaber, Maarten K. and Kroft, Lucia J.M. and Schouffoer, Anne A. and Staring, Marius and Stoel, Berend C.},
title = {Automatic pulmonary function estimation from chest CT scans using deep regression neural networks: the relation between structure and function in systemic sclerosis},
journal = {IEEE Access},
volume = {11},
pages = {135272 -- 135282},
month = {November},
year = {2023},
pdf = {2023_j_Access.pdf},
html = {https://doi.org/10.1109/ACCESS.2023.3337639},
arxiv = {},
code = {},
abstract = {Pulmonary function test (PFT) plays an important role in screening and following up pulmonary involvement in systemic sclerosis (SSc). However, some patients are not able to perform PFT due to contraindications. In addition, it is unclear how lung function is affected by changes in lung structure in SSc. Therefore, this study aims to explore the potential of automatically estimating PFT results from chest CT scans of SSc patients and how different regions influence the estimation of PFT values. Deep regression networks were developed with transfer learning to estimate PFT from 316 SSc patients. Segmented lungs and vessels were used to mask the CT images to train the network with different inputs: the entire CT scan, lungs-only, and vessels-only. The network trained on entire CT scans with transfer learning achieved an ICC of 0.71, 0.76, 0.80, and 0.81 for the estimation of DLCO, FEV1, FVC and TLC, respectively. The performance of the networks gradually decreased when trained on data from lungs-only and vessels-only. Regression attention maps showed that regions close to large vessels are highlighted more than other regions, and occasionally regions outside the lungs are highlighted. These experiments indicate that, apart from the lungs and large vessels, other regions contribute to the estimation of PFTs. In addition, adding manually designed biomarkers increased the correlation (R) from 0.75, 0.74, 0.82, and 0.83 to 0.81, 0.83, 0.88, and 0.90, respectively. This means that manually designed imaging biomarkers can still contribute to explaining the relation between lung function and structure.},
}
@article{Neve:2023,
abbr = {},
bibtex_show = {true},
author = {Neve, Olaf M. and Romeijn, Stephan R. and Chen, Yunjie and Nagtegaal, Larissa and Grootjans, Willem and Jansen, Jeroen C. and Staring, Marius and Verbist, Berit M. and Hensen, Erik F.},
title = {Automated 2-dimensional measurement of vestibular schwannoma: validity and accuracy of an artificial intelligence algorithm},
journal = {Otolaryngology - Head and Neck Surgery},
volume = {169},
number = {6},
pages = {1582 -- 1589},
month = {December},
year = {2023},
pdf = {2023_j_OHNS.pdf},
html = {https://doi.org/10.1002/ohn.470},
arxiv = {},
code = {},
abstract = {<b>Objective.</b> Validation of automated 2-dimensional (2D) diameter measurements of vestibular schwannomas on magnetic resonance imaging (MRI).<br><b>Study Design.</b> Retrospective validation study using 2 data sets containing MRIs of vestibular schwannoma patients.<br><b>Setting.</b> University Hospital in The Netherlands.<br><b>Methods.</b> Two data sets were used, 1 containing 1 scan per patient (n = 134) and the other containing at least 3 consecutive MRIs of 51 patients, all with contrast-enhanced T1 or high-resolution T2 sequences. 2D measurements of the maximal extrameatal diameters in the axial plane were automatically derived from a 3D convolutional neural network and compared to manual measurements by 2 human observers. Intra- and interobserver variabilities were calculated using the intraclass correlation coefficient (ICC), agreement on tumor progression using Cohen's kappa.<br><b>Results.</b> The human intra- and interobserver variability showed a high correlation (ICC: 0.98-0.99) and limits of agreement of 1.7 to 2.1 mm. Comparing the automated to human measurements resulted in ICC of 0.98 (95% confidence interval [CI]: 0.974; 0.987) and 0.97 (95% CI: 0.968; 0.984), with limits of agreement of 2.2 and 2.1 mm for diameters parallel and perpendicular to the posterior side of the temporal bone, respectively. There was satisfactory agreement on tumor progression between automated measurements and human observers (Cohen's κ = 0.77), better than the agreement between the human observers (Cohen's κ = 0.74).<br><b>Conclusion.</b> Automated 2D diameter measurements and growth detection of vestibular schwannomas are at least as accurate as human 2D measurements. In clinical practice, measurements of the maximal extrameatal tumor (2D) diameters of vestibular schwannomas provide important complementary information to total tumor volume (3D) measurements. Combining both in an automated measurement algorithm facilitates clinical adoption.},
}
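
A 2D maximal-diameter measurement from a 3D segmentation could be derived as in the sketch below: pick the axial slice with the largest tumor area and take the largest in-plane point-to-point distance. This is a hedged illustration; the paper measures diameters relative to the posterior side of the temporal bone, which this sketch ignores.

import numpy as np

def max_axial_diameter(mask, spacing=(1.0, 1.0, 1.0)):
    # mask: (z, y, x) boolean array; spacing in mm (assumed axial stacking on axis 0).
    areas = mask.sum(axis=(1, 2))
    z = int(np.argmax(areas))                       # axial slice with the largest area
    pts = np.argwhere(mask[z]) * np.array(spacing[1:])
    d = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    return d.max()                                  # maximal in-plane diameter in mm

mask = np.zeros((8, 32, 32), bool)
mask[4, 10:20, 12:18] = True                        # toy "tumor"
print("diameter: %.1f mm" % max_axial_diameter(mask))
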
@article{Zhai:2023,
abbr = {},
bibtex_show = {true},
author = {Zhai, Zhiwei and Boon, Gudula J.A.M. and Staring, Marius and van Dam, Lisette F. and Kroft, Lucia J.M. and Hernandez Giron, Irene and Ninaber, Maarten K. and Bogaard, Harm Jan and Meijboom, Lilian J. and Vonk Noordegraaf, Anton and Huisman, Menno V. and Klok, Frederikus A. and Stoel, Berend C.},
title = {Automated Quantification of the Pulmonary Vasculature in Pulmonary Embolism and Chronic Thromboembolic Pulmonary Hypertension},
journal = {Pulmonary Circulation},
volume = {13},
number = {2},
pages = {e12223},
year = {2023},
pdf = {2023_j_PC.pdf},
html = {https://doi.org/10.1002/pul2.12223},
arxiv = {},
code = {},
abstract = {The particular mechanical obstruction of pulmonary embolism (PE) and chronic thromboembolic pulmonary hypertension (CTEPH) may affect pulmonary arteries and veins differently. Therefore, we evaluated whether pulmonary vascular morphology and densitometry using CT pulmonary angiography (CTPA) in arteries and veins could distinguish PE from CTEPH.<br>We analyzed CTPA images from a convenience cohort of 16 PE patients, 6 CTEPH patients and 15 controls without PE or CTEPH. Pulmonary vessels were extracted with a graph-cuts method, and separated into arteries and veins using a deep-learning classification method. By analyzing the distribution of vessel radii, vascular morphology was quantified into a slope (α) and intercept (β) for the entire pulmonary vascular tree, and for arteries and veins, separately. To quantify lung perfusion, the median pulmonary vascular density was calculated. As a reference, lung perfusion was also quantified by the contrast enhancement in the parenchymal areas, pulmonary trunk and descending aorta. All quantifications were compared between the three groups.<br>Vascular morphology did not differ between groups, in contrast to vascular density values (both arterial and venous; p-values 0.006 - 0.014). The median vascular density (interquartile range) was -452 (95), -567 (113) and -470 (323) HU, for the PE, control and CTEPH group, respectively. The perfusion curves from all measurements showed different patterns between groups.<br>In this proof-of-concept study, vascular densities, rather than vascular morphology, differentiated between normal and thrombotically obstructed vasculature. For distinction on an individual patient level, further technical improvements are needed both in terms of image acquisition/reconstruction and post-processing.},
}
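
The slope/intercept quantification of vascular morphology mentioned above can be sketched as a line fit to the log-transformed histogram of vessel radii. The binning choice and the synthetic radii below are assumptions, not the paper's exact definition.

import numpy as np

def vascular_morphology(radii_mm, bins=20):
    counts, edges = np.histogram(radii_mm, bins=bins)
    centers = 0.5 * (edges[:-1] + edges[1:])
    keep = counts > 0                               # avoid log(0) for empty bins
    alpha, beta = np.polyfit(np.log(centers[keep]), np.log(counts[keep]), 1)
    return alpha, beta                              # slope and intercept of the fit

rng = np.random.default_rng(0)
radii = rng.pareto(2.5, 5000) + 0.5                 # many small vessels, few large ones
print("alpha=%.2f beta=%.2f" % vascular_morphology(radii))
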
@article{Goedmakers:2022,
abbr = {},
bibtex_show = {true},
author = {Goedmakers, C.M.W and Pereboom, L.M. and Schoones, J.W. and de Leeuw den Bouter, M.L. and Remis, R.F. and Staring, M. and Vleggeert-Lankamp, C.L.A.},
title = {Machine learning for image analysis in the cervical spine: Systematic review of the available models and methods},
journal = {Brain and Spine},
volume = {2},
pages = {101666},
year = {2022},
pdf = {2022_j_BandS.pdf},
html = {https://doi.org/10.1016/j.bas.2022.101666},
arxiv = {},
code = {},
abstract = {<ul><li>Neural network approaches show the most potential for automated image analysis of the cervical spine.</li><li>Fully automatic convolutional neural network (CNN) models are promising Deep Learning methods for segmentation.</li><li>In cervical spine analysis, the biomechanical features are most often studied using finite element models.</li><li>The application of artificial neural networks and support vector machine models looks promising for classification purposes.</li><li>This article provides an overview of the methods for research on computer aided imaging diagnostics of the cervical spine.</li></ul>},
}
@article{Neve:2022,
abbr = {Radiol Artif Intell},
bibtex_show = {true},
author = {Neve, Olaf and Chen, Yunjie and Tao, Qian and Romeijn, Stephan and de Boer, Nick and Grootjans, Willem and Kruit, Mark and Lelieveldt, Boudewijn and Jansen, Jeroen and Hensen, Erik and Verbist, Berit and Staring, Marius},
title = {Fully Automated 3D Vestibular Schwannoma Segmentation with and without Gadolinium Contrast: a multi-center, multi-vendor study},
journal = {Radiology: Artificial Intelligence},
volume = {4},
number = {4},
pages = {e210300},
year = {2022},
pdf = {2022_j_RadAI.pdf},
html = {https://doi.org/10.1148/ryai.210300},
abstract = {<b>Purpose:</b> To develop automated vestibular schwannoma measurements on contrast-enhanced T1- and T2-weighted MRI.<br><b>Material and methods:</b> MRI data from 214 patients in 37 different centers were retrospectively analyzed between 2020 and 2021. Patients with hearing loss (134 vestibular schwannoma positive [mean age ± SD, 54 ± 12 years; 64 men], 80 negative) were randomized to a training and validation set and an independent test set. A convolutional neural network (CNN) was trained using five-fold cross-validation for two models (T1 and T2). Quantitative analyses including Dice index, Hausdorff distance, surface-to-surface distance (S2S), and relative volume error were used to compare the computer and the human delineations. Furthermore, an observer study was performed in which two experienced physicians evaluated both delineations.<br><b>Results:</b> The T1-weighted model showed state-of-the-art performance with a mean S2S distance of less than 0.6 mm for the whole tumor and the intrameatal and extrameatal tumor parts. The whole tumor Dice index and Hausdorff distance were 0.92 and 2.1 mm in the independent test set. T2-weighted images had a mean S2S distance less than 0.6 mm for the whole tumor and the intrameatal and extrameatal tumor parts. Whole tumor Dice index and Hausdorff distance were 0.87 and 1.5 mm in the independent test set. The observer study indicated that the tool was comparable to human delineations in 85-92% of cases.<br><b>Conclusion:</b> The CNN model detected and delineated vestibular schwannomas accurately on contrast-enhanced T1 and T2-weighted MRI and distinguished the clinically relevant difference between intrameatal and extrameatal tumor parts.}
}
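
Two of the quantitative measures quoted above, the Dice index and the relative volume error, are one-liners over binary masks. A small numpy sketch on toy volumes:

import numpy as np

def dice(a, b):
    # 2 |A ∩ B| / (|A| + |B|)
    return 2.0 * np.sum(a & b) / (np.sum(a) + np.sum(b))

def relative_volume_error(a, b):
    # Signed volume difference relative to the reference b.
    return (np.sum(a) - np.sum(b)) / np.sum(b)

a = np.zeros((32, 32, 32), bool); a[8:24, 8:24, 8:24] = True
b = np.zeros_like(a);             b[10:24, 8:24, 8:24] = True
print("Dice=%.3f  RVE=%.3f" % (dice(a, b), relative_volume_error(a, b)))
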
@article{Koolstra:2022,
abbr = {},
bibtex_show = {true},
author = {Koolstra, Kirsten and Staring, Marius and de Bruin, Paul and van Osch, Mathias J.P.},
title = {Subject-specific optimization of background suppression for arterial spin labeling MRI using a feedback loop on the scanner},
journal = {NMR in Biomedicine},
volume = {35},
number = {9},
pages = {e4746},
month = {September},
year = {2022},
pdf = {2022_j_NMR.pdf},
html = {https://doi.org/10.1002/nbm.4746},
arxiv = {},
code = {},
abstract = {Background suppression (BGS) in arterial spin labeling (ASL) MRI leads to a higher temporal SNR (tSNR) of the perfusion images compared to ASL without BGS. The performance of the BGS, however, depends on the tissue relaxation times and on inhomogeneities of the scanner's magnetic fields, which differ between subjects and are unknown at the moment of scanning. Therefore, we developed a feedback loop (FBL) mechanism that optimizes the BGS for each subject in the scanner during acquisition. We implemented the FBL for 2D pseudo-continuous ASL (PCASL) scans with an echo-planar imaging (EPI) readout. After each dynamic scan, acquired ASL images were automatically sent to an external computer and processed with a Python processing tool. Inversion times were optimized on-the-fly using 80 iterations of the Nelder-Mead method, by minimizing the signal intensity in the label image while maximizing the signal intensity in the perfusion image. The performance of this method was first tested in a 4-component phantom. The regularization parameter was then tuned in 6 healthy subjects (3 male, 3 female, age 24-62 years) and set as λ=4 for all other experiments. Resulting ASL images, perfusion images and tSNR maps obtained from the last 20 iterations of the FBL scan were compared to those obtained without BGS and to standard BGS in 12 healthy volunteers (5 male, 7 female, age 24-62 years) (including the 6 volunteers used for tuning of λ). The FBL resulted in perfusion images with a statistically significantly higher tSNR (2.20) compared to standard BGS (1.96) (P < 5 × 10<sup>-3</sup>, two-sided paired t-test). Minimizing signal in the label image furthermore resulted in control images from which approximate changes in perfusion signal can directly be appreciated. This could be relevant to ASL applications that require a high temporal resolution. Future work is needed to minimize the number of initial acquisitions during which the performance of BGS is reduced compared to standard BGS and to extend the technique to 3D ASL.},
}
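
The feedback-loop idea can be mimicked offline with a toy longitudinal-relaxation model standing in for the scanner: Nelder-Mead searches for inversion times that null background tissue signal at readout. The objective, tissue T1 values and readout time below are simplified assumptions; the real loop optimizes on acquired label and perfusion images and includes the regularization weight λ, which this sketch omits.

import numpy as np
from scipy.optimize import minimize

def mz_at_readout(inversion_times, t1, t_readout=4.0):
    """Mz of a tissue after inversion pulses at the given times (seconds)."""
    mz, t = 1.0, 0.0
    for ti in sorted(inversion_times):
        mz = 1.0 - (1.0 - mz) * np.exp(-(ti - t) / t1)   # T1 relaxation towards M0 = 1
        mz, t = -mz, ti                                  # inversion pulse flips Mz
    return 1.0 - (1.0 - mz) * np.exp(-(t_readout - t) / t1)

def objective(tis):
    # Suppress two background tissues with gray/white-matter-like T1s (assumed values).
    return abs(mz_at_readout(tis, t1=1.2)) + abs(mz_at_readout(tis, t1=0.8))

res = minimize(objective, x0=np.array([1.0, 3.0]), method="Nelder-Mead")
print("optimized inversion times (s):", np.round(res.x, 3))
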
@article{Brink:2022,
abbr = {},
bibtex_show = {true},
author = {Brink, Wyger M. and Yousefi, Sahar and Bhatnagar, Prerna and Remis, Rob F. and Staring, Marius and Webb, Andrew G.},
title = {Personalised Local SAR Prediction for Parallel Transmit Neuroimaging at 7T from a Single T1-weighted Dataset},
journal = {Magnetic Resonance in Medicine},
volume = {88},
number = {1},
pages = {464 -- 475},
month = {July},
year = {2022},
pdf = {2022_j_MRM.pdf},
html = {https://doi.org/10.1002/mrm.29215},
arxiv = {},
code = {https://github.com/wygerbrink/PersonalizedDosimetry},
abstract = {<b>Purpose.</b> Parallel RF transmission (PTx) is one of the key technologies enabling high quality imaging at ultrahigh field strengths (≥7T). Compliance with regulatory limits on the local specific absorption rate (SAR) typically involves over-conservative safety margins to account for intersubject variability, which negatively affect the utilization of ultra-high field MR. In this work, we present a method to generate a subject-specific body model from a single T1-weighted dataset for personalized local SAR prediction in PTx neuroimaging at 7T.<br><b>Methods.</b> Multi-contrast data were acquired at 7T (N=10) to establish ground truth segmentations in eight tissue types. A 2.5D convolutional neural network was trained using the T1-weighted data as input in a leave-one-out cross-validation study. The segmentation accuracy was evaluated through local SAR simulations in a quadrature birdcage as well as a PTx coil model.<br><b>Results.</b> The network-generated segmentations reached overall Dice coefficients of 86.7% ± 6.7% (mean ± standard deviation) and were shown to successfully address the severe intensity bias and contrast variations typical of 7T. The errors in peak local SAR were below 3.0% in the quadrature birdcage. Results obtained in the PTx configuration indicated that a safety margin of 6.3% ensures conservative local SAR estimates in 95% of the random RF shims, compared to an average overestimation of 34% in the generic "one-size-fits-all" approach.<br><b>Conclusion.</b> A subject-specific body model can be automatically generated from a single T1-weighted dataset by means of deep learning, providing the necessary inputs for accurate and personalized local SAR predictions in PTx neuroimaging at 7T.},
}
@article{Malimban:2022,
abbr = {},
bibtex_show = {true},
author = {Malimban, Justin and Lathouwers, Danny and Qian, Haibin and Verhaegen, Frank and Wiedemann, Julia and Brandenburg, Sytze and Staring, Marius},
title = {Deep learning-based segmentation of the thorax in mouse micro-CT scans},
journal = {Scientific Reports},
volume = {12},
number = {1},
pages = {1822},
year = {2022},
pdf = {2022_j_SR.pdf},
html = {https://doi.org/10.1038/s41598-022-05868-7},
arxiv = {},
code = {},
abstract = {For image-guided small animal irradiations, the whole workflow of imaging, organ contouring, irradiation planning, and delivery is typically performed in a single session requiring continuous administration of anesthetic agents. Automating contouring leads to a faster workflow, which limits exposure to anesthesia, thereby reducing its impact on experimental results and on animal wellbeing. Here, we trained the 2D and 3D U-Net architectures of no-new-Net (nnU-Net) for autocontouring of the thorax in mouse micro-CT images. We trained the models only on native CTs and evaluated their performance using an independent testing dataset (i.e., native CTs not included in the training and validation). Unlike previous studies, we also tested the model performance on an external dataset (i.e., contrast-enhanced CTs) to see how well they predict on CTs completely different from what they were trained on. We also assessed the interobserver variability using the generalized conformity index (CIgen) among three observers, providing a stronger human baseline for evaluating automated contours than previous studies. Lastly, we showed the benefit on the contouring time compared to manual contouring. The results show that 3D models of nnU-Net achieve superior segmentation accuracy and are more robust to unseen data than 2D models. For all target organs, the mean surface distance (MSD) and the Hausdorff distance (95p HD) of the best performing model for this task (nnU-Net 3d_fullres) are within 0.16 mm and 0.60 mm, respectively. These values are below the minimum required contouring accuracy of 1 mm for small animal irradiations, and improve significantly upon the state-of-the-art 2D U-Net-based AIMOS method. Moreover, the conformity indices of the 3d_fullres model also compare favourably to the interobserver variability for all target organs, whereas the 2D models perform poorly in this regard. Importantly, the 3d_fullres model offers 98% reduction in contouring time.},
}
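
The two contouring-accuracy metrics quoted above, mean surface distance (MSD) and 95th-percentile Hausdorff distance (95p HD), can be computed from binary masks with distance transforms. A sketch assuming isotropic voxels:

import numpy as np
from scipy import ndimage

def surface(mask):
    # Surface voxels: mask minus its erosion.
    return mask & ~ndimage.binary_erosion(mask)

def surface_distances(a, b):
    # Distance from every surface voxel of a to the nearest surface voxel of b.
    dist_to_b = ndimage.distance_transform_edt(~surface(b))
    return dist_to_b[surface(a)]

def msd_hd95(a, b):
    d_ab, d_ba = surface_distances(a, b), surface_distances(b, a)
    msd = (d_ab.mean() + d_ba.mean()) / 2.0
    hd95 = max(np.percentile(d_ab, 95), np.percentile(d_ba, 95))
    return msd, hd95

a = np.zeros((32, 32, 32), bool); a[8:24, 8:24, 8:24] = True
b = np.zeros_like(a);             b[9:25, 8:24, 8:24] = True
print("MSD=%.2f  HD95=%.2f (voxels)" % msd_hd95(a, b))
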
@article{Elmahdy:2021,
abbr = {},
bibtex_show = {true},
author = {Elmahdy, Mohamed S. and Beljaards, Laurens and Yousefi, Sahar and Sokooti, Hessam and Verbeek, Fons and van der Heide, U.A. and Staring, Marius},
title = {Joint Registration and Segmentation via Multi-Task Learning for Adaptive Radiotherapy of Prostate Cancer},
journal = {IEEE Access},
volume = {9},
pages = {95551 -- 95568},
month = {June},
year = {2021},
pdf = {2021_j_Accessc.pdf},
html = {https://doi.org/10.1109/ACCESS.2021.3091011},
arxiv = {2105.01844},
code = {https://github.com/moelmahdy/JRS-MTL},
abstract = {Medical image registration and segmentation are two of the most frequent tasks in medical image analysis. As these tasks are complementary and correlated, it would be beneficial to apply them simultaneously in a joint manner. In this paper, we formulate registration and segmentation as a joint problem via a Multi-Task Learning (MTL) setting, allowing these tasks to leverage their strengths and mitigate their weaknesses through the sharing of beneficial information. We propose to merge these tasks not only on the loss level, but on the architectural level as well. We studied this approach in the context of adaptive image-guided radiotherapy for prostate cancer, where planning and follow-up CT images as well as their corresponding contours are available for training. At testing time the contours of the follow-up scans are not available, which is a common scenario in adaptive radiotherapy. The study involves two datasets from different manufacturers and institutes. The first dataset was divided into training (12 patients) and validation (6 patients), and was used to optimize and validate the methodology, while the second dataset (14 patients) was used as an independent test set. We carried out an extensive quantitative comparison between the quality of the automatically generated contours from different network architectures as well as loss weighting methods. Moreover, we evaluated the quality of the generated deformation vector field (DVF). We show that MTL algorithms outperform their Single-Task Learning (STL) counterparts and achieve better generalization on the independent test set. The best algorithm achieved a mean surface distance of 1.06±0.3 mm, 1.27±0.4 mm, 0.91±0.4 mm, and 1.76±0.8 mm on the validation set for the prostate, seminal vesicles, bladder, and rectum, respectively. The high accuracy of the proposed method, combined with the fast inference speed, makes it a promising method for automatic re-contouring of follow-up scans for adaptive radiotherapy, potentially reducing treatment-related complications and therefore improving patients' quality of life after treatment. The source code is available at <a href="https://github.com/moelmahdy/JRS-MTL">https://github.com/moelmahdy/JRS-MTL</a>.},
}
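
One loss-weighting scheme from the family the paper compares is homoscedastic-uncertainty weighting, where learnable log-variances balance the registration and segmentation losses. A minimal PyTorch sketch; the shared-encoder architecture is omitted and treating both losses with the same weighting form is a simplifying assumption:

import torch

log_var_reg = torch.zeros(1, requires_grad=True)   # learnable log-variance, registration
log_var_seg = torch.zeros(1, requires_grad=True)   # learnable log-variance, segmentation

def weighted_total(loss_reg, loss_seg):
    # exp(-s) * L + s: small s amplifies a task, the +s term penalizes ignoring it.
    return (torch.exp(-log_var_reg) * loss_reg + log_var_reg
            + torch.exp(-log_var_seg) * loss_seg + log_var_seg)

total = weighted_total(torch.tensor(0.8), torch.tensor(0.3))
total.backward()
print(float(total), log_var_reg.grad, log_var_seg.grad)
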
@article{Yousefi:2021,
abbr = {},
bibtex_show = {true},
author = {Yousefi, Sahar and Sokooti, Hessam and Elmahdy, Mohamed S. and Lips, Irene M. and Manzuri Shalmani, Mohammad T. and Zinkstok, Roel T. and Dankers, Frank J.W.M. and Staring, Marius},
title = {Esophageal Tumor Segmentation in CT Images using a Dilated Dense Attention Unet (DDAUnet)},
journal = {IEEE Access},
volume = {9},
pages = {99235 -- 99248},
month = {July},
year = {2021},
pdf = {2021_j_Accessb.pdf},
html = {https://doi.org/10.1109/ACCESS.2021.3096270},
arxiv = {2012.03242},
code = {https://github.com/yousefis/DenseUnet_Esophagus_Segmentation},
abstract = {Manual or automatic delineation of the esophageal tumor in CT images is known to be very challenging. This is due to the low contrast between the tumor and adjacent tissues, the anatomical variation of the esophagus, as well as the occasional presence of foreign bodies (e.g. feeding tubes). Physicians therefore usually exploit additional knowledge such as endoscopic findings, clinical history, and additional imaging modalities like PET scans. Acquiring this additional information is time-consuming, while the results are error-prone and might lead to non-deterministic results. In this paper we aim to investigate if and to what extent a simplified clinical workflow based on CT alone allows one to automatically segment the esophageal tumor with sufficient quality. For this purpose, we present a fully automatic end-to-end esophageal tumor segmentation method based on convolutional neural networks (CNNs). The proposed network, called Dilated Dense Attention Unet (DDAUnet), leverages spatial and channel attention gates in each dense block to selectively concentrate on determinant feature maps and regions. Dilated convolutional layers are used to manage GPU memory and increase the network receptive field. We collected a dataset of 792 scans from 288 distinct patients including varying anatomies with air pockets, feeding tubes and proximal tumors. Repeatability and reproducibility studies were conducted for three distinct splits of training and validation sets. The proposed network achieved a DSC value of 0.79 ± 0.20, a mean surface distance of 5.4 ± 20.2mm and 95% Hausdorff distance of 14.7 ± 25.0mm for 287 test scans, demonstrating promising results with a simplified clinical workflow based on CT alone. Our code is publicly available via <a href="https://github.com/yousefis/DenseUnet_Esophagus_Segmentation">https://github.com/yousefis/DenseUnet_Esophagus_Segmentation</a>.},
}
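
Channel and spatial attention gates of the kind described above are commonly built from squeeze-and-excite and 1x1x1 convolution blocks. The sketch below is a generic formulation of that pattern, not the exact DDAUnet module; consult the linked repository for the reference implementation.

import torch
import torch.nn as nn

class ChannelSpatialGate(nn.Module):
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.channel = nn.Sequential(               # squeeze-and-excite style channel gate
            nn.AdaptiveAvgPool3d(1),
            nn.Conv3d(channels, channels // reduction, 1), nn.ReLU(),
            nn.Conv3d(channels // reduction, channels, 1), nn.Sigmoid())
        self.spatial = nn.Sequential(               # 1x1x1 convolution to a spatial gate
            nn.Conv3d(channels, 1, 1), nn.Sigmoid())

    def forward(self, x):
        x = x * self.channel(x)                     # re-weight feature channels
        return x * self.spatial(x)                  # re-weight voxel positions

gate = ChannelSpatialGate(8)
print(gate(torch.rand(1, 8, 4, 16, 16)).shape)      # shape is unchanged by the gates
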
@article{Sokooti:2021,
abbr = {},
bibtex_show = {true},
author = {Sokooti, Hessam and Yousefi, Sahar and Elmahdy, Mohamed S. and Lelieveldt, Boudewijn P.F. and Staring, Marius},
title = {Hierarchical Prediction of Registration Misalignment using a Convolutional LSTM: Application to Chest CT Scans},
journal = {IEEE Access},
volume = {9},
pages = {62008 -- 62020},
month = {April},
year = {2021},
pdf = {2021_j_Accessa.pdf},
html = {https://doi.org/10.1109/ACCESS.2021.3074124},
arxiv = {},
code = {},
abstract = {In this paper we propose a supervised method to predict registration misalignment using convolutional neural networks (CNNs). This task is cast as a classification problem with multiple classes of misalignment: "correct" 0-3 mm, "poor" 3-6 mm and "wrong" over 6 mm. Rather than a direct prediction, we propose a hierarchical approach, where the prediction is gradually refined from coarse to fine. Our solution is based on a convolutional Long Short-Term Memory (LSTM), using hierarchical misalignment predictions on three resolutions of the image pair, leveraging the intrinsic strengths of an LSTM for this problem. The convolutional LSTM is trained on a set of artificially generated image pairs obtained from artificial displacement vector fields (DVFs). Results on chest CT scans show that incorporating multi-resolution information, and the hierarchical use via an LSTM for this, leads to overall better F1 scores, with fewer misclassifications in a well-tuned registration setup. The final system yields an accuracy of 87.1%, and an average F1 score of 66.4% aggregated in two independent chest CT scan studies.},
}
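
The artificial training labels described above could be derived by sampling a smooth displacement field and bucketing its local magnitude into the three misalignment classes. A hedged numpy sketch; the noise level and smoothing scale are assumptions:

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
# Smooth random 3-component displacement field over a 64^3 volume, treated as mm.
dvf = ndimage.gaussian_filter(rng.normal(0, 8, (3, 64, 64, 64)), sigma=(0, 6, 6, 6))
magnitude = np.linalg.norm(dvf, axis=0)             # local misalignment magnitude

classes = np.digitize(magnitude, bins=[3.0, 6.0])   # 0: correct, 1: poor, 2: wrong
for c, name in enumerate(["correct (0-3 mm)", "poor (3-6 mm)", "wrong (>6 mm)"]):
    print(name, "-> fraction %.3f" % np.mean(classes == c))
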
@article{Luu:2021,
abbr = {},
bibtex_show = {true},
author = {Luu, Ha Manh and van Walsum, Theo and Franklin, Daniel and Pham, Phuong Cam and Vu, Luu Dang and Moelker, Adriaan and Staring, Marius and Van Hoang, Xiem and Niessen, Wiro and Trung, Nguyen Linh},
title = {Efficiently Compressing 3D Medical Images for Teleinterventions via CNNs and Anisotropic Diffusion},
journal = {Medical Physics},
volume = {48},
number = {6},
pages = {2877 -- 2890},
month = {June},
year = {2021},
pdf = {2021_j_MP.pdf},
html = {https://doi.org/10.1002/mp.14814},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Efficient compression of images while preserving image quality has the potential to be a major enabler of effective remote clinical diagnosis and treatment, since poor Internet connection conditions are often the primary constraint in such services. This paper presents a framework for organ-specific image compression for teleinterventions based on a deep learning approach and anisotropic diffusion filter.<br><b>Methods:</b> The proposed method, DLAD, uses a CNN architecture to extract a probability map for the organ of interest; this probability map guides an anisotropic diffusion filter that smooths the image except at the location of the organ of interest. Subsequently, a compression method, such as BZ2 and HEVC-visually lossless, is applied to compress the image. We demonstrate the proposed method on 3D CT images acquired for radio frequency ablation (RFA) of liver lesions. We quantitatively evaluate the proposed method on 151 CT images using peak-signal-to-noise ratio (PSNR), structural similarity (SSIM) and compression ratio (CR) metrics. Finally, we compare the assessments of two radiologists on the liver lesion detection and the liver lesion center annotation using 33 sets of the original images and the compressed images.<br><b>Results:</b> The results show that the method can significantly improve the CR of most well-known compression methods. DLAD combined with HEVC-visually lossless achieves the highest average CR of 6.45, which is 36% higher than that of the original HEVC and outperforms other state-of-the-art lossless medical image compression methods. The means of PSNR and SSIM are 70 dB and 0.95, respectively. In addition, the compression effects do not statistically significantly affect the assessments of the radiologists on the liver lesion detection and the lesion center annotation.<br><b>Conclusions:</b> We thus conclude that the method has a high potential to be applied in teleintervention applications.},
}
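
The guiding idea of DLAD, smoothing everywhere except where the organ probability map is high so the organ stays sharp and the background compresses well, can be sketched in 2D. The plain Laplacian diffusion and step count here are simplifications; the paper uses an anisotropic diffusion filter:

import numpy as np

def guided_diffusion(image, prob_map, n_iter=20, dt=0.2):
    img = image.astype(float).copy()
    keep = np.clip(prob_map, 0.0, 1.0)              # 1 = organ of interest, 0 = background
    for _ in range(n_iter):
        lap = (np.roll(img, 1, 0) + np.roll(img, -1, 0) +
               np.roll(img, 1, 1) + np.roll(img, -1, 1) - 4 * img)
        img += dt * (1.0 - keep) * lap              # diffuse the background only
    return img

rng = np.random.default_rng(0)
img = rng.normal(0, 1, (64, 64)); img[20:40, 20:40] += 5.0   # noisy scene with an "organ"
prob = np.zeros((64, 64)); prob[20:40, 20:40] = 1.0          # CNN-like probability map
smoothed = guided_diffusion(img, prob)
bg = prob < 0.5
print("background std: %.2f -> %.2f" % (img[bg].std(), smoothed[bg].std()))
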
@article{Pezzotti:2020,
abbr = {},
bibtex_show = {true},
author = {Pezzotti, Nicola and Yousefi, Sahar and Elmahdy, Mohamed S. and van Gemert, Jeroen and Sch{\"u}lke, Christophe and Doneva, Mariya and Nielsen, Tim and Kastryulin, Sergey and Lelieveldt, Boudewijn P.F. and van Osch, Matthias J.P. and de Weerdt, Elwin and Staring, Marius},
title = {An Adaptive Intelligence Algorithm for Undersampled Knee MRI Reconstruction},
journal = {IEEE Access},
volume = {8},
pages = {204825 -- 204838},
year = {2020},
pdf = {2020_j_Access.pdf},
html = {https://doi.org/10.1109/ACCESS.2020.3034287},
arxiv = {2004.07339},
code = {},
abstract = {Adaptive intelligence aims at empowering machine learning techniques with the additional use of domain knowledge. In this work, we present the application of adaptive intelligence to accelerate MR acquisition. Starting from undersampled k-space data, an iterative learning-based reconstruction scheme inspired by compressed sensing theory is used to reconstruct the images. We developed a novel deep neural network to refine and correct prior reconstruction assumptions given the training data. The network was trained and tested on a knee MRI dataset from the 2019 fastMRI challenge organized by Facebook AI Research and NYU Langone Health. All submissions to the challenge were initially ranked based on similarity with a known ground truth, after which the top 4 submissions were evaluated radiologically. Our method was evaluated by the fastMRI organizers on an independent challenge dataset. It ranked #1 on the 8x accelerated multi-coil track, shared #1 on the 4x multi-coil track, and ranked #3 on the 4x single-coil track. This demonstrates the superior performance and wide applicability of the method.},
}
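For context, the classical compressed-sensing scheme that such learned reconstructions build on can be written in a few lines; the network in the paper replaces the hand-crafted prior (here a plain soft-threshold) with learned refinements. Everything below is a generic ISTA baseline under those assumptions, not the submitted method.

```python
import numpy as np

def ista_recon(kspace, mask, n_iter=50, step=1.0, lam=0.01):
    """Zero-filled init, then alternate a data-consistency gradient step in
    k-space with soft-thresholding of the image as a simple sparsity prior."""
    x = np.fft.ifft2(kspace * mask)
    for _ in range(n_iter):
        resid = mask * (np.fft.fft2(x) - kspace)            # data-term gradient
        x = x - step * np.fft.ifft2(resid)
        mag = np.abs(x)
        x = x * np.maximum(mag - lam, 0.0) / (mag + 1e-12)  # complex soft-threshold
    return np.abs(x)

truth = np.random.rand(64, 64)                  # toy image stand-in
mask = np.random.rand(64, 64) < 0.25            # roughly 4x undersampling
recon = ista_recon(np.fft.fft2(truth) * mask, mask)
```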
@article{Zhao:2020,
abbr = {},
bibtex_show = {true},
author = {Zhao, Hong and Stoel, Berend C. and Staring, Marius and Bakker, M. Els and Stolk, Jan and Zhou, Ping and Xiao, Changyan},
title = {A framework for pulmonary fissure segmentation in 3D CT images using a directional derivative of plate filter},
journal = {Signal Processing},
volume = {173},
pages = {107602},
month = {August},
year = {2020},
pdf = {2020_j_SP.pdf},
html = {https://doi.org/10.1016/j.sigpro.2020.107602},
arxiv = {},
code = {},
abstract = {Imaging pulmonary fissures by CT provides useful information on diagnosis of pulmonary diseases. Automatic segmentation of fissures is a challenging task due to the variable appearance of fissures, such as inhomogeneous intensities, pathological deformation and imaging noise. To overcome these challenges, we propose an anisotropic differential operator called directional derivative of plate (DDoP) filter to probe the presence of fissure objects in 3D space by modeling the profile of a fissure patch with three parallel plates. To reduce the heavy computational burden of dense matching with rotated DDoP kernels, a family of spherical harmonics is utilized for acceleration. Additionally, a two-stage post-processing scheme is introduced to segment fissures. The performance of our method was verified in experiments using 55 scans from the publicly available LOLA11 dataset and 50 low-dose CT scans of lung cancer patients from the VIA-ELCAP database. Our method showed superior performance compared to the derivative of sticks (DoS) method and the Hessian-based method in terms of median and mean F1-score. The median F1-score for the DDoP, DoS-based and Hessian-based methods on the LOLA11 dataset was 0.899, 0.848 and 0.843, respectively, and the mean F1-score was 0.858 ± 0.103, 0.781 ± 0.165 and 0.747 ± 0.239, respectively.},
}
@article{Yousefi:2019,
abbr = {},
bibtex_show = {true},
author = {Yousefi, Sahar and Manzuri Shalmani, M. T. and Lin, Jeremy and Staring, Marius},
title = {A Novel Motion Detection Method Using 3D Discrete Wavelet Transform},
journal = {IEEE Transactions on Circuits and Systems for Video Technology},
volume = {29},
number = {12},
pages = {3487 -- 3500},
month = {December},
year = {2019},
pdf = {2019_j_CSVT.pdf},
html = {https://doi.org/10.1109/TCSVT.2018.2885211},
arxiv = {},
code = {},
abstract = {The problem of motion detection has received considerable attention due to the explosive growth of its applications in video analysis and surveillance systems. While previous approaches can produce good results, the accurate detection of motion remains a challenging task due to the difficulties posed by illumination variations, occlusion, camouflage, sudden motions appearing in bursts, dynamic texture, and environmental changes such as varying weather conditions and sunlight changes during the day. In this study, a novel per-pixel motion descriptor is proposed for motion detection in video sequences which outperforms the current methods in the literature, particularly in severe scenarios. The proposed descriptor is based on two complementary three-dimensional discrete wavelet transforms (3D-DWT) and a three-dimensional wavelet leader. In this approach, a feature vector is extracted for each pixel by applying a novel three-dimensional wavelet-based motion descriptor. Then, the extracted features are clustered by the well-known K-means algorithm. The experimental results demonstrate the effectiveness of the proposed method compared to state-of-the-art approaches in several public benchmark datasets. The application of the proposed method and additional experimental results for several challenging datasets are available online.},
}
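A rough illustration of the wavelet-based pipeline: a one-level 3D DWT (via PyWavelets) over a stack of frames, per-pixel features from the detail subbands, and K-means clustering into moving versus static pixels. The paper's descriptor (two complementary 3D-DWTs plus a wavelet leader) is considerably richer; wavelet choice and parameters below are placeholders.

```python
import numpy as np
import pywt                                   # PyWavelets
from sklearn.cluster import KMeans

video = np.random.rand(16, 64, 64)            # (frames, height, width), toy stand-in

coeffs = pywt.dwtn(video, "haar")             # 8 subbands: 'aaa' + 7 detail bands
feats = np.stack([np.abs(coeffs[k]).mean(axis=0).ravel()  # average over time axis
                  for k in sorted(coeffs) if k != "aaa"], axis=1)

labels = KMeans(n_clusters=2, n_init=10).fit_predict(feats)
motion_mask = labels.reshape(32, 32)          # half resolution after one DWT level
```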
@article{vandenEnde:2019a,
abbr = {},
bibtex_show = {true},
author = {van den Ende, R.P.J. and Kerkhof, E.M. and Rigter, L.S. and van Leerdam, M.E. and Peters, F.P. and van Triest, B. and Staring, M. and Marijnen, C.A.M. and van der Heide, U.A.},
title = {Feasibility of gold fiducial markers as a surrogate for GTV position in image-guided radiotherapy of rectal cancer},
journal = {International Journal of Radiation Oncology, Biology, Physics},
volume = {105},
number = {5},
pages = {1151 -- 1159},
month = {December},
year = {2019},
pdf = {2019_j_IJROBP.pdf},
html = {https://doi.org/10.1016/j.ijrobp.2019.08.052},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> To evaluate the feasibility of fiducial markers as a surrogate for GTV position in image-guided radiotherapy of rectal cancer.<br><b>Methods and Materials:</b> We analyzed 35 fiducials in 19 rectal cancer patients who received short-course radiotherapy or long-course chemoradiotherapy. An MRI exam was acquired before and after the first week of radiotherapy, and daily pre- and post-irradiation CBCT scans were acquired in the first week of radiotherapy. Between the two MRI exams, the fiducial displacement relative to the center of gravity of the GTV (COG<sub>GTV</sub>) and the COG<sub>GTV</sub> displacement relative to bony anatomy was determined. Using the CBCT scans, inter- and intrafraction fiducial displacement relative to bony anatomy was determined.<br><b>Results:</b> The systematic error of the fiducial displacement relative to the COG<sub>GTV</sub> was 2.8, 2.4 and 4.2 mm in the left-right (LR), anterior-posterior (AP) and craniocaudal (CC) directions. Large interfraction systematic errors of up to 8.0 mm and random errors of up to 4.7 mm were found for COG<sub>GTV</sub> and fiducial displacements relative to bony anatomy, mostly in the AP and CC directions. For tumors located in the mid- and upper rectum these errors were up to 9.4 (systematic) and 5.6 mm (random), compared to 4.9 and 2.9 mm for tumors in the lower rectum. Systematic and random errors of the intrafraction fiducial displacement relative to bony anatomy were ≤ 2.1 mm in all directions.<br><b>Conclusions:</b> Large interfraction errors of the COG<sub>GTV</sub> and the fiducials relative to bony anatomy were found. Therefore, despite the observed fiducial displacement relative to the COG<sub>GTV</sub>, the use of fiducials as a surrogate for GTV position reduces the required margins in the AP and CC directions for a GTV boost using image-guided radiotherapy of rectal cancer. This reduction may be larger in patients with tumors located in the mid- and upper rectum compared to the lower rectum.},
}
@article{Qiao:2019a,
abbr = {},
bibtex_show = {true},
author = {Qiao, Yuchuan and Jagt, Thyrza and Hoogeman, Mischa and Lelieveldt, Boudewijn P.F. and Staring, Marius},
title = {Evaluation of an open source registration package for automatic contour propagation in online adaptive intensity-modulated proton therapy of prostate cancer},
journal = {Frontiers in Oncology},
volume = {9},
pages = {1297},
month = {November},
year = {2019},
pdf = {2019_j_FiO.pdf},
html = {https://doi.org/10.3389/fonc.2019.01297},
arxiv = {},
code = {https://github.com/SuperElastix/elastix},
abstract = {<b>Objective:</b> Our goal was to investigate the performance of an open source deformable image registration package, elastix, for fast and robust contour propagation in the context of online adaptive intensity-modulated proton therapy (IMPT) for prostate cancer.<br><b>Methods:</b> A planning CT scan and 7-10 repeat CT scans were available for each of 18 prostate cancer patients. Automatic contour propagation of repeat CT scans was performed using elastix and compared with manual delineations in terms of geometric accuracy and runtime. Dosimetric accuracy was quantified by generating IMPT plans using the propagated contours expanded with a 2 mm (prostate) and 3.5 mm margin (seminal vesicles and lymph nodes) and calculating dosimetric coverage based on the manual delineation. A coverage of V95% ≥ 98% (at least 98% of the target volumes receive at least 95% of the prescribed dose) was considered clinically acceptable.<br><b>Results:</b> Contour propagation runtime varied between 3 and 30 seconds for different registration settings. For the fastest setting, 83 of 93 (89.2%), 73 of 93 (78.5%), and 91 of 93 (97.9%) registrations yielded clinically acceptable dosimetric coverage of the prostate, seminal vesicles, and lymph nodes, respectively. For the prostate, seminal vesicles, and lymph nodes the Dice Similarity Coefficient (DSC) was 0.87 ± 0.05, 0.63 ± 0.18 and 0.89 ± 0.03 and the mean surface distance (MSD) was 1.4 ± 0.5 mm, 2.0 ± 1.2 mm and 1.5 ± 0.4 mm, respectively.<br><b>Conclusion:</b> With a dosimetric success rate of 78.5% to 97.9%, this software may facilitate online adaptive IMPT of prostate cancer using a fast, free and open implementation.},
}
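Since the registration package is the open-source elastix (linked above), contour propagation of this kind can be driven from the command line. A hedged sketch, with hypothetical image names and parameter files, using the standard elastix/transformix flags:

```python
import subprocess

# Register the planning CT (moving) to the repeat CT (fixed). File names and
# parameter files are placeholders, not the paper's exact settings.
subprocess.run(["elastix",
                "-f", "repeat_ct.mha", "-m", "planning_ct.mha",
                "-p", "par_affine.txt", "-p", "par_bspline.txt",
                "-out", "reg"], check=True)

# Before warping a label image, set FinalBSplineInterpolationOrder to 0 in
# reg/TransformParameters.1.txt so labels are resampled nearest-neighbour.
subprocess.run(["transformix",
                "-in", "planning_contours.mha",
                "-tp", "reg/TransformParameters.1.txt",
                "-out", "prop"], check=True)
```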
@article{Zhai:2019a,
abbr = {},
bibtex_show = {true},
author = {Zhai, Zhiwei and Staring, Marius and Ninaber, Maarten K. and de Vries-Bouwstra, Jeska and Schouffoer, Anne A. and Kroft, Lucia J. and Stolk, Jan and Stoel, Berend C.},
title = {Pulmonary Vascular Morphology Associated with Gas Exchange in Systemic Sclerosis without Lung Fibrosis},
journal = {Journal of Thoracic Imaging},
volume = {34},
number = {6},
pages = {373 -- 379},
month = {November},
year = {2019},
pdf = {2019_j_JTI.pdf},
html = {http://dx.doi.org/10.1097/RTI.0000000000000395},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Gas exchange in systemic sclerosis (SSc) is known to be affected by fibrotic changes in the pulmonary parenchyma. However, SSc patients without detectable fibrosis can still have impaired gas transfer. We aim to investigate whether pulmonary vascular changes could partly explain a reduction in gas transfer of SSc patients without fibrosis.<br><b>Materials and Methods:</b> We selected 77 patients, whose visual CT scoring showed no fibrosis. Pulmonary vessels were detected automatically in CT images and their local radii were calculated. The frequency of occurrence for each radius was calculated, and from this radius histogram two imaging biomarkers (α and β) were extracted, where α reflects the relative contribution of small vessels compared to large vessels and β represents the vessel tree capacity. Correlations between imaging biomarkers and gas transfer (DLCOc %predicted) were evaluated with Spearman's correlation. Multivariable stepwise linear regression was performed with DLCOc %predicted as dependent variable and age, BMI, sPAP, FEV1 %predicted, TLC %predicted, FVC %predicted, α, β, voxel size and CT-derived lung volume as independent variables.<br><b>Results:</b> Both α and β were significantly correlated with gas transfer (R=-0.29, p-value=0.011 and R=0.32, p-value=0.004, respectively). The multivariable stepwise linear regression analysis selected sPAP (coefficient=-0.78, 95%CI=[-1.07, -0.49], p-value<0.001), β (coefficient=8.6, 95%CI=[4.07, 13.1], p-value<0.001) and FEV1 %predicted (coefficient=0.3, 95%CI=[0.12, 0.48], p-value=0.001) as significant independent predictors of DLCOc %predicted (R=0.71, p-value<0.001).<br><b>Conclusions:</b> In SSc patients without detectable pulmonary fibrosis, pulmonary vascular morphology is associated with gas transfer, indicating that impaired gas exchange is associated with vascular changes.},
}
@article{Qiao:2019b,
abbr = {TMI},
bibtex_show = {true},
author = {Qiao, Yuchuan and Lelieveldt, Boudewijn P.F and Staring, Marius},
title = {An efficient preconditioner for stochastic gradient descent optimization of image registration},
journal = {IEEE Transactions on Medical Imaging},
volume = {38},
number = {10},
pages = {2314 -- 2325},
month = {October},
year = {2019},
pdf = {2019_j_TMI.pdf},
html = {https://doi.org/10.1109/TMI.2019.2897943},
arxiv = {},
code = {https://github.com/SuperElastix/elastix},
abstract = {Stochastic gradient descent (SGD) is commonly used to solve (parametric) image registration problems. For badly scaled problems, however, SGD exhibits only sublinear convergence. In this paper we propose an efficient preconditioner estimation method to improve the convergence rate of SGD. Based on the observed distribution of voxel displacements in the registration, we estimate the diagonal entries of a preconditioning matrix, thus rescaling the optimization cost function. The preconditioner is efficient to compute and employ, and can be used for mono-modal as well as multi-modal cost functions, in combination with different transformation models like the rigid, affine and B-spline model. Experiments on different clinical data sets show that the proposed method indeed improves the convergence rate compared to SGD, with speedups of around 2-5x in all tested settings, while retaining the same level of registration accuracy.},
}
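The mechanism is easiest to see on a toy badly scaled quadratic: a diagonal preconditioner rescales the cost function so plain gradient descent converges at one rate in all directions. A generic sketch only; the paper's estimator, which derives the diagonal entries from the observed voxel displacement distribution, is not reproduced here.

```python
import numpy as np

# Badly scaled quadratic f(x) = 0.5 * x^T A x, with gradient A x.
A = np.diag([1.0, 100.0, 10000.0])
grad = lambda x: A @ x

x_plain = np.array([1.0, 1.0, 1.0])
x_prec = x_plain.copy()
P = 1.0 / np.diag(A)                     # diagonal preconditioner

for _ in range(100):
    x_plain -= 1e-4 * grad(x_plain)      # step limited by the largest eigenvalue
    x_prec -= 0.5 * (P * grad(x_prec))   # rescaled problem: uniform convergence

print(np.linalg.norm(x_plain), np.linalg.norm(x_prec))  # preconditioned wins
```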
@article{Bayer:2019,
abbr = {},
bibtex_show = {true},
author = {Bayer, Siming and Zhai, Zhiwei and Strumia, Maddalena and Tong, Xiaoguang and Gao, Ying and Staring, Marius and Stoel, Berend and Fahrig, Rebecca and Nabavi, Arya and Maier, Andreas and Ravikumar, Nishant},
title = {Registration of vascular structures using a hybrid mixture model},
journal = {International Journal of Computer Assisted Radiology and Surgery},
volume = {14},
number = {9},
pages = {1507 -- 1516},
month = {September},
year = {2019},
pdf = {2019_j_IJCARS.pdf},
html = {https://doi.org/10.1007/s11548-019-02007-y},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Morphological changes to anatomy resulting from invasive surgical procedures or pathology typically alter the surrounding vasculature. This makes it useful as a descriptor for feature-driven image registration in various clinical applications. However, registration of vasculature remains challenging, as vessels often differ in size and shape, and may even miss branches due to surgical interventions or pathological changes. Furthermore, existing vessel registration methods are typically designed for a specific application. To address this limitation, we propose a generic vessel registration approach useful for a variety of clinical applications, involving different anatomical regions.<br><b>Methods:</b> A probabilistic registration framework based on a hybrid mixture model, with a refinement mechanism to identify missing branches (denoted as HdMM+) during vasculature matching, is introduced. Vascular structures are represented as 6-dimensional hybrid point sets comprising spatial positions and centerline orientations, using Student's t-distributions to model the former and Watson distributions for the latter.<br><b>Results:</b> The proposed framework is evaluated for intraoperative brain shift compensation, and for monitoring changes in pulmonary vasculature resulting from chronic lung disease. Registration accuracy is validated using both synthetic and patient data. Our results demonstrate that HdMM+ reduces the initial error by more than 85% for both applications, and outperforms state-of-the-art point-based registration methods such as coherent point drift (CPD) and the Student's t-distribution mixture model (TMM), in terms of mean surface distance, modified Hausdorff distance, Dice and Jaccard scores.<br><b>Conclusion:</b> The proposed registration framework models complex vascular structures using a hybrid representation of vessel centerlines, and accommodates intricate variations in vascular morphology. Furthermore, it is generic and flexible in its design, enabling its use in a variety of clinical applications.},
}
@article{Zhai:2019b,
abbr = {MP},
bibtex_show = {true},
author = {Zhai, Zhiwei and Staring, Marius and Giron, Irene Hernandez and Veldkamp, Wouter J.H. and Kroft, Lucia J. and Ninaber, Maarten K. and Stoel, Berend C.},
title = {Automatic quantitative analysis of pulmonary vascular morphology in CT images},
journal = {Medical Physics},
volume = {46},
number = {9},
pages = {3985 -- 3997},
month = {September},
year = {2019},
pdf = {2019_j_MPb.pdf},
html = {https://doi.org/10.1002/mp.13659},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Vascular remodeling is a significant pathological feature of various pulmonary diseases, which may be assessed by quantitative CT imaging. The purpose of this study was therefore to develop and validate an automatic method for quantifying pulmonary vascular morphology in CT images.<br><b>Methods:</b> The proposed method consists of pulmonary vessel extraction and quantification. For extracting pulmonary vessels, a graph-cuts based method is proposed which considers appearance (CT intensity) and shape (vesselness from a Hessian-based filter) features, and incorporates distance to the airways into the cost function to prevent false detection of airway walls. For quantifying the extracted pulmonary vessels, a radius histogram is generated by counting the occurrence of vessel radii, calculated from a distance transform based method. Subsequently, two biomarkers, slope α and intercept β, are calculated by linear regression on the radius histogram. A public data set from the VESSEL12 challenge was used to independently evaluate the vessel extraction. The quantitative analysis method was validated using images of a 3D printed vessel phantom, scanned by a clinical CT scanner and a micro-CT scanner (to obtain a gold standard). To confirm the association between imaging biomarkers and pulmonary function, 77 scleroderma patients were investigated with the proposed method.<br><b>Results:</b> In the independent evaluation with the public data set, our vessel segmentation method obtained an area under the ROC curve of 0.976. The median radius difference between clinical and micro-CT scans of a 3D printed vessel phantom was 0.062 ± 0.020 mm, with interquartile range of 0.199 ± 0.050 mm. In the studied patient group, a significant correlation between diffusion capacity for carbon monoxide and the biomarkers, α (R=-0.27, p-value=0.018) and β (R=0.321, p-value=0.004), was obtained.<br><b>Conclusions:</b> In conclusion, the proposed method was highly accurate, validated with a public data set and a 3D printed vessel phantom data set. The correlation between imaging biomarkers and diffusion capacity in a clinical data set confirmed an association between lung structure and function. This quantification of pulmonary vascular morphology may be helpful in understanding the pathophysiology of pulmonary vascular diseases.},
}
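The two biomarkers can be reproduced in outline from a list of vessel radii: build the radius histogram and fit a line through it. The sketch below assumes the fit is done in log-log space; the binning and fit details are guesses, so consult the paper before reuse.

```python
import numpy as np

def radius_histogram_biomarkers(radii_mm, bins=np.arange(0.5, 10.5, 0.5)):
    """Return (alpha, beta): slope and intercept of a linear fit through
    the log-log radius histogram of the segmented pulmonary vessel tree."""
    counts, edges = np.histogram(radii_mm, bins=bins)
    centers = 0.5 * (edges[:-1] + edges[1:])
    keep = counts > 0                                  # avoid log(0)
    alpha, beta = np.polyfit(np.log10(centers[keep]),
                             np.log10(counts[keep]), deg=1)
    return alpha, beta

radii = np.random.gamma(2.0, 0.8, size=20000)          # toy radius sample
alpha, beta = radius_histogram_biomarkers(radii)
```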
@article{Elmahdy:2019,
abbr = {MP},
bibtex_show = {true},
author = {Elmahdy, Mohamed S. and Jagt, Thyrza and Zinkstok, Roel Th. and Qiao, Yuchuan and Shahzad, Rahil and Sokooti, Hessam and Yousefi, Sahar and Incrocci, Luca and Marijnen, Corrie A.M. and Hoogeman, Mischa and Staring, Marius},
title = {Robust contour propagation using deep learning and image registration for online adaptive proton therapy of prostate cancer},
journal = {Medical Physics},
volume = {46},
number = {8},
pages = {3329 -- 3343},
month = {August},
year = {2019},
pdf = {2019_j_MPa.pdf},
html = {https://doi.org/10.1002/mp.13620},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> To develop and validate a robust and accurate registration pipeline for automatic contour propagation for online adaptive Intensity-Modulated Proton Therapy (IMPT) of prostate cancer using elastix software and deep learning.<br><b>Methods:</b> A 3D Convolutional Neural Network was trained for automatic bladder segmentation of the CT scans. The automatic bladder segmentation and the CT scan are jointly optimized to add explicit knowledge about the underlying anatomy to the registration algorithm. We included three datasets from different institutes and CT manufacturers. The first was used for training and testing the ConvNet, while the second and the third were used for evaluation of the proposed pipeline. The system performance was quantified geometrically using the Dice Similarity Coefficient (DSC), the Mean Surface Distance (MSD), and the 95% Hausdorff Distance (HD). The propagated contours were validated clinically by generating the associated IMPT plans and comparing them with the IMPT plans based on the manual delineations. Propagated contours were considered clinically acceptable if their treatment plans met the dosimetric coverage constraints on the manual contours.<br><b>Results:</b> The bladder segmentation network achieved a DSC of 88% and 82% on the test datasets. The proposed registration pipeline achieved a MSD of 1.29 ± 0.39, 1.48 ± 1.16, and 1.49 ± 0.44 mm for the prostate, seminal vesicles, and lymph nodes, respectively, on the second dataset, and a MSD of 2.31 ± 1.92 and 1.76 ± 1.39 mm for the prostate and seminal vesicles on the third dataset. The automatically propagated contours met the dose coverage constraints in 86%, 91%, and 99% of the cases for the prostate, seminal vesicles, and lymph nodes, respectively. A Conservative Success Rate (CSR) of 80% was obtained, compared to 65% when only using intensity-based registration.<br><b>Conclusions:</b> The proposed registration pipeline obtained highly promising results for generating treatment plans adapted to the daily anatomy. With 80% of the automatically generated treatment plans directly usable without manual correction, a substantial improvement in system robustness was reached compared to a previous approach. The proposed method therefore facilitates more precise proton therapy of prostate cancer, potentially leading to fewer treatment-related adverse side effects.},
}
@article{Sokooti:2019,
abbr = {},
bibtex_show = {true},
author = {Sokooti, Hessam and Saygili, Gorkem and Glocker, Ben and Lelieveldt, Boudewijn P.F. and Staring, Marius},
title = {Quantitative Error Prediction of Medical Image Registration using Regression Forests},
journal = {Medical Image Analysis},
volume = {56},
number = {8},
pages = {110 -- 121},
month = {August},
year = {2019},
pdf = {2019_j_MedIAb.pdf},
html = {https://doi.org/10.1016/j.media.2019.05.005},
arxiv = {},
code = {},
abstract = {Predicting registration error can be useful for evaluation of registration procedures, which is important for the adoption of registration techniques in the clinic. In addition, quantitative error prediction can be helpful in improving the registration quality. The task of predicting registration error is demanding due to the lack of a ground truth in medical images. This paper proposes a new automatic method to predict the registration error in a quantitative manner, and is applied to chest CT scans. A random regression forest is utilized to predict the registration error locally. The forest is built with features related to the transformation model and features related to the dissimilarity after registration. The forest is trained and tested using manually annotated corresponding points between pairs of chest CT scans in two experiments: SPREAD (trained and tested on SPREAD) and inter-database (including three databases SPREAD, DIR-Lab-4DCT and DIR-Lab-COPDgene). The results show that the mean absolute errors of regression are 1.07 ± 1.86 and 1.76 ± 2.59 mm for the SPREAD and inter-database experiment, respectively. The overall accuracy of classification in three classes (correct, poor and wrong registration) is 90.7% and 75.4%, for SPREAD and inter-database respectively. The good performance of the proposed method enables important applications such as automatic quality control in large-scale image analysis.},
}
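Once per-voxel features are in hand, the core of this approach reduces to standard regression-forest machinery. A self-contained sketch with random stand-ins for the transformation-model and dissimilarity features (the actual feature definitions are in the paper); the 3/6 mm class thresholds are borrowed from the related misalignment-classification work earlier in this list.

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X_train = rng.normal(size=(5000, 12))        # 12 hypothetical per-voxel features
y_train = rng.gamma(2.0, 1.0, size=5000)     # local registration error in mm (toy)

forest = RandomForestRegressor(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)

error_mm = forest.predict(rng.normal(size=(10, 12)))
error_class = np.digitize(error_mm, [3.0, 6.0])   # correct / poor / wrong
```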
@article{vandenEnde:2019b,
abbr = {},
bibtex_show = {true},
author = {van den Ende, R.P.J. and Rigter, L.S. and Kerkhof, E.M. and van Persijn van Meerten, E.L. and Rijkmans, E.C. and Lambregts, D.M.J. and van Triest, B. and van Leerdam, M.E. and Staring, M. and Marijnen, C.A.M. and van der Heide, U.A.},
title = {MRI visibility of gold fiducial markers for image-guided radiotherapy of rectal cancer},
journal = {Radiotherapy & Oncology},
volume = {132},
number = {3},
pages = {93 -- 99},
month = {March},
year = {2019},
pdf = {2019_j_RO.pdf},
html = {https://doi.org/10.1016/j.radonc.2018.11.016},
arxiv = {},
code = {},
abstract = {<b>Background and purpose:</b> A GTV boost is suggested to result in higher complete response rates in rectal cancer patients, which is attractive for organ preservation. Fiducials may offer GTV position verification on (CB)CT, if the fiducial-GTV spatial relationship can be accurately defined on MRI. The study aim was to evaluate the MRI visibility of fiducials inserted in the rectum.<br><b>Materials and methods:</b> We tested four fiducial types (two Visicoil types, Cook and Gold Anchor), inserted in five patients each. Four observers identified fiducial locations on two MRI exams per patient in two scenarios: without (scenario A) and with (scenario B) (CB)CT available. A fiducial was defined to be consistently identified if 3 out of 4 observers labeled that fiducial at the same position on MRI. Fiducial visibility was scored on an axial and sagittal T2-TSE sequence and a T1 3D GRE sequence.<br><b>Results:</b> Fiducial identification was poor in scenario A for all fiducial types. The Visicoil 0.75 and Gold Anchor were the most consistently identified fiducials in scenario B with 7 out of 9 and 8 out of 11 consistently identified fiducials in the first MRI exam and 2 out of 7 and 5 out of 10 in the second MRI exam, respectively. The consistently identified Visicoil 0.75 and Gold Anchor fiducials were best visible on the T1 3D GRE sequence.<br><b>Conclusion:</b> The Visicoil 0.75 and Gold Anchor fiducials were the most visible fiducials on MRI as they were most consistently identified. The use of a registered (CB)CT and a T1 3D GRE MRI sequence is recommended.},
}
@article{DeVos:2019,
abbr = {},
bibtex_show = {true},
author = {De Vos, Bob and Berendsen, Floris F. and Viergever, Max A. and Sokooti, Hessam and Staring, Marius and I{\v{s}}gum, Ivana},
title = {A Deep Learning Framework for Unsupervised Affine and Deformable Image Registration},
journal = {Medical Image Analysis},
volume = {52},
number = {2},
pages = {128 -- 143},
month = {February},
year = {2019},
pdf = {2019_j_MedIAa.pdf},
html = {https://doi.org/10.1016/j.media.2018.11.010},
arxiv = {},
code = {},
abstract = {Image registration, the process of aligning two or more images, is the core technique of many (semi-)automatic medical image analysis tasks. Recent studies have shown that deep learning methods, notably convolutional neural networks (ConvNets), can be used for image registration. Thus far, training of ConvNets for registration was supervised using predefined example registrations. However, obtaining example registrations is not trivial. To circumvent the need for predefined examples, and thereby to increase convenience of training ConvNets for image registration, we propose the Deep Learning Image Registration (DLIR) framework for unsupervised affine and deformable image registration. In the DLIR framework ConvNets are trained for image registration by exploiting image similarity analogous to conventional intensity-based image registration. After a ConvNet has been trained with the DLIR framework, it can be used to register pairs of unseen images in one shot. We propose flexible ConvNet designs for affine image registration and for deformable image registration. By stacking multiple of these ConvNets into a larger architecture, we are able to perform coarse-to-fine image registration. We show for registration of cardiac cine MRI and registration of chest CT that the performance of the DLIR framework is comparable to conventional image registration while being several orders of magnitude faster.},
}
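The key ingredient of unsupervised training as described above is an intensity-based similarity loss applied to the warped image. A minimal numpy sketch of one common choice (negative normalized cross-correlation) follows; the DLIR paper's exact loss and architecture are not reproduced here.

```python
import numpy as np

def ncc_loss(fixed, warped, eps=1e-8):
    """Negative global normalized cross-correlation: the kind of image
    similarity that replaces ground-truth transformations during training."""
    f = fixed - fixed.mean()
    w = warped - warped.mean()
    ncc = (f * w).sum() / (np.sqrt((f * f).sum() * (w * w).sum()) + eps)
    return -ncc

fixed = np.random.rand(64, 64)
print(ncc_loss(fixed, fixed))        # perfectly aligned images give -1.0
```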
@article{Sun:2018,
abbr = {},
bibtex_show = {true},
author = {Sun, Zhuo and Qiao, Yuchuan and Lelieveldt, Boudewijn P.F. and Staring, Marius},
title = {Integrating Spatial-Anatomical Regularization and Structure Sparsity into SVM: Improving Interpretation of Alzheimer's Disease Classification},
journal = {NeuroImage},
volume = {178},
pages = {445 -- 460},
month = {September},
year = {2018},
pdf = {2018_j_NI.pdf},
html = {https://doi.org/10.1016/j.neuroimage.2018.05.051},
arxiv = {},
code = {},
abstract = {In recent years, machine learning approaches have been successfully applied to the field of neuroimaging for classification and regression tasks. However, many approaches do not give an intuitive relation between the raw features and the diagnosis. Therefore, they are difficult for clinicians to interpret. Moreover, most approaches treat the features extracted from the brain (for example, voxelwise gray matter concentration maps from brain MRI) as independent variables and ignore their spatial and anatomical relations. In this paper, we present a new Support Vector Machine (SVM)-based learning method for the classification of Alzheimer's disease (AD), which integrates spatial-anatomical information. In this way, spatially neighboring features in the same anatomical region are encouraged to have similar weights in the SVM model. Secondly, to make the learned model more interpretable, we introduce a group lasso penalty to induce structure sparsity, which may help clinicians to assess the key regions involved in the disease. For solving this learning problem, we use an accelerated proximal gradient descent approach. We tested our method on the subset of ADNI data selected by Cuingnet et al. (2011) for Alzheimer's disease classification, as well as on an independent larger dataset from ADNI. Good classification performance is obtained for distinguishing cognitively normal (CN) subjects vs. AD, as well as for distinguishing between various sub-types (e.g. CN vs. Mild Cognitive Impairment). The model trained on Cuingnet's dataset for AD vs. CN classification was applied directly, without re-training, to the independent larger dataset. Good performance was achieved, demonstrating the generalizability of the proposed method. For all experiments, the classification results are comparable or better than the state-of-the-art, while the weight map more clearly indicates the key regions related to Alzheimer's disease.},
}
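The structure-sparsity step in the description above has a closed-form proximal operator: block soft-thresholding per anatomical region, which is the workhorse inside accelerated proximal gradient descent. A sketch in which `groups` (a mapping from region name to voxel indices) is hypothetical:

```python
import numpy as np

def prox_group_lasso(w, groups, lam):
    """Proximal operator of lam * sum_g ||w_g||_2: shrink each anatomical
    region's weight block toward zero, zeroing whole regions when small."""
    w = w.copy()
    for idx in groups.values():
        norm = np.linalg.norm(w[idx])
        w[idx] = 0.0 if norm <= lam else w[idx] * (1.0 - lam / norm)
    return w

w = np.random.randn(100)
groups = {"hippocampus": np.arange(0, 50), "amygdala": np.arange(50, 100)}
w_sparse = prox_group_lasso(w, groups, lam=2.0)
```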
@article{Zhai:2018,
abbr = {},
bibtex_show = {true},
author = {Zhai, Zhiwei and Ota, Hideki and Staring, Marius and Stolk, Jan and Sugimura, Koichiro and Takase, Kei and Stoel, Berend C.},
title = {Treatment Effect of Balloon Pulmonary Angioplasty in Chronic Thromboembolic Pulmonary Hypertension Quantified by Automatic Comparative Imaging in Computed Tomography Pulmonary Angiography},
journal = {Investigative Radiology},
volume = {53},
number = {5},
pages = {286 -- 292},
month = {May},
year = {2018},
pdf = {2018_j_IR.pdf},
html = {https://doi.org/10.1097/RLI.0000000000000441},
arxiv = {},
code = {},
abstract = {<b>Objectives:</b> Balloon pulmonary angioplasty (BPA) in patients with inoperable chronic thromboembolic pulmonary hypertension (CTEPH) can have variable outcomes. To gain more insight into this variation, we designed a method for visualizing and quantifying changes in pulmonary perfusion by automatically comparing computed tomography (CT) pulmonary angiography before and after BPA treatment. We validated these quantifications of perfusion changes against hemodynamic changes measured with right-sided heart catheterization.<br><b>Materials and Methods:</b> We studied 14 consecutive CTEPH patients (12 women; age, 70.5 ± 24), who underwent CT pulmonary angiography and right-sided heart catheterization, before and after BPA. Posttreatment images were registered to pretreatment CT scans (using the Elastix toolbox) to obtain corresponding locations. Pulmonary vascular trees and their centerlines were detected using a graph cuts method and a distance transform method, respectively. Areas distal from vessels were defined as pulmonary parenchyma. Subsequently, the density changes within the vascular centerlines and parenchymal areas were calculated and corrected for inspiration level differences. For visualization, the densitometric changes were displayed in color-coded overlays. For quantification, the median and interquartile range of the density changes in the vascular and parenchymal areas (ΔVD and ΔPD) were calculated. The recorded changes in hemodynamic parameters, including changes in systolic, diastolic, mean pulmonary artery pressure (ΔsPAP, ΔdPAP and ΔmPAP, respectively) and vascular resistance (ΔPVR), were used as reference assessments of the treatment effect. Spearman correlation coefficients were employed to investigate the correlations between changes in perfusion and hemodynamic changes.<br><b>Results:</b> Comparative imaging maps showed distinct patterns in perfusion changes among patients. Within pulmonary vessels, the interquartile range of ΔVD correlated significantly with ΔsPAP (R= 0.58, p=0.03), ΔdPAP (R= 0.71, p=0.005), ΔmPAP (R= 0.71, p=0.005), and ΔPVR (R= 0.77, p=0.001). In the parenchyma, the median of ΔPD had significant correlations with ΔdPAP (R= 0.58, p=0.030) and ΔmPAP (R= 0.59, p=0.025).<br><b>Conclusions:</b> Comparative imaging analysis in CTEPH patients offers insight into differences in BPA treatment effect. Quantification of perfusion changes provides noninvasive measures that reflect hemodynamic changes.},
}
@article{Shahzad:2017,
abbr = {},
bibtex_show = {true},
author = {Shahzad, Rahil and Tao, Qian and Dzyubachyk, Oleh and Staring, Marius and Lelieveldt, Boudewijn P.F. and van der Geest, Rob J.},
title = {Fully-Automatic Left Ventricular Segmentation from Long-Axis Cardiac Cine MR Scans},
journal = {Medical Image Analysis},
volume = {39},
pages = {44 -- 55},
month = {July},
year = {2017},
pdf = {2017_j_MedIA.pdf},
html = {http://doi.org/10.1016/j.media.2017.04.004},
arxiv = {},
code = {},
abstract = {With an increasing number of large-scale population-based cardiac magnetic resonance (CMR) imaging studies being conducted nowadays, there comes the mammoth task of image annotation and image analysis. Such population-based studies would greatly benefit from automated pipelines, with an efficient CMR image analysis workflow. The purpose of this work is to investigate the feasibility of using a fully-automatic pipeline to segment the left ventricular endocardium and epicardium simultaneously on two orthogonal (vertical and horizontal) long-axis cardiac cine MRI scans. The pipeline is based on a multi-atlas-based segmentation approach and a spatio-temporal registration approach. The performance of the method was assessed by: (i) comparing the automatic segmentations to those obtained manually at both the end-diastolic and end-systolic phase, (ii) comparing the automatically obtained clinical parameters, including end-diastolic volume, end-systolic volume, stroke volume and ejection fraction, with those defined manually and (iii) by the accuracy of classifying subjects to the appropriate risk category based on the estimated ejection fraction. Automatic segmentation of the left ventricular endocardium was achieved with a Dice similarity coefficient (DSC) of 0.93 on the end-diastolic phase for both the vertical and horizontal long-axis scan; on the end-systolic phase the DSC was 0.88 and 0.85, respectively. For the epicardium, a DSC of 0.94 and 0.95 was obtained on the end-diastolic vertical and horizontal long-axis scans; on the end-systolic phase the DSC was 0.90 and 0.88, respectively. With respect to the clinical volumetric parameters, Pearson correlation coefficient (R) of 0.97 was obtained for the end-diastolic volume, 0.95 for end-systolic volume, 0.87 for stroke volume and 0.84 for ejection fraction. Risk category classification based on ejection fraction showed that 80% of the subjects were assigned to the correct risk category and only one subject (< 1%) was more than one risk category off. We conclude that the proposed automatic pipeline presents a viable and cost-effective alternative for manual annotation.},
}
@article{Sun:2017,
abbr = {},
bibtex_show = {true},
author = {Sun, Zhuo and van der Giessen, Martijn and Lelieveldt, Boudewijn P.F. and Staring, Marius},
title = {Detection of conversion from mild cognitive impairment to Alzheimer's disease using longitudinal brain MRI},
journal = {Frontiers in Neuroinformatics},
volume = {11},
pages = {16},
month = {February},
year = {2017},
pdf = {2017_j_FNI.pdf},
html = {http://dx.doi.org/10.3389/fninf.2017.00016},
arxiv = {},
code = {},
abstract = {Mild Cognitive Impairment (MCI) is an intermediate stage between healthy and Alzheimer's disease (AD). To enable early intervention, it is important to identify the MCI subjects that will convert to AD at an early stage. In this paper, we provide a new method to distinguish between MCI patients that either convert to Alzheimer's Disease (MCIc) or remain stable (MCIs), using only longitudinal T1-weighted MRI. Currently, most longitudinal studies focus on volumetric comparison of a few anatomical structures, thereby ignoring more detailed development inside and outside those structures. In this study we propose to exploit the anatomical development within the entire brain, as found by a non-rigid registration approach. Specifically, this anatomical development is represented by the stationary velocity field (SVF) from registration between the baseline and follow-up images. To make the SVFs comparable among subjects, we use the parallel transport method to align them in a common space. The normalized SVF together with derived features are then used to distinguish between MCIc and MCIs subjects. This novel feature space is reduced using a Kernel Principal Component Analysis method, and a linear support vector machine is used as a classifier. Extensive comparative experiments are performed to inspect the influence of several aspects of our method on classification performance, specifically the feature choice, the smoothing parameter in the registration and the use of dimensionality reduction. The optimal result from a 10-fold cross-validation using 36 month follow-up data shows competitive results: accuracy 92%, sensitivity 95%, specificity 90%, and AUC 94%. Based on the same dataset, the proposed approach outperforms two alternative ones that either depend on the baseline image only or use longitudinal information from larger brain areas. Good results were also obtained when scans at 6, 12 or 24 months were used for training the classifier. Besides the classification power, the proposed method can quantitatively compare brain regions that have a significant difference in development between the MCIc and MCIs groups.},
}
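The classification stage described above maps onto a short scikit-learn pipeline. The SVF-derived features themselves (registration plus parallel transport) are the hard part and are faked here with random vectors; component counts and kernel choice are illustrative.

```python
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import KernelPCA
from sklearn.svm import LinearSVC

rng = np.random.default_rng(1)
X = rng.normal(size=(80, 500))            # per-subject SVF features (stand-ins)
y = rng.integers(0, 2, size=80)           # 1 = MCI converter, 0 = stable

clf = make_pipeline(KernelPCA(n_components=20, kernel="rbf"), LinearSVC())
clf.fit(X, y)
print(clf.predict(X[:5]))
```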
@article{Dzyubachyk:2017,
abbr = {},
bibtex_show = {true},
author = {Dzyubachyk, Oleh and Staring, Marius and Reijnierse, Monique and Lelieveldt, Boudewijn P.F. and van der Geest, Rob J.},
title = {Inter-Station Intensity Standardization for Whole-Body MR Data},
journal = {Magnetic Resonance in Medicine},
volume = {77},
number = {1},
pages = {422 -- 433},
month = {January},
year = {2017},
pdf = {2017_j_MRM.pdf},
html = {http://dx.doi.org/10.1002/mrm.26098},
arxiv = {},
code = {},
abstract = {<b>Purpose.</b> To develop and validate a method for performing inter-station intensity standardization in multi-spectral whole-body MR data.<br><b>Methods.</b> Different approaches for mapping the intensity of each acquired image stack into the reference intensity space were developed and validated. The registration strategies included: "direct" registration to the reference station (Strategy 1), and "progressive" registration to the neighbouring stations without (Strategy 2) and with (Strategy 3) using information from the overlap regions of the neighbouring stations. For Strategy 3, two regularized modifications were proposed and validated. All methods were tested on two multi-spectral whole-body MR data sets: a multiple myeloma patient data set (48 subjects) and a whole-body MR angiography data set (33 subjects).<br><b>Results.</b> For both data sets, all strategies showed significant improvement of intensity homogeneity with respect to the vast majority of the validation measures (p < 0.005). Strategy 1 exhibited the best performance, closely followed by Strategy 2. Strategy 3 and its modifications performed worse, in the majority of cases significantly (p < 0.05).<br><b>Conclusions.</b> We propose several strategies for performing inter-station intensity standardization in multi-spectral whole-body MR data. All the strategies were successfully applied to two types of whole-body MR data, and the "direct" registration strategy was concluded to perform the best.},
}
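One deliberately simplified way to picture the use of overlap information mentioned for Strategy 3: fit a least-squares linear intensity map on the slab shared by two neighbouring stations. This is an illustrative guess at the mechanism, not the paper's actual procedure.

```python
import numpy as np

def overlap_intensity_map(overlap_mov, overlap_ref):
    """Fit a linear map a*x + b on the stations' shared overlap slab so the
    moving station's intensities match the reference station's."""
    a, b = np.polyfit(overlap_mov.ravel(), overlap_ref.ravel(), deg=1)
    return lambda x: a * x + b

ref_slab = np.random.rand(10, 64, 64) * 800 + 100    # toy overlap intensities
mov_slab = 0.7 * ref_slab + 50                       # simulated station bias
standardize = overlap_intensity_map(mov_slab, ref_slab)
```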
@article{Viergever:2016,
abbr = {},
bibtex_show = {true},
author = {Viergever, Max A. and Maintz, J.B. Antoine and Klein, Stefan and Murphy, Keelin and Staring, Marius and Pluim, Josien P.W.},
title = {A survey of medical image registration - under review},
journal = {Medical Image Analysis},
volume = {33},
pages = {140 -- 144},
month = {October},
year = {2016},
pdf = {2016_j_MedIA.pdf},
html = {http://dx.doi.org/10.1016/j.media.2016.06.030},
arxiv = {},
code = {},
abstract = {A retrospective view on the past two decades of the field of medical image registration is presented, guided by the article "A survey of medical image registration" (Maintz and Viergever, 1998). It shows that the classification of the field introduced in that article is still usable, although some modifications to do justice to advances in the field would be due. The main changes over the last twenty years are the shift from extrinsic to intrinsic registration, the primacy of intensity-based registration, the breakthrough of nonlinear registration, the progress of inter-subject registration, and the availability of generic image registration software packages. Two problems that were already called urgent 20 years ago are even more urgent nowadays: validation of registration methods, and translation of the results of image registration research to clinical practice. It may be concluded that the field of medical image registration has evolved, but still is in need of further development in various aspects.},
}
@article{Xiao:2016,
abbr = {},
bibtex_show = {true},
author = {Xiao, Changyan and Stoel, Berend C. and Bakker, M. Els and Peng, Yuanyuan and Stolk, Jan and Staring, Marius},
title = {Pulmonary Fissure Detection in CT Images Using a Derivative of Stick Filter},
journal = {IEEE Transactions on Medical Imaging},
volume = {35},
number = {6},
pages = {1488 -- 1500},
month = {June},
year = {2016},
pdf = {2016_j_TMIc.pdf},
html = {http://dx.doi.org/10.1109/TMI.2016.2517680},
arxiv = {},
code = {},
abstract = {Pulmonary fissures are important landmarks for recognition of lung anatomy. In CT images, automatic detection of fissures is complicated by factors like intensity variability, pathological deformation and imaging noise. To circumvent this problem, we propose a derivative of stick (DoS) filter for fissure enhancement and a post-processing pipeline for subsequent segmentation. Considering the typical thin curvilinear shape of fissure profiles inside 2D cross-sections, the DoS filter is presented by first defining nonlinear derivatives along a triple stick kernel in varying directions. Then, to accommodate pathological abnormality and orientational deviation, a max-min cascading and multiple plane integration scheme is adopted to form a shape-tuned likelihood for discriminating 3D surface patches. During the post-processing stage, our main contribution is to isolate the fissure patches from adhering clutter by introducing a branch-point removal algorithm, and a multi-threshold merging framework is employed to compensate for local intensity inhomogeneity. The performance of our method was validated in experiments with two clinical CT data sets including 55 publicly available LOLA11 scans as well as separate left and right lung images from 23 GLUCOLD scans of COPD patients. Compared with manually delineated interlobar boundary references, our method obtained a high segmentation accuracy with median F1-scores of 0.833, 0.885, and 0.856 for the LOLA11, left and right lung images, respectively, whereas the corresponding indices for a conventional Wiemker filtering method were 0.687, 0.853, and 0.841. The good performance of our proposed method was also verified by visual inspection and demonstration on abnormal and pathological cases, where typical deformations were robustly detected together with normal fissures.},
}
@article{Saygili:2016,
abbr = {},
bibtex_show = {true},
author = {Saygili, Gorkem and Staring, Marius and Hendriks, Emile A.},
title = {Confidence Estimation for Medical Image Registration Based On Stereo Confidences},
journal = {IEEE Transactions on Medical Imaging},
volume = {35},
number = {2},
pages = {539 -- 549},
month = {February},
year = {2016},
pdf = {2016_j_TMIb.pdf},
html = {http://dx.doi.org/10.1109/TMI.2015.2481609},
arxiv = {},
code = {},
abstract = {In this paper, we propose a novel method to estimate the confidence of a registration that does not require any ground truth, is independent of the registration algorithm, and yields a confidence that is correlated with the amount of registration error. We first apply a local search to match patterns between the registered image pairs. The local search induces a cost space per voxel, which we explore further to estimate the confidence of the registration, similar to confidence estimation algorithms for stereo matching. We test our method on both synthetically generated registration errors and on real registrations with ground truth. The experimental results show that our confidence measure can estimate registration errors and is correlated with local errors.},
}
@article{Qiao:2016,
abbr = {},
bibtex_show = {true},
author = {Qiao, Yuchuan and van Lew, Baldur and Lelieveldt, Boudewijn P.F and Staring, Marius},
title = {Fast Automatic Step Size Estimation for Gradient Descent Optimization of Image Registration},
journal = {IEEE Transactions on Medical Imaging},
volume = {35},
number = {2},
pages = {391 -- 403},
month = {February},
year = {2016},
pdf = {2016_j_TMIa.pdf},
html = {http://dx.doi.org/10.1109/TMI.2015.2476354},
arxiv = {},
code = {https://github.com/SuperElastix/elastix},
abstract = {Fast automatic image registration is an important prerequisite for image-guided clinical procedures. However, due to the large number of voxels in an image and the complexity of registration algorithms, this process is often very slow. Among many classical optimization strategies, stochastic gradient descent is a powerful method to iteratively solve the registration problem. This procedure relies on a proper selection of the optimization step size, which is important for the optimization procedure to converge. This step size selection is difficult to perform manually, since it depends on the input data, similarity measure and transformation model. The Adaptive Stochastic Gradient Descent (ASGD) method has been proposed to automatically choose the step size, but it comes at a high computational cost, dependent on the number of transformation parameters.<br>In this paper, we propose a new computationally efficient method (fast ASGD) to automatically determine the step size for gradient descent methods, by considering the observed distribution of the voxel displacements between iterations. A relation between the step size and the expectation and variance of the observed distribution is derived. While ASGD has quadratic complexity with respect to the number of transformation parameters, the fast ASGD method has only linear complexity. Extensive validation has been performed on different datasets with different modalities, inter/intra subjects, different similarity measures and transformation models. To perform a large scale experiment on 3D MR brain data, we have developed efficient and reusable tools to exploit an international high performance computing facility. For all experiments, we obtained similar accuracy as ASGD. Moreover, the estimation time of the fast ASGD method is reduced to a very small value, from 40 seconds to less than 1 second when the number of parameters is 10<sup>5</sup>, almost 40 times faster. Depending on the registration settings, the total registration time is reduced by a factor of 2.5-7 for the experiments in this paper.},
}
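The displacement-based intuition can be illustrated as follows: choose the step size so that the largest voxel displacement caused by one gradient step stays near a user-chosen δ (in mm). This is a simplified reading of the idea, not the derived estimator with its expectation and variance terms.

```python
import numpy as np

def step_from_displacements(voxel_displacements, delta=1.0):
    """Scale the step so the maximum per-voxel displacement that one raw
    gradient step would induce is about delta millimetres."""
    max_disp = np.max(np.linalg.norm(voxel_displacements, axis=-1))
    return delta / (max_disp + 1e-12)

disp = np.random.randn(10000, 3) * 4.0     # displacements from a trial step (toy)
eta = step_from_displacements(disp, delta=1.0)
```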
@article{Hammelrath:2016,
abbr = {},
bibtex_show = {true},
author = {Hammelrath, Luam and {\v{S}}koki{\'c}, Sini{\v{s}}a and Khmelinskii, Artem and Hess, Andreas and van der Knaap, Noortje and Staring, Marius and Lelieveldt, Boudewijn P.F. and Wiedermann, Dirk and Hoehn, Mathias},
title = {Morphological maturation of the mouse brain: An in vivo MRI and histology investigation},
journal = {NeuroImage},
volume = {125},
number = {15},
pages = {144 -- 152},
month = {January},
year = {2016},
pdf = {2016_j_NI.pdf},
html = {http://dx.doi.org/10.1016/j.neuroimage.2015.10.009},
arxiv = {},
code = {},
abstract = {With the wide access to studies of selected gene expressions in transgenic animals, mice have become the dominant species as cerebral disease models. Many of these studies are performed on animals no more than eight weeks old, declared as adult animals. Based on earlier reports that full brain maturation requires at least three months in rats, there is a clear need to discern the corresponding minimal animal age that provides an "adult brain" in mice, in order to avoid modulation of disease progression/therapy studies by ongoing developmental changes. For this purpose, we have studied anatomical brain alterations of mice during their first six months of age. Using T2-weighted and diffusion-weighted MRI, structural and volume changes of the brain were identified and compared with histological analysis of myelination. Mouse brain volume was found to be almost stable already at three weeks, but cortex thickness kept decreasing continuously, with maximal changes during the first three months. Myelination is still increasing between three and six months, although the most dramatic changes are over by three months. While our results emphasize that mice should be at least three months old when adult animals are needed for brain studies, the preferred choice of one particular metric for future investigation goals will result in somewhat varying age windows of stabilization.},
}
@article{Shahzad:2015,
abbr = {},
bibtex_show = {true},
author = {Shahzad, Rahil and Dzyubachyk, Oleh and Staring, Marius and Kullberg, Joel and Johansson, Lars and Ahlstr{\"o}m, H{\r{a}}kan and Lelieveldt, Boudewijn P. F. and van der Geest, Rob J.},
title = {Automated Extraction and Labelling of the Arterial Tree from Whole-Body MRA Data},
journal = {Medical Image Analysis},
volume = {24},
number = {1},
pages = {28 -- 40},
month = {August},
year = {2015},
pdf = {2015_j_MedIA.pdf},
html = {http://dx.doi.org/10.1016/j.media.2015.05.008},
arxiv = {},
code = {},
abstract = {In this work, we present a fully automated algorithm for extraction of the 3D arterial tree and labelling of the tree segments from whole-body magnetic resonance angiography (WB-MRA) sequences. The algorithm consists of two core parts: (i) 3D volume reconstruction from different stations with simultaneous correction of different types of intensity inhomogeneity, and (ii) extraction of the arterial tree and subsequent labelling of the pruned extracted tree. Extraction of the arterial tree is performed using the probability map of the "contrast" class, which is obtained as one of the results of the inhomogeneity correction scheme. We demonstrate that such an approach is more robust than using the difference between the pre- and post-contrast channels traditionally used for this purpose. Labelling of the extracted tree is performed using a combination of graph-based and atlas-based approaches. Validation of our method with respect to the extracted tree was performed on the arterial tree subdivided into 32 segments, 82.4% of which were completely detected, 11.7% partially detected, and 5.9% missed, on a cohort of 35 subjects. With respect to automated labelling accuracy of the 32 segments, various registration strategies were investigated on a training set consisting of 10 scans. Further analysis on the test set consisting of 25 data sets indicates that 69% of the vessel centerline tree in the head and neck region, 80% in the thorax and abdomen region, and 84% in the legs was accurately labelled to the correct vessel segment. These results indicate the clinical potential of our approach in enabling fully automated and accurate analysis of the entire arterial tree. This is the first study that not only automatically extracts the WB-MRA arterial tree, but also labels the vessel tree segments.},
}
@article{Ninaber:2015,
abbr = {},
bibtex_show = {true},
pdf = {2015_j_EJR.pdf},
author = {Ninaber, Maarten K. and Stolk, Jan and Smit, Jasper and Le Roy, Ernest J. and Kroft, Lucia J.M. and Bakker, M.E. and de Vries Bouwstra, Jeska K. and Schouffoer, Anna A. and Staring, Marius and Stoel, Berend C.},
title = {Lung Structure And Function Relation In Systemic Sclerosis: Application Of Lung Densitometry},
journal = {European Journal of Radiology},
volume = {84},
number = {5},
pages = {975 -- 979},
month = {May},
year = {2015},
html = {http://dx.doi.org/10.1016/j.ejrad.2015.01.012},
arxiv = {},
code = {},
abstract = {<b>Introduction.</b> Interstitial lung disease occurs frequently in patients with systemic sclerosis (SSc). Quantitative computed tomography (CT) densitometry using the percentile density method may provide a sensitive assessment of lung structure for monitoring parenchymal damage. Therefore, we aimed to evaluate the optimal percentile density score in SSc by quantitative CT densitometry, against pulmonary function.<br><b>Material and Methods.</b> We investigated 41 SSc patients by chest CT scan, spirometry and gas transfer tests. Lung volumes and the nth percentile density (between 1 and 99%) of the entire lungs were calculated from CT histograms. The nth percentile density is defined as the threshold value of densities expressed in Hounsfield units. A prerequisite for an optimal percentage was its correlation with baseline DLCO%predicted. Two patients showed distinct changes in lung function 2 years after baseline. We obtained CT scans from these patients and performed progression analysis.<br><b>Results.</b> Regression analysis for the relation between DLCO%predicted and the nth percentile density was optimal at 85% (Perc85). There was significant agreement between Perc85 and DLCO%predicted (R = -0.49, P = 0.001) and FVC%predicted (R = -0.64, P < 0.001). Two patients showed a marked change in Perc85 over a two year period, but the localisation of change differed clearly.<br><b>Conclusions.</b> We identified Perc85 as optimal lung density parameter, which correlated significantly with DLCO and FVC, confirming a lung parenchymal structure-function relation in SSc. This provides support for future studies to determine whether structural changes do precede lung function decline.},
}
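The percentile density score itself is a one-liner once the lungs are segmented: Perc85 is the HU value below which 85% of lung voxels lie. A sketch with a toy voxel sample:

```python
import numpy as np

def percentile_density(lung_hu, perc=85):
    """nth percentile density: the HU threshold below which perc% of the
    segmented lung voxels fall."""
    return np.percentile(lung_hu, perc)

lung_hu = np.random.normal(-850, 60, size=1_000_000)   # toy healthy-lung HU sample
perc85 = percentile_density(lung_hu)                   # about -788 HU for this sample
```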
@article{Stoel:2015,
abbr = {},
bibtex_show = {true},
author = {Stoel, Berend C. and Marquering, Henk A. and Staring, Marius and Beenen, Ludo F. and Slump, Cornelis H. and Roos, Yvo B. and Majoie, Charles B.},
title = {Automated brain computed tomographic densitometry of early ischemic changes in acute stroke},
journal = {Journal of Medical Imaging},
volume = {2},
number = {1},
pages = {014004},
month = {March},
year = {2015},
pdf = {2015_j_JMI.pdf},
html = {http://dx.doi.org/10.1117/1.JMI.2.1.014004},
arxiv = {},
code = {},
abstract = {The Alberta Stroke Program Early CT Score (ASPECTS) scoring method is frequently used for quantifying early ischemic changes (EICs) in patients with acute ischemic stroke in clinical studies. However, varying and often limited interobserver agreement has been reported. Therefore, our goal was to develop and evaluate an automated brain densitometric method. It divides CT scans of the brain into ASPECTS regions using atlas-based segmentation. EICs are quantified by comparing the brain density between contralateral sides. This method was optimized and validated using CT data from 10 and 63 patients, respectively. The automated method was validated against manual ASPECTS, stroke severity at baseline and clinical outcome after 7 to 10 days (NIH Stroke Scale, NIHSS) and 3 months (modified Rankin Scale, mRS). Manual and automated ASPECTS showed similar and statistically significant correlations with baseline NIHSS (R=-0.399 and -0.277, respectively) and with follow-up mRS (R=-0.256 and -0.272), except for the follow-up NIHSS. Agreement between automated and consensus ASPECTS reading was similar to the interobserver agreement of manual ASPECTS (differences <1 point in 73% of cases). The automated ASPECTS method could, therefore, be used as a supplementary tool to assist manual scoring.},
}
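The contralateral density comparison described in this abstract can be illustrated as follows; the atlas labels and the left/right label pairing are assumed inputs from an atlas-based segmentation step, not the authors' actual pipeline:

import numpy as np

def contralateral_density_differences(ct_hu, region_labels, pairs):
    """Mean-density difference per left/right ASPECTS region pair.

    ct_hu:         3D CT volume in Hounsfield units
    region_labels: integer atlas labels, one per ASPECTS region
    pairs:         list of (left_label, right_label) tuples
    """
    diffs = {}
    for left, right in pairs:
        mean_left = ct_hu[region_labels == left].mean()
        mean_right = ct_hu[region_labels == right].mean()
        # A markedly hypodense side suggests early ischemic change.
        diffs[(left, right)] = mean_left - mean_right
    return diffs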
@article{Rudyanto:2014,
abbr = {},
bibtex_show = {true},
author = {Rudyanto, Rina D. and Kerkstra, Sjoerd and van Rikxoort, Eva M. and Fetita, Catalin and Brillet, Pierre-Yves and Lefevre, Christophe and Xue, Wenzhe and Zhu, Xiangjun and Liang, Jianming and {\"O}ks{\"u}z, Ilkay and {\"U}nay, Devrim and Kadipasaoglu, Kamuran and Est{\'e}par, Ra{\'u}l San Jos{\'e} and Ross, James C. and Washko, George R. and Prieto, Juan-Carlos and Hoyos, Marcela Hern{\'a}ndez and Orkisz, Maciej and Meine, Hans and H{\"u}llebrand, Markus and St{\"o}cker, Christina and Mir, Fernando Lopez and Naranjo, Valery and Villanueva, Eliseo and Staring, Marius and Xiao, Changyan and Stoel, Berend C. and Fabijanska, Anna and Smistad, Erik and Elster, Anne C. and Lindseth, Frank and Foruzan, Amir Hossein and Kiros, Ryan and Popuri, Karteek and Cobzas, Dana and Jimenez-Carretero, Daniel and Santos, Andres and Ledesma-Carbayo, Maria J. and Helmberger, Michael and Urschler, Martin and Pienn, Michael and Bosboom, Dennis G. H. and Campo, Arantza and Prokop, Mathias and de Jong, Pim A. and Ortiz-de-Solorzano, Carlos and Mu{\~n}oz-Barrutia, Arrate and van Ginneken, Bram},
title = {Comparing algorithms for automated vessel segmentation in Computed Tomography scans of the lung: The VESSEL12 study},
journal = {Medical Image Analysis},
volume = {18},
number = {7},
pages = {1217 - 1232},
month = {October},
year = {2014},
pdf = {2014_j_MedIA.pdf},
html = {http://dx.doi.org/10.1016/j.media.2014.07.003},
arxiv = {},
code = {},
abstract = {The VESSEL12 (VESsel SEgmentation in the Lung) challenge objectively compares the performance of different algorithms to identify vessels in thoracic computed tomography (CT) scans. Vessel segmentation is fundamental in computer-aided processing of data generated by 3D imaging modalities. As manual vessel segmentation is prohibitively time consuming, any real-world application requires some form of automation. Several approaches exist for automated vessel segmentation, but judging their relative merits is difficult due to a lack of standardized evaluation. We present an annotated reference dataset containing 20 CT scans and propose nine categories to perform a comprehensive evaluation of vessel segmentation algorithms from both academia and industry. Twenty algorithms participated in the VESSEL12 challenge, held at the International Symposium on Biomedical Imaging (ISBI) 2012. All results have been published on the VESSEL12 website http://vessel12.grand-challenge.org. The challenge remains ongoing and open to new participants. Our three contributions are: (1) an annotated reference dataset available online for evaluation of new algorithms; (2) a quantitative scoring system for objective comparison of algorithms; and (3) performance analysis of the strengths and weaknesses of the various vessel segmentation methods in the presence of various lung diseases.},
}
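For a flavour of how a candidate segmentation is scored against annotated reference data, a plain voxel-overlap (Dice) measure is sketched below; the challenge's actual scoring system, documented on the VESSEL12 website, is more elaborate than this single number:

import numpy as np

def dice_overlap(segmentation, reference):
    """Dice coefficient between a binary segmentation and a reference mask."""
    segmentation = np.asarray(segmentation, dtype=bool)
    reference = np.asarray(reference, dtype=bool)
    intersection = np.logical_and(segmentation, reference).sum()
    return 2.0 * intersection / (segmentation.sum() + reference.sum())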
@article{Staring:2014,
abbr = {},
bibtex_show = {true},
author = {Staring, Marius and Bakker, M.E. and Stolk, Jan and Shamonin, Denis P. and Reiber, Johan H.C. and Stoel, Berend C.},
title = {Towards Local Progression Estimation of Pulmonary Emphysema using CT},
journal = {Medical Physics},
volume = {41},
number = {2},
pages = {021905-1 - 021905-13},
month = {February},
year = {2014},
pdf = {2014_j_MP.pdf},
html = {http://dx.doi.org/10.1118/1.4851535},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Whole lung densitometry on chest CT images is an accepted method for measuring tissue destruction in patients with pulmonary emphysema in clinical trials. Progression measurement is required for evaluation of change in health condition and the effect of drug treatment. Information about the location of emphysema progression within the lung may be important for the correct interpretation of drug efficacy, or for determining a treatment plan. The purpose of this study is therefore to develop and validate methods that enable the local measurement of lung density changes, which requires proper modeling of the effect of respiration on density.<br><b>Methods:</b> Four methods, all based on registration of baseline and follow-up chest CT scans, are compared. The first naive method subtracts registered images. The second employs the so-called dry sponge model, where volume correction is performed using the determinant of the Jacobian of the transformation. The third and the fourth introduce a novel adaptation of the dry sponge model that circumvents its constant-mass assumption, which is shown to be invalid. The latter two methods require a third CT scan at a different inspiration level to estimate the patient-specific density-volume slope, where one method employs a global and the other a local slope. The methods were validated on CT scans of a phantom mimicking the lung, where mass and volume could be controlled. In addition, validation was performed on data of 21 patients with pulmonary emphysema.<br><b>Results:</b> The image registration method was optimized, leaving a registration error below half the slice increment (median 1.0 mm). The phantom study showed that the locally adapted slope model most accurately measured local progression. The systematic error in estimating progression, as measured on the phantom data, was below 2 g/l for a 70 ml (6%) volume difference, and 5 g/l for a 210 ml (19%) difference, if volume correction was applied. On the patient data an underlying linearity assumption relating lung volume change with density change was shown to hold (fit R<sup>2</sup> = 0.94), and globalized versions of the local models are consistent with global results (R<sup>2</sup> of 0.865 and 0.882 for the two adapted slope models, respectively).<br><b>Conclusions:</b> In conclusion, image matching and subsequent analysis of differences according to the proposed lung models i) has good local registration accuracy on patient data, ii) effectively eliminates a dependency on inspiration level at acquisition time, iii) accurately predicts progression in phantom data, and iv) is reasonably consistent with global results in patient data. It is therefore a potential future tool for assessing local emphysema progression in drug evaluation trials and in clinical practice.},
}
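The dry sponge correction mentioned in the Methods can be sketched as follows; the direction of the Jacobian (baseline-to-follow-up versus its inverse) and the HU-to-density shift are illustrative assumptions, not the paper's exact formulation:

import numpy as np

def local_density_change(baseline_hu, followup_hu_warped, jac_det):
    """Local density change under a constant-mass ('dry sponge') model.

    baseline_hu:        baseline CT densities on the common grid (HU)
    followup_hu_warped: follow-up densities warped to the baseline grid
    jac_det:            determinant of the Jacobian of the transformation,
                        i.e. the local volume change between the scans
    """
    # Lung density in g/l is approximately HU + 1000 (air = -1000 HU).
    # Under conservation of mass, a local volume change by a factor
    # jac_det scales density by 1/jac_det, so multiply it back out
    # before comparing with baseline.
    corrected = (followup_hu_warped + 1000.0) * jac_det - 1000.0
    return corrected - baseline_hu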
@article{Shamonin:2014,
abbr = {},
bibtex_show = {true},
author = {Shamonin, Denis P and Bron, Esther E and Lelieveldt, Boudewijn P.F. and Smits, Marion and Klein, Stefan and Staring, Marius},
title = {Fast Parallel Image Registration on CPU and GPU for Diagnostic Classification of Alzheimer's Disease},
journal = {Frontiers in Neuroinformatics},
volume = {7},
number = {50},
pages = {1-15},
month = {January},
year = {2014},
pdf = {2014_j_FNI.pdf},
html = {http://dx.doi.org/10.3389/fninf.2013.00050},
arxiv = {},
code = {https://github.com/SuperElastix/elastix},
abstract = {Nonrigid image registration is an important, but time-consuming task in medical image analysis. In typical neuroimaging studies, multiple image registrations are performed, e.g. for atlas-based segmentation or template construction. Faster image registration routines would therefore be beneficial.<br>In this paper we explore acceleration of the image registration package elastix by a combination of several techniques: i) parallelization on the CPU, to speed up the cost function derivative calculation; ii) parallelization on the GPU building on and extending the OpenCL framework from ITKv4, to speed up the Gaussian pyramid computation and the image resampling step; iii) exploitation of certain properties of the B-spline transformation model; iv) further software optimizations.<br>The accelerated registration tool is employed in a study on diagnostic classification of Alzheimer's disease and cognitively normal controls based on T1-weighted MRI. We selected 299 participants from the publicly available Alzheimer's Disease Neuroimaging Initiative database. Classification is performed with a support vector machine based on gray matter volumes as a marker for atrophy. We evaluated two types of strategies (voxel-wise and region-wise) that heavily rely on nonrigid image registration.<br>Parallelization and optimization resulted in an acceleration factor of 4-5x on an 8-core machine. Using OpenCL, a speedup factor of {\sim}2 was realized for computation of the Gaussian pyramids, and 15-60 for the resampling step, for larger images. The voxel-wise and the region-wise classification methods had an area under the receiver operating characteristic curve of 88% and 90%, respectively, both for standard and accelerated registration.<br>We conclude that the image registration package elastix was substantially accelerated, with nearly identical results to the non-optimized version. The new functionality will become available in the next release of elastix as open source under the BSD license.},
}
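elastix itself is driven from the command line; a small Python wrapper along the following lines illustrates the interface, where -f, -m, -p and -out are the documented elastix arguments, -threads caps the number of CPU threads, and the file names are placeholders:

import subprocess

def run_elastix(fixed, moving, params, out_dir, threads=8):
    """Run the elastix executable (assumed to be on the PATH)."""
    subprocess.run(
        ["elastix",
         "-f", fixed,               # fixed image
         "-m", moving,              # moving image
         "-p", params,              # registration parameter file
         "-out", out_dir,           # output directory
         "-threads", str(threads)], # CPU thread count
        check=True,
    )

# Example call (hypothetical files):
# run_elastix("fixed.mhd", "moving.mhd", "parameters.txt", "result/")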
@article{Baka:2014,
abbr = {},
bibtex_show = {true},
author = {Baka, Nora and Kaptein, Bart L. and Giphart, J. Erik and Staring, Marius and de Bruijne, Marleen and Lelieveldt, Boudewijn P.F. and Valstar, Edward},
title = {Evaluation of automated statistical shape model based knee kinematics from biplane fluoroscopy},
journal = {Journal of Biomechanics},
volume = {47},
number = {1},
pages = {122 - 129},
month = {January},
year = {2014},
pdf = {2014_j_JBM.pdf},
html = {http://dx.doi.org/10.1016/j.jbiomech.2013.09.022},
arxiv = {},
code = {},
abstract = {State-of-the-art fluoroscopic knee kinematic analysis methods require the patient-specific bone shapes segmented from CT or MRI. Substituting the patient-specific bone shapes with personalizable models, such as statistical shape models (SSM), could eliminate the CT/MRI acquisitions, and thereby decrease costs and radiation dose (when eliminating CT). SSM-based kinematics, however, have not yet been evaluated on clinically relevant joint motion parameters.<br>Therefore, in this work the applicability of SSMs for computing knee kinematics from biplane fluoroscopic sequences was explored. Kinematic precision with an edge-based automated bone tracking method using SSMs was evaluated on 6 cadaver and 10 in-vivo fluoroscopic sequences. The SSMs of the femur and the tibia-fibula were created using 61 training datasets. Kinematic precision was determined for medial-lateral tibial shift, anterior-posterior tibial drawer, joint distraction-contraction, flexion, tibial rotation and adduction. The relationship between kinematic precision and bone shape accuracy was also investigated.<br>The SSM-based kinematics resulted in sub-millimeter (0.48-0.81 mm) and approximately one degree (0.69-0.99{\textdegree}) median precision on the cadaveric knees compared to bone-marker-based kinematics. The precision on the in-vivo datasets was comparable to the cadaveric sequences when evaluated with a semi-automatic reference method. These results are promising, though further work is necessary to reach the accuracy of CT-based kinematics. We also demonstrated that a better shape reconstruction accuracy does not automatically imply a better kinematic precision. This result suggests that the ability of accurately fitting the edges in the fluoroscopic sequences has a larger role in determining the kinematic precision than the overall 3D shape accuracy.},
}
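At the heart of an SSM lies a linear generative model: a shape instance is the mean shape plus a weighted sum of principal modes. A minimal sketch, with the array shapes given as assumptions:

import numpy as np

def ssm_instance(mean_shape, modes, weights):
    """Generate a shape instance from a statistical shape model.

    mean_shape: (3N,) stacked coordinates of the mean shape
    modes:      (3N, k) matrix holding the first k principal modes
    weights:    (k,) mode weights, typically bounded by +/-3 standard deviations
    """
    return mean_shape + modes.dot(np.asarray(weights, dtype=float))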
@article{Mengler:2014,
abbr = {},
bibtex_show = {true},
author = {Mengler, Luam and Khmelinskii, Artem and Diedenhofen, Michael and Po, Chrystelle and Staring, Marius and Lelieveldt, Boudewijn P.F. and Hoehn, Mathias},
title = {Brain maturation of the adolescent rat cortex and striatum: changes in volume and myelination},
journal = {NeuroImage},
volume = {84},
number = {1},
pages = {35-44},
month = {January},
year = {2014},
pdf = {2014_j_NI.pdf},
html = {http://dx.doi.org/10.1016/j.neuroimage.2013.08.034},
arxiv = {},
code = {},
abstract = {Longitudinal studies on brain pathology and assessment of therapeutic strategies rely on a fully mature adult brain to exclude confounds of cerebral developmental changes. Thus, knowledge about onset of adulthood is indispensable for discrimination of developmental phase and adulthood. We have performed a high-resolution longitudinal MRI study at 11.7T of male Wistar rats between 21 days and six months of age, characterizing cerebral volume changes and tissue-specific myelination as a function of age. Cortical thickness reaches its final value at one month, while volume increases of the cortex, striatum and whole brain end only after two months. Myelin accretion is pronounced until the end of the third postnatal month. After this time, continuing myelination increases in cortex are still seen on histological analysis but are no longer reliably detectable with diffusion-weighted MRI due to parallel tissue restructuring processes. In conclusion, cerebral development continues over the first three months of age. This is of relevance for future studies on brain disease models, which should not start before the end of the third postnatal month to exclude serious confounds of continuing tissue development.},
}
@article{vantKlooster:2013,
abbr = {},
bibtex_show = {true},
author = {van 't Klooster, Ronald and Staring, Marius and Klein, Stefan and Kwee, R.M. and Kooi, M.E. and Reiber, J.H.C and Lelieveldt, Boudewijn P.F. and van der Geest, Rob J.},
title = {Automated registration of multispectral MR vessel wall images of the carotid artery},
journal = {Medical Physics},
volume = {40},
number = {12},
pages = {121904-1 - 121904-12},
month = {December},
year = {2013},
pdf = {2013_j_MP.pdf},
html = {http://dx.doi.org/10.1118/1.4829503},
arxiv = {},
code = {},
abstract = {<b>Purpose:</b> Atherosclerosis is the primary cause of heart disease and stroke. The detailed assessment of atherosclerosis of the carotid artery requires high resolution imaging of the vessel wall using multiple MR sequences with different contrast weightings. These images allow manual or automated classification of plaque components inside the vessel wall. Automated classification requires all sequences to be in alignment, which is hampered by patient motion. In clinical practice, correction of this motion is performed manually. Previous studies applied automated image registration to correct for motion using only non-deformable transformation models and did not perform a detailed quantitative validation. The purpose of this study is to develop an automated accurate 3D registration method, and to extensively validate this method on a large set of patient data. In addition, we quantified patient motion during scanning to investigate the need for correction.<br><b>Methods:</b> MR imaging studies (1.5T, dedicated carotid surface coil, Philips) from fifty-five TIA/stroke patients with ipsilateral <70% carotid artery stenosis were randomly selected from a larger cohort. Five MR pulse sequences were acquired around the carotid bifurcation, each containing nine transverse slices: T1W TFE, TOF, T2W TSE, and pre- and post-contrast T1W TSE. The images were manually segmented by delineating the lumen contour in each vessel wall sequence and were manually aligned by applying through-plane and in-plane translations to the images. To find the optimal automatic image registration method, different masks, choice of the fixed image, different types of the mutual information image similarity metric, and transformation models including 3D deformable transformation models, were evaluated. Evaluation of the automatic registration results was performed by comparing the lumen segmentations of the fixed image and moving image after registration.<br><b>Results:</b> The average required manual translation per image slice was 1.33 mm. Translations were larger the longer the patient had been inside the scanner. Manual alignment took 187.5 seconds per patient, resulting in a mean surface distance of 0.271 ± 0.127 mm. After minimal user interaction to generate the mask in the fixed image, the remaining sequences are automatically registered with a computation time of 52.0 seconds per patient. The optimal registration strategy used a circular mask with a diameter of 10 mm, a 3D B-spline transformation model with a control point spacing of 15 mm, mutual information as image similarity metric, and the pre-contrast T1W TSE as fixed image. A mean surface distance of 0.288 ± 0.128 mm was obtained with these settings, which is very close to the accuracy of the manual alignment procedure. The exact registration parameters and software were made publicly available.<br><b>Conclusions:</b> An automated registration method was developed and optimized, only needing two mouse clicks to mark the start and end point of the artery. Validation on a large group of patients showed that automated image registration has similar accuracy to the manual alignment procedure, substantially reducing the amount of user interaction needed, and is several times faster. In conclusion, we believe that the proposed automated method can replace the current manual procedure, thereby reducing the time to analyze the images.},
}
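The mean surface distance used for validation above can be computed from two contour point sets with nearest-neighbour queries. A symmetric variant is sketched below, assuming (N, 3) coordinate arrays in millimetres; the paper does not specify this exact implementation:

import numpy as np
from scipy.spatial import cKDTree

def mean_surface_distance(points_a, points_b):
    """Symmetric mean surface distance between two point sets (mm)."""
    d_ab = cKDTree(points_b).query(points_a)[0]  # each a to nearest b
    d_ba = cKDTree(points_a).query(points_b)[0]  # each b to nearest a
    return 0.5 * (d_ab.mean() + d_ba.mean())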
@article{Dzyubachyk:2013,
abbr = {},
bibtex_show = {true},
author = {Dzyubachyk, Oleh and Blaas, Jorik and Botha, Charl P. and Staring, Marius and Reijnierse, Monique and Bloem, Johan L. and van der Geest, Rob J. and Lelieveldt, Boudewijn P.F.},
title = {Comparative Exploration of Whole-Body MR through Locally Rigid Transforms},
journal = {International Journal of Computer Assisted Radiology and Surgery},
volume = {8},
number = {4},
pages = {635 - 647},
month = {July},
year = {2013},
pdf = {2013_j_IJCARS.pdf},
html = {http://dx.doi.org/10.1007/s11548-013-0820-z},
arxiv = {},
code = {},
abstract = {<i>Purpose</i> Whole-body MRI is seeing increasing use in the study and diagnosis of disease progression. In this, a central task is the visual assessment of the progressive changes that occur between two whole-body MRI datasets, taken at baseline and follow-up. The current radiological workflow for this consists of manually searching for each organ of interest in both scans, usually in multiple data channels, for further visual comparison. The large size of the datasets, significant posture differences, and changes in patient anatomy turn manual matching into an extremely labour-intensive task that requires high concentration from radiologists over long periods of time. This strongly limits productivity and increases the risk of underdiagnosis.<br><i>Materials and Methods</i> We present a novel approach to the comparative visual analysis of whole-body MRI follow-up data. Our method is based on interactive derivation of locally rigid transforms from a pre-computed whole-body deformable registration. Using this approach, baseline and follow-up slices can be interactively matched with a single mouse-click in the anatomical region of interest. In addition to the synchronized side-by-side baseline and matched follow-up slices, we have integrated four techniques to further facilitate the visual comparison of the two datasets: the "deformation sphere", the color fusion view, the magic lens, and a set of uncertainty iso-contours around the current region of interest.<br><i>Results</i> We have applied our method to the study of cancerous bone lesions over time in patients with Kahler's disease. During these studies, the radiologist carefully visually examines a large number of anatomical sites for changes. Our interactive locally rigid matching approach was found helpful for localizing cancerous lesions and visually assessing changes between different scans. Furthermore, each of the features integrated in our software was separately evaluated by the experts.<br><i>Conclusions</i> We demonstrated how our method significantly facilitates the examination of whole-body MR datasets in follow-up studies by enabling the rapid interactive matching of regions of interest and by the explicit visualization of change.},
}
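A locally rigid transform can be derived from a deformable registration by fitting a least-squares rigid motion to the displaced points in a neighbourhood around the clicked location. The sketch below shows only the fitting step (the Kabsch algorithm), with point correspondences assumed given by the precomputed registration:

import numpy as np

def fit_local_rigid(points, displaced):
    """Least-squares rigid transform (R, t) with displaced ~ R.points + t.

    points, displaced: (N, 3) corresponding coordinates, e.g. a small
    neighbourhood and its image under the deformable registration.
    """
    ca, cb = points.mean(axis=0), displaced.mean(axis=0)
    h = (points - ca).T.dot(displaced - cb)    # cross-covariance matrix
    u, _, vt = np.linalg.svd(h)
    d = np.sign(np.linalg.det(vt.T.dot(u.T)))  # guard against reflections
    r = vt.T.dot(np.diag([1.0, 1.0, d])).dot(u.T)
    t = cb - r.dot(ca)
    return r, t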
@article{Xiao:2013,
abbr = {},
bibtex_show = {true},
author = {Xiao, Changyan and Staring, Marius and Wang, Yaonan and Shamonin, Denis P. and Stoel, Berend C.},
title = {A Multiscale Bi-Gaussian Filter for Adjacent Curvilinear Structures Detection With Application to Vasculature Images},
journal = {IEEE Transactions on Image Processing},
volume = {22},
number = {1},
pages = {174 - 188},
month = {January},
year = {2013},
pdf = {2013_j_TIP.pdf},
html = {http://dx.doi.org/10.1109/TIP.2012.2216277},
arxiv = {},
code = {},
abstract = {The intensity or gray-level derivatives have been widely used in image segmentation and enhancement. Conventional derivative filters often suffer from an undesired merging of adjacent objects, due to their intrinsic usage of an inappropriately broad Gaussian kernel; as a result, neighboring structures cannot be properly resolved. To avoid this problem, we propose to replace the low-level Gaussian kernel with a bi-Gaussian function, which allows independent selection of scales on foreground and background. By selecting a narrow neighborhood for the background relative to the foreground, the proposed method will reduce interference from adjacent objects, while preserving the ability of intra-region smoothing. Our idea is inspired by a comparative analysis of existing line filters, where several traditional methods including the vesselness, gradient flux and medialness models are integrated into a uniform framework. The comparison subsequently aids in understanding the principles of the different filtering kernels, which is also a contribution of the paper. Based on some axiomatic scale-space assumptions, the full representation of our bi-Gaussian kernel is deduced. The popular {\gamma}-normalization scheme for multi-scale integration is extended to the bi-Gaussian operators. Finally, combined with a parameter-free shape estimation scheme, a derivative filter is developed for the typical applications of curvilinear structure detection and vasculature image enhancement. Experiments using synthetic and real data verify that the proposed method outperforms several conventional filters in separating closely located objects, while remaining robust to noise.},
}
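The bi-Gaussian idea is to use one scale for the foreground and a narrower one for the background. The sketch below stitches two Gaussians so the kernel is value-continuous at the transition; the published kernel imposes smoother matching conditions and a specific transition point, so this is a simplified illustration with the transition point as an assumption:

import numpy as np

def bi_gaussian_kernel_1d(sigma_f, sigma_b):
    """1-D bi-Gaussian kernel: a foreground Gaussian (sigma_f) inside
    |x| <= sigma_f, continued by a shifted, rescaled background
    Gaussian (sigma_b) outside."""
    def g(x, s):
        return np.exp(-0.5 * (x / s) ** 2) / (s * np.sqrt(2.0 * np.pi))

    radius = int(np.ceil(sigma_f + 4.0 * sigma_b))
    x = np.arange(-radius, radius + 1, dtype=float)
    x0 = sigma_f                                  # transition point (assumed)
    inner = g(x, sigma_f)
    outer = g(np.abs(x) - x0 + sigma_b, sigma_b)
    scale = g(x0, sigma_f) / g(sigma_b, sigma_b)  # value continuity at x0
    kernel = np.where(np.abs(x) <= x0, inner, scale * outer)
    return kernel / kernel.sum()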
@article{Murphy:2011,
abbr = {},