%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/
%% Created for Nick Tustison at 2013-07-10 09:24:20 -0400
%% Saved with string encoding Unicode (UTF-8)
@article{avants2011a,
Abstract = {The United States National Institutes of Health (NIH) commit significant support to open-source data and software resources in order to foment reproducibility in the biomedical imaging sciences. Here, we report and evaluate a recent product of this commitment: Advanced Neuroimaging Tools (ANTs), which is approaching its 2.0 release. The ANTs open source software library consists of a suite of state-of-the-art image registration, segmentation and template building tools for quantitative morphometric analysis. In this work, we use ANTs to quantify, for the first time, the impact of similarity metrics on the affine and deformable components of a template-based normalization study. We detail the ANTs implementation of three similarity metrics: squared intensity difference, a new and faster cross-correlation, and voxel-wise mutual information. We then use two-fold cross-validation to compare their performance on openly available, manually labeled, T1-weighted MRI brain image data of 40 subjects (UCLA's LPBA40 dataset). We report evaluation results on cortical and whole brain labels for both the affine and deformable components of the registration. Results indicate that the best ANTs methods are competitive with existing brain extraction results (Jaccard=0.958) and cortical labeling approaches. Mutual information affine mapping combined with cross-correlation diffeomorphic mapping gave the best cortical labeling results (Jaccard=0.669$\pm$0.022). Furthermore, our two-fold cross-validation allows us to quantify the similarity of templates derived from different subgroups. Our open code, data and evaluation scripts set performance benchmark parameters for this state-of-the-art toolkit. This is the first study to use a consistent transformation framework to provide a reproducible evaluation of the isolated effect of the similarity metric on optimal template construction and brain labeling.},
Author = {Avants, Brian B and Tustison, Nicholas J and Song, Gang and Cook, Philip A and Klein, Arno and Gee, James C},
Date-Added = {2013-07-10 13:23:20 +0000},
Date-Modified = {2013-07-10 13:24:20 +0000},
Doi = {10.1016/j.neuroimage.2010.09.025},
Journal = {Neuroimage},
Journal-Full = {NeuroImage},
Mesh = {Algorithms; Brain; Databases, Factual; Diagnostic Imaging; Head; Humans; Image Processing, Computer-Assisted; Linear Models; Models, Anatomic; Models, Neurological; Population; Reproducibility of Results; Software},
Month = {Feb},
Number = {3},
Pages = {2033--2044},
Pmc = {PMC3065962},
Pmid = {20851191},
Pst = {ppublish},
Title = {A reproducible evaluation of {ANTs} similarity metric performance in brain image registration},
Volume = {54},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.neuroimage.2010.09.025}}
@article{tustison2009,
Author = {Tustison, Nicholas J and Gee, James C},
Date-Added = {2013-06-14 23:38:08 +0000},
Date-Modified = {2013-06-15 00:01:45 +0000},
Journal = {Insight Journal},
Title = {Introducing {D}ice, {J}accard, and Other Label Overlap Measures To {ITK}},
Year = {2009}}
@article{warfield2004,
Abstract = {Characterizing the performance of image segmentation approaches has been a persistent challenge. Performance analysis is important since segmentation algorithms often have limited accuracy and precision. Interactive drawing of the desired segmentation by human raters has often been the only acceptable approach, and yet suffers from intra-rater and inter-rater variability. Automated algorithms have been sought in order to remove the variability introduced by raters, but such algorithms must be assessed to ensure they are suitable for the task. The performance of raters (human or algorithmic) generating segmentations of medical images has been difficult to quantify because of the difficulty of obtaining or estimating a known true segmentation for clinical data. Although physical and digital phantoms can be constructed for which ground truth is known or readily estimated, such phantoms do not fully reflect clinical images due to the difficulty of constructing phantoms which reproduce the full range of imaging characteristics and normal and pathological anatomical variability observed in clinical data. Comparison to a collection of segmentations by raters is an attractive alternative since it can be carried out directly on the relevant clinical imaging data. However, the most appropriate measure or set of measures with which to compare such segmentations has not been clarified and several measures are used in practice. We present here an expectation-maximization algorithm for simultaneous truth and performance level estimation (STAPLE). The algorithm considers a collection of segmentations and computes a probabilistic estimate of the true segmentation and a measure of the performance level represented by each segmentation. The source of each segmentation in the collection may be an appropriately trained human rater or raters, or may be an automated segmentation algorithm. 
The probabilistic estimate of the true segmentation is formed by estimating an optimal combination of the segmentations, weighting each segmentation depending upon the estimated performance level, and incorporating a prior model for the spatial distribution of structures being segmented as well as spatial homogeneity constraints. STAPLE is straightforward to apply to clinical imaging data, it readily enables assessment of the performance of an automated image segmentation algorithm, and enables direct comparison of human rater and algorithm performance.},
Author = {Warfield, Simon K and Zou, Kelly H and Wells, William M},
Date-Added = {2013-06-14 03:11:03 +0000},
Date-Modified = {2013-06-14 03:11:03 +0000},
Doi = {10.1109/TMI.2004.828354},
Journal = {IEEE Trans Med Imaging},
Journal-Full = {IEEE transactions on medical imaging},
Mesh = {Algorithms; Brain; Decision Making, Computer-Assisted; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Infant, Newborn; Magnetic Resonance Imaging; Markov Chains; Models, Statistical; Observer Variation; Phantoms, Imaging; Reproducibility of Results; Sensitivity and Specificity},
Month = {Jul},
Number = {7},
Pages = {903--921},
Pmc = {PMC1283110},
Pmid = {15250643},
Pst = {ppublish},
Title = {Simultaneous truth and performance level estimation ({STAPLE}): an algorithm for the validation of image segmentation},
Volume = {23},
Year = {2004},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/TMI.2004.828354}}
@incollection{verhoek2011,
Author = {Michael Verhoek and Mohammad Yaqub and John McManigle and J. Alison Noble},
Booktitle = {Machine Learning in Medical Imaging},
Date-Added = {2013-04-09 16:41:08 +0000},
Date-Modified = {2013-04-09 22:21:59 +0000},
Doi = {10.1007/978-3-642-24319-6_10},
Editor = {Kenji Suzuki and Fei Wang and Dinggang Shen and Pingkun Yan},
Isbn = {978-3-642-24318-9},
Pages = {75--82},
Publisher = {Springer Berlin Heidelberg},
Series = {Lecture Notes in Computer Science},
Title = {Learning Optical Flow Propagation Strategies Using Random Forests for Fast Segmentation in Dynamic {2D} and {3D} Echocardiography},
Volume = {7009},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1007/978-3-642-24319-6_10}}
@article{iglesias2010,
Abstract = {Learning-based approaches have become increasingly practical in medical imaging. For a supervised learning strategy, the quality of the trained algorithm (usually a classifier) is heavily dependent on the amount, as well as quality, of the available training data. It is often very time-consuming to obtain the ground truth manual delineations. In this paper, we propose a semi-supervised learning algorithm and show its application to skull stripping in brain MRI. The resulting method takes advantage of existing state-of-the-art systems, such as BET and FreeSurfer, to sample unlabeled data in an agreement-based framework. Using just two labeled and a set of unlabeled MRI scans, a voxel-based random forest classifier is trained to perform the skull stripping. Our system is practical, and it displays significant improvement over supervised approaches, BET and FreeSurfer in two datasets (60 test images).},
Author = {Iglesias, Juan Eugenio and Liu, Cheng-Yi and Thompson, Paul and Tu, Zhuowen},
Date-Added = {2013-04-09 16:38:11 +0000},
Date-Modified = {2013-04-09 16:38:11 +0000},
Journal = {Med Image Comput Comput Assist Interv},
Journal-Full = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention},
Mesh = {Algorithms; Artificial Intelligence; Brain; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Pattern Recognition, Automated; Reproducibility of Results; Sensitivity and Specificity; Signal Processing, Computer-Assisted; Skull; Subtraction Technique},
Number = {Pt 3},
Pages = {147--154},
Pmid = {20879394},
Pst = {ppublish},
Title = {Agreement-based semi-supervised learning for skull stripping},
Volume = {13},
Year = {2010}}
@article{yi2009,
Abstract = {A new algorithm is presented for the automatic segmentation and classification of brain tissue from 3D MR scans. It uses discriminative Random Decision Forest classification and takes into account partial volume effects. This is combined with correction of intensities for the MR bias field, in conjunction with a learned model of spatial context, to achieve accurate voxel-wise classification. Our quantitative validation, carried out on existing labelled datasets, demonstrates improved results over the state of the art, especially for the cerebro-spinal fluid class which is the most difficult to label accurately.},
Author = {Yi, Zhao and Criminisi, Antonio and Shotton, Jamie and Blake, Andrew},
Date-Added = {2013-04-09 16:35:37 +0000},
Date-Modified = {2013-04-09 22:22:11 +0000},
Journal = {Med Image Comput Comput Assist Interv},
Journal-Full = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention},
Mesh = {Algorithms; Artificial Intelligence; Brain; Data Interpretation, Statistical; Discriminant Analysis; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Imaging, Three-Dimensional; Information Storage and Retrieval; Magnetic Resonance Imaging; Pattern Recognition, Automated; Reproducibility of Results; Sensitivity and Specificity; Subtraction Technique},
Number = {Pt 2},
Pages = {558--565},
Pmid = {20426156},
Pst = {ppublish},
Title = {Discriminative, semantic segmentation of brain tissue in {MR} images},
Volume = {12},
Year = {2009}}
@article{criminisi2013,
Abstract = {This paper proposes a new algorithm for the efficient, automatic detection and localization of multiple anatomical structures within three-dimensional computed tomography (CT) scans. Applications include selective retrieval of patients images from PACS systems, semantic visual navigation and tracking radiation dose over time. The main contribution of this work is a new, continuous parametrization of the anatomy localization problem, which allows it to be addressed effectively by multi-class random regression forests. Regression forests are similar to the more popular classification forests, but trained to predict continuous, multi-variate outputs, where the training focuses on maximizing the confidence of output predictions. A single pass of our probabilistic algorithm enables the direct mapping from voxels to organ location and size. Quantitative validation is performed on a database of 400 highly variable CT scans. We show that the proposed method is more accurate and robust than techniques based on efficient multi-atlas registration and template-based nearest-neighbor detection. Due to the simplicity of the regressor's context-rich visual features and the algorithm's parallelism, these results are achieved in typical run-times of only ∼4s on a conventional single-core machine.},
Author = {A Criminisi and D Robertson and E Konukoglu and J Shotton and S Pathak and S White and K Siddiqui},
Date-Added = {2013-04-09 16:32:18 +0000},
Date-Modified = {2013-04-09 16:45:49 +0000},
Doi = {10.1016/j.media.2013.01.001},
Journal = {Med Image Anal},
Journal-Full = {Medical image analysis},
Month = {Jan},
Pmid = {23410511},
Pst = {aheadofprint},
Title = {Regression forests for efficient anatomy detection and localization in computed tomography scans},
Year = {2013},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.media.2013.01.001}}
@inproceedings{geremia2012,
Author = {E. Geremia and B. H. Menze and N. Ayache},
Booktitle = {Proceedings of MICCAI-BRATS 2012},
Date-Added = {2013-04-09 16:30:08 +0000},
Date-Modified = {2013-04-09 22:20:45 +0000},
Month = {Oct},
Title = {Spatial Decision Forests for Glioma Segmentation in Multi-Channel {MR} Images},
Year = {2012}}
@article{viola2005,
Abstract = {This paper describes a pedestrian detection system that integrates image intensity information with motion information. We use a detection style algorithm that scans a detector over two consecutive frames of a video sequence. The detector is trained (using AdaBoost) to take advantage of both motion and appearance information to detect a walking person. Past approaches have built detectors based on motion information or detectors based on appearance information, but ours is the first to combine both sources of information in a single detector. The implementation described runs at about 4 frames/second, detects pedestrians at very small scales (as small as 20 x 15 pixels), and has a very low false positive rate.
Our approach builds on the detection work of Viola and Jones. Novel contributions of this paper include: (i) development of a representation of image motion which is extremely efficient, and (ii) implementation of a state of the art pedestrian detection system which operates on low resolution images under difficult conditions (such as rain and snow).},
Author = {Viola, P and Jones, MJ and Snow, D},
Date-Added = {2013-04-09 16:28:38 +0000},
Date-Modified = {2013-04-09 16:28:38 +0000},
Isi = {000229049200004},
Isi-Recid = {144361209},
Isi-Ref-Recids = {144361210 59366261 116629405 100945247 101558405 144361211 128262616 132539103 144361212 144361213 123892680 144361214 134397090 134006604},
Iso-Source-Abbreviation = {Int J Comput Vision},
Journal = {International Journal of Computer Vision},
Keywords = {pedestrian detection; human sensing; boosting; tracking},
Pages = {153--161},
Times-Cited = {166},
Title = {Detecting pedestrians using patterns of motion and appearance},
Type = {Proceedings Paper},
Volume = {63},
Year = {2005},
Bdsk-Url-1 = {http://ws.isiknowledge.com/cps/openurl/service?url_ver=Z39.88-2004&rft_id=info:ut/000229049200004}}
@article{liaw2002,
Author = {Andy Liaw and Matthew Wiener},
Date-Added = {2013-04-09 16:14:15 +0000},
Date-Modified = {2013-04-09 16:17:05 +0000},
Journal = {R News},
Number = {3},
Pages = {18--22},
Title = {Classification and Regression by {randomForest}},
Volume = {2},
Year = {2002}}
@article{breiman1996,
Abstract = {Bagging predictors is a method for generating multiple versions of a predictor and using these to gel an aggregated predictor. The aggregation averages over the versions when predicting a numerical outcome and does a plurality vote when predicting a class. The multiple versions are formed by making bootstrap replicates of the learning set and using these as new learning sets. Tests on real and simulated data sets using classification and regression trees and subset selection in linear regression show that bagging can give substantial gains in accuracy. The vital element is the instability of the prediction method. If perturbing the learning set can cause significant changes in the predictor constructed, then bagging can improve accuracy.},
Author = {Breiman, L},
Date-Added = {2013-04-09 16:07:04 +0000},
Date-Modified = {2013-04-09 16:07:04 +0000},
Isi = {A1996UZ38000003},
Isi-Recid = {96837512},
Isi-Ref-Recids = {41702097 53594983 96837513 96564139 56736490 96837514 96837515 87253760 75543838 96837516 96837517 89931393 60729608 70388986 71636877 74299770},
Iso-Source-Abbreviation = {Mach Learn},
Journal = {Machine Learning},
Keywords = {aggregation; bootstrap; averaging; combining},
Pages = {123--140},
Times-Cited = {4032},
Title = {Bagging predictors},
Volume = {24},
Year = {1996},
Bdsk-Url-1 = {http://ws.isiknowledge.com/cps/openurl/service?url_ver=Z39.88-2004&rft_id=info:ut/A1996UZ38000003}}
@techreport{criminisi2011,
Author = {A. Criminisi and J. Shotton and E. Konukoglu},
Date-Added = {2013-04-09 15:55:19 +0000},
Date-Modified = {2013-04-09 16:46:31 +0000},
Institution = {Microsoft Research},
Title = {Decision Forests for Classification, Regression, Density Estimation, Manifold Learning and Semi-Supervised Learning},
Year = {2011}}
@article{schapire1990,
Author = {Schapire, RE},
Date-Added = {2013-04-09 14:53:28 +0000},
Date-Modified = {2013-04-09 14:53:50 +0000},
Isi = {A1990DR72200005},
Isi-Recid = {73226534},
Isi-Ref-Recids = {37393899 41453320 69036081 73226535 62366088 70949856 73226536 70949860 73226537 73226538 73226539 73226540 70488056 70488061 73226541 8601675 73226542 72135619 73226543 66849114 70488063 67390994 66708420 73226544 53800470},
Iso-Source-Abbreviation = {Mach Learn},
Journal = {Machine Learning},
Pages = {197--227},
Times-Cited = {753},
Title = {The strength of weak learnability},
Volume = {5},
Year = {1990},
Bdsk-Url-1 = {http://ws.isiknowledge.com/cps/openurl/service?url_ver=Z39.88-2004&rft_id=info:ut/A1990DR72200005}}
@article{freund1997,
Abstract = {In the first part of the paper we consider the problem of dynamically apportioning resources among a set of options in a worst-case on-line framework. The model we study can be interpreted as a broad, abstract extension of the well-studied on-line prediction model to a general decision-theoretic setting. We show that the multiplicative weight-update Littlestone-Warmuth rule can be adapted to this model, yielding bounds that are slightly weaker in some cases, but applicable to a considerably more general class of learning problems. We show how the resulting learning algorithm can be applied to a variety of problems, including gambling, multiple-outcome prediction, repeated games, and prediction of points in R-n. In the second part of the paper we apply the multiplicative weight-update technique to derive a new boosting algorithm. This boosting algorithm does not require any prior knowledge about the performance of the weak learning algorithm, We also study generalizations of the new boosting algorithm to the problem of learning functions whose range, rather than being binary, is an arbitrary finite set or a bounded segment of the real line. (C) 1997 Academic Press.},
Author = {Freund, Y and Schapire, RE},
Date-Added = {2013-04-09 14:51:49 +0000},
Date-Modified = {2013-04-09 14:51:49 +0000},
Isi = {A1997XT05700011},
Isi-Recid = {101819767},
Isi-Ref-Recids = {73811830 9924809 101819768 90870234 101766117 76358257 100342113 101819769 95873021 92842289 101819770 101819771 101819772 101819773 8929374 101766124 101819774 101819775 90870193 93055060 88626043 101819776 73226534 57425374 101819777 101819778 42240754},
Iso-Source-Abbreviation = {J Comput Syst Sci},
Journal = {Journal of Computer and System Sciences},
Pages = {119--139},
Times-Cited = {2367},
Title = {A decision-theoretic generalization of on-line learning and an application to boosting},
Type = {Proceedings Paper},
Volume = {55},
Year = {1997},
Bdsk-Url-1 = {http://ws.isiknowledge.com/cps/openurl/service?url_ver=Z39.88-2004&rft_id=info:ut/A1997XT05700011}}
@article{geremia2011,
Abstract = {A new algorithm is presented for the automatic segmentation of Multiple Sclerosis (MS) lesions in 3D Magnetic Resonance (MR) images. It builds on a discriminative random decision forest framework to provide a voxel-wise probabilistic classification of the volume. The method uses multi-channel MR intensities (T1, T2, and FLAIR), knowledge on tissue classes and long-range spatial context to discriminate lesions from background. A symmetry feature is introduced accounting for the fact that some MS lesions tend to develop in an asymmetric way. Quantitative evaluation of the proposed methods is carried out on publicly available labeled cases from the MICCAI MS Lesion Segmentation Challenge 2008 dataset. When tested on the same data, the presented method compares favorably to all earlier methods. In an a posteriori analysis, we show how selected features during classification can be ranked according to their discriminative power and reveal the most important ones.},
Author = {Geremia, Ezequiel and Clatz, Olivier and Menze, Bjoern H and Konukoglu, Ender and Criminisi, Antonio and Ayache, Nicholas},
Date-Added = {2013-04-09 14:20:56 +0000},
Date-Modified = {2013-04-09 22:20:33 +0000},
Doi = {10.1016/j.neuroimage.2011.03.080},
Journal = {Neuroimage},
Journal-Full = {NeuroImage},
Mesh = {Algorithms; Brain Mapping; Decision Trees; Humans; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Multiple Sclerosis},
Month = {Jul},
Number = {2},
Pages = {378--390},
Pmid = {21497655},
Pst = {ppublish},
Title = {Spatial decision forests for {MS} lesion segmentation in multi-channel magnetic resonance images},
Volume = {57},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.neuroimage.2011.03.080}}
@article{das2009,
Abstract = {Cortical thickness is an important biomarker for image-based studies of the brain. A diffeomorphic registration based cortical thickness (DiReCT) measure is introduced where a continuous one-to-one correspondence between the gray matter-white matter interface and the estimated gray matter-cerebrospinal fluid interface is given by a diffeomorphic mapping in the image space. Thickness is then defined in terms of a distance measure between the interfaces of this sheet like structure. This technique also provides a natural way to compute continuous estimates of thickness within buried sulci by preventing opposing gray matter banks from intersecting. In addition, the proposed method incorporates neuroanatomical constraints on thickness values as part of the mapping process. Evaluation of this method is presented on synthetic images. As an application to brain images, a longitudinal study of thickness change in frontotemporal dementia (FTD) spectrum disorder is reported.},
Author = {Das, Sandhitsu R and Avants, Brian B and Grossman, Murray and Gee, James C},
Date-Added = {2013-04-09 13:18:14 +0000},
Date-Modified = {2013-04-09 13:18:14 +0000},
Doi = {10.1016/j.neuroimage.2008.12.016},
Journal = {Neuroimage},
Journal-Full = {NeuroImage},
Mesh = {Aged; Algorithms; Brain Mapping; Cerebral Cortex; Dementia; Humans; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Middle Aged},
Month = {Apr},
Number = {3},
Pages = {867--879},
Pmc = {PMC2836782},
Pmid = {19150502},
Pst = {ppublish},
Title = {Registration based cortical thickness measurement},
Volume = {45},
Year = {2009},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.neuroimage.2008.12.016}}
@article{avants2008a,
Abstract = {One of the most challenging problems in modern neuroimaging is detailed characterization of neurodegeneration. Quantifying spatial and longitudinal atrophy patterns is an important component of this process. These spatiotemporal signals will aid in discriminating between related diseases, such as frontotemporal dementia (FTD) and Alzheimer's disease (AD), which manifest themselves in the same at-risk population. Here, we develop a novel symmetric image normalization method (SyN) for maximizing the cross-correlation within the space of diffeomorphic maps and provide the Euler-Lagrange equations necessary for this optimization. We then turn to a careful evaluation of our method. Our evaluation uses gold standard, human cortical segmentation to contrast SyN's performance with a related elastic method and with the standard ITK implementation of Thirion's Demons algorithm. The new method compares favorably with both approaches, in particular when the distance between the template brain and the target brain is large. We then report the correlation of volumes gained by algorithmic cortical labelings of FTD and control subjects with those gained by the manual rater. This comparison shows that, of the three methods tested, SyN's volume measurements are the most strongly correlated with volume measurements gained by expert labeling. This study indicates that SyN, with cross-correlation, is a reliable method for normalizing and making anatomical measurements in volumetric MRI of patients and at-risk elderly individuals.},
Author = {Avants, B B and Epstein, C L and Grossman, M and Gee, J C},
Date-Added = {2013-04-09 13:13:19 +0000},
Date-Modified = {2013-04-09 13:13:32 +0000},
Doi = {10.1016/j.media.2007.06.004},
Journal = {Med Image Anal},
Journal-Full = {Medical image analysis},
Mesh = {Algorithms; Atrophy; Cerebral Cortex; Dementia; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging},
Month = {Feb},
Number = {1},
Pages = {26--41},
Pmc = {PMC2276735},
Pmid = {17659998},
Pst = {ppublish},
Title = {Symmetric diffeomorphic image registration with cross-correlation: evaluating automated labeling of elderly and neurodegenerative brain},
Volume = {12},
Year = {2008},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.media.2007.06.004}}
@article{breiman2001,
Author = {L. Breiman},
Date-Added = {2013-03-25 17:08:02 +0000},
Date-Modified = {2013-03-25 17:08:55 +0000},
Doi = {10.1023/A:1010933404324},
Journal = {Machine Learning},
Number = {1},
Pages = {5--32},
Title = {Random forests},
Volume = {45},
Year = {2001},
Bdsk-Url-1 = {http://dx.doi.org/10.1023/A:1010933404324}}
@inproceedings{ho1995,
Author = {Tin Kam Ho},
Booktitle = {Document Analysis and Recognition, 1995., Proceedings of the Third International Conference on},
Date-Added = {2013-03-25 16:58:10 +0000},
Date-Modified = {2013-03-25 16:58:22 +0000},
Doi = {10.1109/ICDAR.1995.598994},
Keywords = {decision theory;handwriting recognition;optical character recognition;complexity;decision trees;generalization accuracy;handwritten digits;random decision forests;stochastic modeling;suboptimal accuracy;tree-based classifiers;Classification tree analysis;Decision trees;Handwriting recognition;Hidden Markov models;Multilayer perceptrons;Optimization methods;Stochastic processes;Testing;Tin;Training data},
Pages = {278--282},
Title = {Random decision forests},
Volume = {1},
Year = {1995},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/ICDAR.1995.598994}}
@article{amit1997,
Author = {Yali Amit and Donald Geman},
Date-Added = {2013-03-25 16:55:03 +0000},
Date-Modified = {2013-03-25 16:55:29 +0000},
Journal = {Neural Computation},
Pages = {1545--1588},
Title = {Shape quantization and recognition with randomized trees},
Volume = {9},
Year = {1997}}
@article{maurer2003,
Author = {C. R. Maurer and Qi Rensheng and V. Raghavan},
Date-Added = {2013-03-25 15:50:50 +0000},
Date-Modified = {2013-04-09 22:21:18 +0000},
Doi = {10.1109/TPAMI.2003.1177156},
Issn = {0162-8828},
Journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
Keywords = {computational complexity;computational geometry;image processing;transforms;Voronoi diagram;anisotropic voxel dimensions;binary images;chamfer metrics;dimensionality reduction;exact Euclidean DT;exact Euclidean distance transform computation;feature voxels;linear time algorithm;linear time complexity;multidimensional binary image;partial Voronoi diagram construction;Anisotropic magnetoresistance;Computer vision;Euclidean distance;Image processing;Image registration;Interpolation;Nearest neighbor searches;Pattern matching;Skeleton;Surface morphology},
Number = {2},
Pages = {265--270},
Title = {A linear time algorithm for computing exact {E}uclidean distance transforms of binary images in arbitrary dimensions},
Volume = {25},
Year = {2003},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/TPAMI.2003.1177156}}
@article{tustison2010,
Abstract = {A variant of the popular nonparametric nonuniform intensity normalization (N3) algorithm is proposed for bias field correction. Given the superb performance of N3 and its public availability, it has been the subject of several evaluation studies. These studies have demonstrated the importance of certain parameters associated with the B-spline least-squares fitting. We propose the substitution of a recently developed fast and robust B-spline approximation routine and a modified hierarchical optimization scheme for improved bias field correction over the original N3 algorithm. Similar to the N3 algorithm, we also make the source code, testing, and technical documentation of our contribution, which we denote as "N4ITK," available to the public through the Insight Toolkit of the National Institutes of Health. Performance assessment is demonstrated using simulated data from the publicly available Brainweb database, hyperpolarized (3)He lung image data, and 9.4T postmortem hippocampus data.},
Author = {Tustison, Nicholas J and Avants, Brian B and Cook, Philip A and Zheng, Yuanjie and Egan, Alexander and Yushkevich, Paul A and Gee, James C},
Date-Added = {2013-03-24 03:42:39 +0000},
Date-Modified = {2013-04-09 22:21:44 +0000},
Doi = {10.1109/TMI.2010.2046908},
Journal = {IEEE Trans Med Imaging},
Journal-Full = {IEEE transactions on medical imaging},
Mesh = {Algorithms; Artifacts; Brain; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Reproducibility of Results; Sensitivity and Specificity},
Month = {Jun},
Number = {6},
Pages = {1310--1320},
Pmc = {PMC3071855},
Pmid = {20378467},
Pst = {ppublish},
Title = {{N4ITK}: improved {N3} bias correction},
Volume = {29},
Year = {2010},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/TMI.2010.2046908}}
@article{nyul2000,
Abstract = {One of the major drawbacks of magnetic resonance imaging (MRI) has been the lack of a standard and quantifiable interpretation of image intensities. Unlike in other modalities, such as X-ray computerized tomography, MR images taken for the same patient on the same scanner at different times may appear different from each other due to a variety of scanner-dependent variations and, therefore, the absolute intensity values do not have a fixed meaning. We have devised a two-step method wherein all images (independent of patients and the specific brand of the MR scanner used) can be transformed in such a way that for the same protocol and body region, in the transformed images similar intensities will have similar tissue meaning. Standardized images can be displayed with fixed windows without the need of per-case adjustment. More importantly, extraction of quantitative information about healthy organs or about abnormalities can be considerably simplified. This paper introduces and compares new variants of this standardizing method that can help to overcome some of the problems with the original method.},
Author = {Ny{\'u}l, L. G. and Udupa, J. K. and Zhang, X.},
Date-Added = {2013-03-24 03:42:30 +0000},
Date-Modified = {2013-04-09 22:21:29 +0000},
Doi = {10.1109/42.836373},
Journal = {IEEE Trans Med Imaging},
Journal-Full = {IEEE transactions on medical imaging},
Mesh = {Algorithms; Brain; Humans; Image Processing, Computer-Assisted; Magnetic Resonance Imaging; Multiple Sclerosis},
Month = {Feb},
Number = {2},
Pages = {143--150},
Pmid = {10784285},
Pst = {ppublish},
Title = {New variants of a method of {MRI} scale standardization},
Volume = {19},
Year = {2000},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/42.836373}}
@article{prastawa2003,
Abstract = {RATIONALE AND OBJECTIVES: Manual segmentation of brain tumors from magnetic resonance images is a challenging and time-consuming task. An automated system has been developed for brain tumor segmentation that will provide objective, reproducible segmentations that are close to the manual results. Additionally, the method segments white matter, grey matter, cerebrospinal fluid, and edema. The segmentation of pathology and healthy structures is crucial for surgical planning and intervention.
MATERIALS AND METHODS: The method performs the segmentation of a registered set of magnetic resonance images using an expectation-maximization scheme. The segmentation is guided by a spatial probabilistic atlas that contains expert prior knowledge about brain structures. This atlas is modified with the subject-specific brain tumor prior that is computed based on contrast enhancement.
RESULTS: Five cases with different types of tumors are selected for evaluation. The results obtained from the automatic segmentation program are compared with results from manual and semi-automated methods. The automated method yields results that have surface distances at roughly 1-4 mm compared with the manual results.
CONCLUSION: The automated method can be applied to different types of tumors. Although its performance is below that of the semi-automated method, it has the advantage of requiring no user supervision.},
Author = {Prastawa, Marcel and Bullitt, Elizabeth and Moon, Nathan and Van Leemput, Koen and Gerig, Guido},
Date-Added = {2013-03-24 03:14:00 +0000},
Date-Modified = {2013-03-24 03:14:00 +0000},
Journal = {Acad Radiol},
Journal-Full = {Academic radiology},
Mesh = {Automation; Brain Neoplasms; Humans; Image Processing, Computer-Assisted; Magnetic Resonance Imaging; Sensitivity and Specificity},
Month = {Dec},
Number = {12},
Pages = {1341--1348},
Pmc = {PMC2430604},
Pmid = {14697002},
Pst = {ppublish},
Title = {Automatic brain tumor segmentation by subject specific modification of atlas priors},
Volume = {10},
Year = {2003}}
@article{menze2010,
Abstract = {We introduce a generative probabilistic model for segmentation of tumors in multi-dimensional images. The model allows for different tumor boundaries in each channel, reflecting difference in tumor appearance across modalities. We augment a probabilistic atlas of healthy tissue priors with a latent atlas of the lesion and derive the estimation algorithm to extract tumor boundaries and the latent atlas from the image data. We present experiments on 25 glioma patient data sets, demonstrating significant improvement over the traditional multivariate tumor segmentation.},
Author = {Menze, Bjoern H and Van Leemput, Koen and Lashkari, Danial and Weber, Marc-Andr{\'e} and Ayache, Nicholas and Golland, Polina},
Date-Added = {2013-03-24 02:50:23 +0000},
Date-Modified = {2013-03-24 02:50:23 +0000},
Journal = {Med Image Comput Comput Assist Interv},
Journal-Full = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention},
Mesh = {Algorithms; Brain Neoplasms; Computer Simulation; Glioma; Humans; Image Enhancement; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Models, Neurological; Pattern Recognition, Automated; Reproducibility of Results; Sensitivity and Specificity; Subtraction Technique},
Number = {Pt 2},
Pages = {151--159},
Pmc = {PMC3050038},
Pmid = {20879310},
Pst = {ppublish},
Title = {A generative model for brain tumor segmentation in multi-modal images},
Volume = {13},
Year = {2010}}
@article{avants2011,
Abstract = {We introduce Atropos, an ITK-based multivariate n-class open source segmentation algorithm distributed with ANTs ( http://www.picsl.upenn.edu/ANTs). The Bayesian formulation of the segmentation problem is solved using the Expectation Maximization (EM) algorithm with the modeling of the class intensities based on either parametric or non-parametric finite mixtures. Atropos is capable of incorporating spatial prior probability maps (sparse), prior label maps and/or Markov Random Field (MRF) modeling. Atropos has also been efficiently implemented to handle large quantities of possible labelings (in the experimental section, we use up to 69 classes) with a minimal memory footprint. This work describes the technical and implementation aspects of Atropos and evaluates its performance on two different ground-truth datasets. First, we use the BrainWeb dataset from Montreal Neurological Institute to evaluate three-tissue segmentation performance via (1) K-means segmentation without use of template data; (2) MRF segmentation with initialization by prior probability maps derived from a group template; (3) Prior-based segmentation with use of spatial prior probability maps derived from a group template. We also evaluate Atropos performance by using spatial priors to drive a 69-class EM segmentation problem derived from the Hammers atlas from University College London. These evaluation studies, combined with illustrative examples that exercise Atropos options, demonstrate both performance and wide applicability of this new platform-independent open source segmentation tool.},
Author = {Avants, Brian B and Tustison, Nicholas J and Wu, Jue and Cook, Philip A and Gee, James C},
Date-Added = {2013-03-24 01:22:58 +0000},
Date-Modified = {2013-03-24 01:22:58 +0000},
Doi = {10.1007/s12021-011-9109-y},
Journal = {Neuroinformatics},
Journal-Full = {Neuroinformatics},
Mesh = {Access to Information; Algorithms; Bayes Theorem; Databases, Factual; Humans; Image Processing, Computer-Assisted; Internet; Magnetic Resonance Imaging; Models, Statistical; Pattern Recognition, Automated; Software},
Month = {Dec},
Number = {4},
Pages = {381--400},
Pmc = {PMC3297199},
Pmid = {21373993},
Pst = {ppublish},
Title = {An open source multivariate framework for n-tissue segmentation with evaluation on public data},
Volume = {9},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1007/s12021-011-9109-y}}
@incollection{reynolds2009,
Author = {Douglas A. Reynolds},
Booktitle = {Encyclopedia of Biometrics},
Date-Added = {2013-03-24 00:12:10 +0000},
Date-Modified = {2013-03-24 02:10:34 +0000},
Editor = {Stan Z. Li and Anil K. Jain},
Pages = {659--663},
Publisher = {Springer US},
Title = {Gaussian Mixture Modeling},
Year = {2009}}
@article{avants2010,
Abstract = {We evaluate the impact of template choice on template-based segmentation of the hippocampus in epilepsy. Four dataset-specific strategies are quantitatively contrasted: the "closest to average" individual template, the average shape version of the closest to average template, a best appearance template and the best appearance and shape template proposed here and implemented in the open source toolkit Advanced Normalization Tools (ANTS). The cross-correlation similarity metric drives the correspondence model and is used consistently to determine the optimal appearance. Minimum shape distance in the diffeomorphic space determines optimal shape. Our evaluation results show that, with respect to gold-standard manual labeling of hippocampi in epilepsy, optimal shape and appearance template construction outperforms the other strategies for gaining data-derived templates. Our results also show the improvement is most significant on the diseased side and insignificant on the healthy side. Thus, the importance of the template increases when used to study pathology and may be less critical for normal control studies. Furthermore, explicit geometric optimization of the shape component of the unbiased template positively impacts the study of diseased hippocampi.},
Author = {Avants, Brian B and Yushkevich, Paul and Pluta, John and Minkoff, David and Korczykowski, Marc and Detre, John and Gee, James C},
Date-Added = {2013-03-22 16:44:28 +0000},
Date-Modified = {2013-03-22 16:44:28 +0000},
Doi = {10.1016/j.neuroimage.2009.09.062},
Journal = {Neuroimage},
Journal-Full = {NeuroImage},
Mesh = {Algorithms; Atlases as Topic; Epilepsy; Hippocampus; Humans; Image Interpretation, Computer-Assisted},
Month = {Feb},
Number = {3},
Pages = {2457--2466},
Pmc = {PMC2818274},
Pmid = {19818860},
Pst = {ppublish},
Title = {The optimal template effect in hippocampus studies of diseased populations},
Volume = {49},
Year = {2010},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.neuroimage.2009.09.062}}
@article{avants2008,
Abstract = {RATIONALE AND OBJECTIVES: Diffusion tensor (DT) and T1 structural magnetic resonance images provide unique and complementary tools for quantifying the living brain. We leverage both modalities in a diffeomorphic normalization method that unifies analysis of clinical datasets in a consistent and inherently multivariate (MV) statistical framework. We use this technique to study MV effects of traumatic brain injury (TBI).
MATERIALS AND METHODS: We contrast T1 and DT image-based measurements in the thalamus and hippocampus of 12 TBI survivors and nine matched controls normalized to a combined DT and T1 template space. The normalization method uses maps that are topology-preserving and unbiased. Normalization is based on the full tensor of information at each voxel and, simultaneously, the similarity between high-resolution features derived from T1 data. The technique is termed symmetric normalization for MV neuroanatomy (SyNMN). Voxel-wise MV statistics on the local volume and mean diffusion are assessed with Hotelling's T(2) test with correction for multiple comparisons.
RESULTS: TBI significantly (false discovery rate P < .05) reduces volume and increases mean diffusion at coincident locations in the mediodorsal thalamus and anterior hippocampus.
CONCLUSIONS: SyNMN reveals evidence that TBI compromises the limbic system. This TBI morphometry study and an additional performance evaluation contrasting SyNMN with other methods suggest that the DT component may aid normalization quality.},
Author = {Avants, Brian and Duda, Jeffrey T and Kim, Junghoon and Zhang, Hui and Pluta, John and Gee, James C and Whyte, John},
Date-Added = {2013-03-22 15:53:54 +0000},
Date-Modified = {2013-03-22 15:53:54 +0000},
Doi = {10.1016/j.acra.2008.07.007},
Journal = {Acad Radiol},
Journal-Full = {Academic radiology},
Mesh = {Adult; Brain; Brain Injuries; Cohort Studies; Diffusion Magnetic Resonance Imaging; Echo-Planar Imaging; Female; Hippocampus; Humans; Image Processing, Computer-Assisted; Male; Middle Aged; Multivariate Analysis; Thalamus},
Month = {Nov},
Number = {11},
Pages = {1360--1375},
Pmid = {18995188},
Pst = {ppublish},
Title = {Multivariate analysis of structural and diffusion imaging in traumatic brain injury},
Volume = {15},
Year = {2008},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.acra.2008.07.007}}
@article{landman2011,
Abstract = {Modern MRI image processing methods have yielded quantitative, morphometric, functional, and structural assessments of the human brain. These analyses typically exploit carefully optimized protocols for specific imaging targets. Algorithm investigators have several excellent public data resources to use to test, develop, and optimize their methods. Recently, there has been an increasing focus on combining MRI protocols in multi-parametric studies. Notably, these have included innovative approaches for fusing connectivity inferences with functional and/or anatomical characterizations. Yet, validation of the reproducibility of these interesting and novel methods has been severely hampered by the limited availability of appropriate multi-parametric data. We present an imaging protocol optimized to include state-of-the-art assessment of brain function, structure, micro-architecture, and quantitative parameters within a clinically feasible 60-min protocol on a 3-T MRI scanner. We present scan-rescan reproducibility of these imaging contrasts based on 21 healthy volunteers (11 M/10 F, 22-61 years old). The cortical gray matter, cortical white matter, ventricular cerebrospinal fluid, thalamus, putamen, caudate, cerebellar gray matter, cerebellar white matter, and brainstem were identified with mean volume-wise reproducibility of 3.5%. We tabulate the mean intensity, variability, and reproducibility of each contrast in a region of interest approach, which is essential for prospective study planning and retrospective power analysis considerations. Anatomy was highly consistent on structural acquisition (~1-5% variability), while variation on diffusion and several other quantitative scans was higher (~<10%). 
Some sequences are particularly variable in specific structures (ASL exhibited variation of 28% in the cerebral white matter) or in thin structures (quantitative T2 varied by up to 73% in the caudate) due, in large part, to variability in automated ROI placement. The richness of the joint distribution of intensities across imaging methods can be best assessed within the context of a particular analysis approach as opposed to a summary table. As such, all imaging data and analysis routines have been made publicly and freely available. This effort provides the neuroimaging community with a resource for optimization of algorithms that exploit the diversity of modern MRI modalities. Additionally, it establishes a baseline for continuing development and optimization of multi-parametric imaging protocols.},
Author = {Landman, Bennett A and Huang, Alan J and Gifford, Aliya and Vikram, Deepti S and Lim, Issel Anne L and Farrell, Jonathan A D and Bogovic, John A and Hua, Jun and Chen, Min and Jarso, Samson and Smith, Seth A and Joel, Suresh and Mori, Susumu and Pekar, James J and Barker, Peter B and Prince, Jerry L and van Zijl, Peter C M},
Date-Added = {2013-03-22 14:40:43 +0000},
Date-Modified = {2013-04-09 22:21:07 +0000},
Doi = {10.1016/j.neuroimage.2010.11.047},
Journal = {Neuroimage},
Journal-Full = {NeuroImage},
Mesh = {Adult; Brain; Brain Mapping; Female; Humans; Image Interpretation, Computer-Assisted; Magnetic Resonance Imaging; Male; Middle Aged; Reproducibility of Results; Young Adult},
Month = {Feb},
Number = {4},
Pages = {2854--2866},
Pmc = {PMC3020263},
Pmid = {21094686},
Pst = {ppublish},
Title = {Multi-parametric neuroimaging reproducibility: a 3-{T} resource study},
Volume = {54},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.neuroimage.2010.11.047}}
@inproceedings{zikic2012,
Author = {D. Zikic and B. Glocker and E. Konukoglu and J. Shotton and A. Criminisi and D. H. Ye and C. Demiralp and O. M. Thomas and T. Das and R. Jena and S. J. Price},
Booktitle = {Proceedings of MICCAI-BRATS 2012},
Date-Added = {2013-03-22 01:05:43 +0000},
Date-Modified = {2013-03-22 01:05:43 +0000},
Month = {Oct},
Pages = {1--9},
Title = {Context-sensitive Classification Forests for Segmentation of Brain Tumor Tissues},
Year = {2012}}
@inproceedings{bauer2012,
Author = {S. Bauer and T. Fejes and J. Slotboom and R. Wiest and L.-P. Nolte and M. Reyes},
Booktitle = {Proceedings of MICCAI-BRATS 2012},
Date-Added = {2013-03-22 00:59:52 +0000},
Date-Modified = {2013-03-22 01:06:51 +0000},
Month = {Oct},
Pages = {10--13},
Title = {Segmentation of Brain Tumor Images Based on Integrated Hierarchical Classification and Regularization},
Year = {2012}}