@Article{Eldar2010,
author = {Eldar, Y. and Kuppinger, P. and B{\"o}lcskei, H.},
title = {Block-sparse signals: Uncertainty relations and efficient recovery},
number = {6},
pages = {3042--3054},
volume = {58},
annote = {read http://www.nari.ee.ethz.ch/commth//pubs/p/Block2009 block-coherence IEEE Transactions on Signal Processing},
journal = {IEEE Transactions on Signal Processing},
keywords = {Basis pursuit,Block-sparsity,compressed sensing,matching pursuit},
month = jun,
owner = {Fardin},
timestamp = {2016-09-30T13:34:19Z},
year = {2010},
}
@Article{Velmurugan2016,
author = {Velmurugan, J. and Sinha, Sanjib and Nagappa, Madhu and Mariyappa, N. and Bindu, P. S. and Ravi, G. S. and Hazra, Nandita and Thennarasu, K. and Ravi, V. and Taly, A. B. and Satishchandra, P.},
title = {Combined {{MEG}}\textendash{}{{EEG}} Source Localisation in Patients with Sub-Acute Sclerosing Pan-Encephalitis},
doi = {10.1007/s10072-016-2571-4},
issn = {1590-1874, 1590-3478},
language = {en},
number = {8},
pages = {1221--1231},
urldate = {2016-10-21},
volume = {37},
abstract = {To study the genesis and propagation patterns of periodic complexes (PCs) associated with myoclonic jerks in sub-acute sclerosing pan-encephalitis (SSPE) using magnetoencephalography (MEG) and electroencephalography (EEG). Simultaneous recording of MEG (306 channels) and EEG (64 channels) in five patients of SSPE (M:F = 3:2; age 10.8 $\pm$ 3.2 years; symptom-duration 6.2 $\pm$ 10 months) was carried out using Elekta Neuromag\textregistered{} TRIUX\texttrademark{} system. Qualitative analysis of 80\textendash{}160 PCs per patient was performed. Ten isomorphic classical PCs with significant field topography per patient were analysed at the `onset' and at `earliest significant peak' of the burst using discrete and distributed source imaging methods. MEG background was asymmetrical in 2 and slow in 3 patients. Complexes were periodic (3) or quasi-periodic (2), occurring every 4\textendash{}16 s and varied in morphology among patients. Mean source localization at onset of bursts using discrete and distributed source imaging in magnetic source imaging (MSI) was in thalami and or insula (50 and 50 \%, respectively) and in electric source imaging (ESI) was also in thalami and or insula (38 and 46 \%, respectively). Mean source localization at the earliest rising phase of peak in MSI was in peri-central gyrus (49 and 42 \%) and in ESI it was in frontal cortex (52 and 56 \%). Further analysis revealed that PCs were generated in thalami and or insula and thereafter propagated to anterolateral surface of the cortices (viz. sensori-motor cortex and frontal cortex) to same side as that of the onset. This novel MEG\textendash{}EEG based case series of PCs provides newer insights for understanding the plausible generators of myoclonus in SSPE and patterns of their propagation.},
file = {Snapshot:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\RAGTPM6E\\s10072-016-2571-4.html:text/html},
journal = {Neurological Sciences},
month = apr,
timestamp = {2016-10-21T14:04:40Z},
year = {2016},
}
@Article{Heath2006,
author = {Heath, R. W. and Strohmer, T. and Paulraj, A. J.},
title = {On Quasi-Orthogonal Signatures for {{CDMA}} Systems},
doi = {10.1109/TIT.2005.864469},
issn = {0018-9448},
number = {3},
pages = {1217--1226},
volume = {52},
abstract = {Sum capacity optimal signatures in synchronous code-division multiple-access (CDMA) systems are functions of the codebook length as well as the number of active users. A new signature set must be assigned every time the number of active users changes. This correspondence considers signature sets that are less sensitive to changes in the number of active users. Equiangular signature sequences are proven to solve a certain max-min signal-to-interference-plus-noise problem, which results from their interference invariance. Unions of orthonormal bases have subsets that come close to satisfying the Welch bound. Bounds on the maximum number of bases with minimum maximum correlation are derived and a new construction algorithm is provided. Connections are made between these signature design problems, Grassmannian line packing, frame theory, and algebraic geometry},
file = {IEEE Xplore Abstract Record:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\WTJCSP3V\\articleDetails.html:text/html},
journal = {IEEE Transactions on Information Theory},
keywords = {algebraic geometry,Circuits,codebook length,code division multiple access,code-division multiple-access,Code-division multiple-access (CDMA) systems,Code division multiplexing,Colored noise,Encoding,equiangular signature sequence,Fading,frame theory,Gaussian channels,Geometry,Grassmannian line packing,Interference,interference (signal),Iterative algorithms,minimax techniques,minimum maximum correlation,Multiaccess communication,number theory,quasiorthogonal signature,sequences,signal-to-interference-plus-noise problem,sum capacity,synchronous CDMA system,Walsh sequences,Welch bound,White noise},
month = mar,
timestamp = {2016-09-30T13:50:26Z},
year = {2006},
}
@Article{Candes2008a,
author = {Cand{\`e}s, Emmanuel J.},
title = {The restricted isometry property and its implications for compressed sensing},
doi = {10.1016/j.crma.2008.03.014},
issn = {1631-073X},
number = {9-10},
pages = {589--592},
volume = {346},
abstract = {It is now well-known that one can reconstruct sparse or compressible
signals accurately from a very limited number of measurements, possibly
contaminated with noise. This technique known as ``compressed sensing''
or ``compressive sampling'' relies on properties of the sensing
matrix such as the restricted isometry property. In this Note, we
establish new results about the accuracy of the reconstruction from
undersampled measurements which improve on earlier estimates, and
have the advantage of being more elegant. To cite this article: E.J.
Candès, C. R. Acad. Sci. Paris, Ser. I 346 (2008).},
annote = {read http://www.sciencedirect.com/science/article/pii/S1631073X08000964},
journal = {Comptes Rendus Mathematique},
owner = {Fardin},
timestamp = {2017-06-23T13:04:18Z},
year = {2008},
}
@Article{Cotter2005,
author = {Cotter, S.F. and Rao, B.D. and Engan, K. and Kreutz-Delgado, K.},
title = {Sparse solutions to linear inverse problems with multiple measurement vectors},
doi = {10.1109/TSP.2005.849172},
issn = {1053-587X},
number = {7},
pages = {2477--2488},
volume = {53},
abstract = {We address the problem of finding sparse solutions to an underdetermined
system of equations when there are multiple measurement vectors having
the same, but unknown, sparsity structure. The single measurement
sparse solution problem has been extensively studied in the past.
Although known to be NP-hard, many single-measurement suboptimal
algorithms have been formulated that have found utility in many different
applications. Here, we consider in depth the extension of two classes
of algorithms-Matching Pursuit (MP) and FOCal Underdetermined System
Solver (FOCUSS)-to the multiple measurement case so that they may
be used in applications such as neuromagnetic imaging, where multiple
measurement vectors are available, and solutions with a common sparsity
structure must be computed. Cost functions appropriate to the multiple
measurement problem are developed, and algorithms are derived based
on their minimization. A simulation study is conducted on a test-case
dictionary to show how the utilization of more than one measurement
vector improves the performance of the MP and FOCUSS classes of algorithm,
and their performances are compared.},
journal = {IEEE Transactions on Signal Processing},
keywords = {algorithms-matching pursuit,Computational modeling,Cost function,Dictionaries,Equations,focal underdetermined system solver,Focusing,inverse problems,linear inverse problem,measurement vector,Minimization methods,neuromagnetic imaging,Pursuit algorithms,signal processing,suboptimal algorithm,Testing,Vectors},
month = jul,
owner = {Fardin},
timestamp = {2016-09-30T11:19:42Z},
year = {2005},
}
@Article{Mishali2011a,
author = {Mishali, M. and Eldar, Y. C. and Dounaevsky, O. and Shoshan, E.},
title = {Xampling: {{Analog}} to Digital at Sub-{{Nyquist}} Rates},
doi = {10.1049/iet-cds.2010.0147},
issn = {1751-858X},
number = {1},
pages = {8--20},
volume = {5},
abstract = {The authors present a sub-Nyquist analog-to-digital converter of wideband inputs. The circuit realises the recently proposed modulated wideband converter, which is a flexible platform for sampling signals according to their actual bandwidth occupation. The theoretical work enables, for example, a sub-Nyquist wideband communication receiver, which has no prior information on the transmitter carrier positions. The present design supports input signals with 2 GHz Nyquist rate and 120 MHz spectrum occupancy, with arbitrary transmission frequencies. The sampling rate is as low as 280 MHz. To the best of the authors knowledge, this is the first reported hardware that performs sub-Nyquist sampling and reconstruction of wideband signals. The authors describe the various circuit design considerations, with an emphasis on the non-ordinary challenges the converter introduces: mixing a signal with a multiple set of sinusoids, rather than a single local oscillator, and generation of highly transient periodic waveforms, with transient intervals on the order of the Nyquist rate. Hardware experiments validate the design and demonstrate sub-Nyquist sampling and signal reconstruction.},
file = {IEEE Xplore Abstract Record:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\MH4SWW6J\\5692791.html:text/html},
journal = {IET Circuits, Devices \& Systems},
keywords = {analog-to-digital converter,analogue-digital conversion,circuit design considerations,frequency 2 GHz,frequency 120 MHz,frequency 280 MHz,integrated circuit design,signal reconstruction,single local oscillator,subNyquist wideband communication receiver,transient intervals,transient periodic waveforms,wideband signals,Xampling},
month = jan,
shorttitle = {Xampling},
timestamp = {2016-09-30T11:17:32Z},
year = {2011},
}
@Book{Hari2017,
author = {Hari, Riitta and Puce, Aina},
title = {MEG-EEG Primer},
publisher = {Oxford University Press},
year = {2017},
}
@Article{Mallat1993,
author = {Mallat, S.G. and Zhang, Z.},
title = {Matching pursuits with time-frequency dictionaries},
doi = {10.1109/78.258082},
issn = {1053-587X},
number = {12},
pages = {3397--3415},
volume = {41},
abstract = {The authors introduce an algorithm, called matching pursuit, that
decomposes any signal into a linear expansion of waveforms that are
selected from a redundant dictionary of functions. These waveforms
are chosen in order to best match the signal structures. Matching
pursuits are general procedures to compute adaptive signal representations.
With a dictionary of Gabor functions a matching pursuit defines an
adaptive time-frequency transform. They derive a signal energy distribution
in the time-frequency plane, which does not include interference
terms, unlike Wigner and Cohen class distributions. A matching pursuit
isolates the signal structures that are coherent with respect to
a given dictionary. An application to pattern extraction from noisy
signals is described. They compare a matching pursuit decomposition
with a signal expansion over an optimized wavepacket orthonormal
basis, selected with the algorithm of Coifman and Wickerhauser (see
IEEE Trans. Informat. Theory, vol. 38, Mar. 1992)},
journal = {IEEE Transactions on Signal Processing},
keywords = {adaptive signal representations,adaptive time-frequency transform,Dictionaries,Fourier transforms,Gabor functions,Interference,linear waveform expansion,matching pursuit algorithm,Matching pursuit algorithms,matching pursuit decomposition,Natural languages,noisy signals,optimized wavepacket orthonormal basis,pattern extraction,Pursuit algorithms,signal energy distribution,signal expansion,signal processing,Signal processing algorithms,signal representations,signal structures,time-frequency analysis,Time frequency analysis,time-frequency dictionaries,time-frequency plane,Vocabulary,wavelet transforms},
month = dec,
owner = {afdidehf},
timestamp = {2016-09-29T14:53:46Z},
year = {1993},
}
@Article{Hurley2009,
author = {Hurley, N. and Rickard, Scott},
title = {Comparing Measures of Sparsity},
doi = {10.1109/TIT.2009.2027527},
issn = {0018-9448},
number = {10},
pages = {4723--4741},
volume = {55},
abstract = {Sparsity of representations of signals has been shown to be a key
concept of fundamental importance in fields such as blind source
separation, compression, sampling and signal analysis. The aim of
this paper is to compare several commonly-used sparsity measures
based on intuitive attributes. Intuitively, a sparse representation
is one in which a small number of coefficients contain a large proportion
of the energy. In this paper, six properties are discussed (Robin
Hood, Scaling, Rising Tide, Cloning, Bill Gates, and Babies), each
of which a sparsity measure should have. The main contributions of
this paper are the proofs and the associated summary table which
classify commonly-used sparsity measures based on whether or not
they satisfy these six propositions. Only two of these measures satisfy
all six: the pq-mean with $p \leq 1$, $q > 1$ and the Gini index.},
annote = {read Information Theory, IEEE Transactions on},
journal = {IEEE Transactions on Information Theory},
keywords = {adaptive signal processing,blind source separation,Cloning,compression analysis,Gini index,image coding,information theory,Machine learning,Measures of sparsity,measuring sparsity,sampling analysis,Sampling methods,Sea measurements,Signal analysis,source separation,sparse distribution,sparse representation,Sparsity,sparsity measures,Tides},
month = oct,
owner = {Fardin},
timestamp = {2016-09-30T10:42:08Z},
year = {2009},
}
@InProceedings{Berg1999,
author = {Berg, A.P. and Mikhael, W.B.},
booktitle = {Proceedings of the 1999 IEEE International Symposium on Circuits and Systems (ISCAS '99)},
title = {A survey of mixed transform techniques for speech and image coding},
doi = {10.1109/ISCAS.1999.779953},
pages = {106--109},
volume = {4},
abstract = {The goal of transform based coding is to build a representation of
a signal using the smallest number of weighted basis functions possible,
while maintaining the ability to reconstruct the signal with adequate
fidelity. Mixed transform techniques, which employ subsets of non-orthogonal
basis functions chosen from two or more transform domains, have been
shown to consistently yield more efficient signal representations
than those based on one transform. This paper provides a survey of
mixed transform techniques, also known as multitransforms or mixed
basis representations, which have been developed for speech and image
coding},
keywords = {Baseband,Compaction,data compression,Dictionaries,discrete cosine transforms,Discrete transforms,Fourier transforms,image coding,Image reconstruction,mixed basis representations,mixed transform techniques,multitransforms,nonorthogonal basis functions,Prototypes,signal representation,signal representations,speech coding,Speech coding,transform based coding,Transform coding,transforms,weighted basis functions},
month = jul,
owner = {Fardin},
timestamp = {2016-09-29T16:28:53Z},
year = {1999},
}
@Article{Cotter2002,
author = {Cotter, S.F. and Rao, B.D.},
title = {Sparse channel estimation via matching pursuit with application to equalization},
doi = {10.1109/26.990897},
issn = {0090-6778},
number = {3},
pages = {374--377},
volume = {50},
abstract = {Channels with a sparse impulse response arise in a number of communication
applications. Exploiting the sparsity of the channel, we show how
an estimate of the channel may be obtained using a matching pursuit
(MP) algorithm. This estimate is compared to thresholded variants
of the least squares (LS) channel estimate. Among these sparse channel
estimates, the MP estimate is computationally much simpler to implement
and a shorter training sequence is required to form an accurate channel
estimate leading to greater information throughput},
journal = {IEEE Transactions on Communications},
keywords = {Broadband communication,channel estimation,decision feedback equalisers,decision feedback equalizer,Delay estimation,DFE,HDTV,information throughput,intersymbol interference,ISI,Least squares approximation,least squares approximations,least squares channel estimate,matching pursuit algorithm,Matching pursuit algorithms,parameter estimation,Pursuit algorithms,sparse channel estimation,sparse impulse response,telecommunication channels,Throughput,training sequence,transient response,TV,White noise},
month = mar,
owner = {Fardin},
timestamp = {2016-09-29T16:29:32Z},
year = {2002},
}
@Book{Boyd2004,
author = {Boyd, Stephen and Vandenberghe, Lieven},
title = {Convex Optimization},
publisher = {Cambridge University Press},
address = {Cambridge, U.K.},
abstract = {Convex optimization problems arise frequently in many different fields.
A comprehensive introduction to the subject, this book shows in detail
how such problems can be solved numerically with great efficiency.
The focus is on recognizing convex optimization problems and then
finding the most appropriate technique for solving them. The text
contains many worked examples and homework exercises and will appeal
to students, researchers and practitioners in fields such as engineering,
computer science, mathematics, statistics, finance, and economics.
More material can be found at the web sites for EE364A (Stanford)
or EE236B (UCLA), and our own web pages. Source code for almost all
examples and figures in part 2 of the book is available in CVX (in
the examples directory), in CVXOPT (in the book examples directory),
and in CVXPY. Source code for examples in Chapters 9, 10, and 11
can be found here. Instructors can obtain complete solutions to exercises
by email request to us; please give us the URL of the course you
are teaching. If you find an error not listed in our errata list,
please do let us know about it.},
owner = {afdidehf},
timestamp = {2016-07-08T12:04:41Z},
year = {2004},
}
@Article{Gribonval2004a,
author = {Gribonval, R{\'e}mi and Nielsen, Morten},
title = {On the Strong Uniqueness of Highly Sparse Representations from Redundant Dictionaries},
pages = {201--208},
urldate = {2016-05-16},
volume = {3195},
annote = {Independent Component Analysis and Blind Signal Separation},
journal = {International Conference on Independent Component Analysis and Blind Signal Separation},
timestamp = {2017-04-19T15:29:02Z},
year = {2004},
}
@Article{Blumensath2009a,
author = {Blumensath, Thomas and Davies, Mike E.},
title = {Iterative hard thresholding for compressed sensing},
doi = {10.1016/j.acha.2009.04.002},
issn = {1063-5203},
number = {3},
pages = {265--274},
volume = {27},
abstract = {Compressed sensing is a technique to sample compressible signals below
the Nyquist rate, whilst still allowing near optimal reconstruction
of the signal. In this paper we present a theoretical analysis of
the iterative hard thresholding algorithm when applied to the compressed
sensing recovery problem. We show that the algorithm has the following
properties (made more precise in the main text of the paper): • It
gives near-optimal error guarantees. • It is robust to observation
noise. • It succeeds with a minimum number of observations. •
It can be used with any sampling operator for which the operator
and its adjoint can be computed. • The memory requirement is linear
in the problem size. • Its computational complexity per iteration
is of the same order as the application of the measurement operator
or its adjoint. • It requires a fixed number of iterations depending
only on the logarithm of a form of signal to noise ratio of the signal.
• Its performance guarantees are uniform in that they only depend
on properties of the sampling operator and signal sparsity.},
journal = {Applied and Computational Harmonic Analysis},
keywords = {Algorithms},
owner = {afdidehf},
timestamp = {2016-07-09T19:45:21Z},
year = {2009},
}
@Article{Ben-Haim2011,
author = {Ben-Haim, Z. and Eldar, Y.C.},
title = {Near-Oracle Performance of Greedy Block-Sparse Estimation Techniques From Noisy Measurements},
doi = {10.1109/JSTSP.2011.2160250},
issn = {1932-4553},
number = {5},
pages = {1032--1047},
volume = {5},
abstract = {This paper examines the ability of greedy algorithms to estimate a
block sparse parameter vector from noisy measurements. In particular,
block sparse versions of the orthogonal matching pursuit and thresholding
algorithms are analyzed under both adversarial and Gaussian noise
models. In the adversarial setting, it is shown that estimation accuracy
comes within a constant factor of the noise power. Under Gaussian
noise, the Cram{\'e}r-Rao bound is derived, and it is shown that the
greedy techniques come close to this bound at high signal-to-noise
ratio. The guarantees are numerically compared with the actual performance
of block and non-block algorithms, identifying situations in which
block sparse techniques improve upon the scalar sparsity approach.
Specifically, we show that block sparse methods are particularly
successful when the atoms within each block are nearly orthogonal.},
journal = {IEEE Journal of Selected Topics in Signal Processing},
keywords = {Atomic measurements,block sparse parameter vector,block sparsity,Coherence,Cram{\'e}r-Rao bound,Dictionaries,Estimation,Gaussian noise,greedy block-sparse estimation technique,iterative methods,Matching pursuit algorithms,near-oracle performance,noisy measurement,orthogonal matching pursuit,performance guarantees,scalar sparsity approach,signal processing,Signal-To-Noise Ratio,thresholding,thresholding algorithm},
month = sep,
owner = {afdidehf},
timestamp = {2016-07-10T06:48:47Z},
year = {2011},
}
@Article{Plonsey1967,
author = {Plonsey, Robert and Heppner, Dennis B.},
title = {Considerations of Quasi-Stationarity in Electrophysiological Systems},
doi = {10.1007/BF02476917},
issn = {0007-4985, 1522-9602},
language = {en},
number = {4},
pages = {657--664},
urldate = {2017-08-21},
volume = {29},
abstract = {Conditions under which a time varying electromagnetic field problem (such as arises in electrophysiology, electrocardiography, etc.) can be reduced to the conventional quasistatic problem are summarized. These conditions are discussed for typical physiological parameters.},
file = {Snapshot:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\6Z4EXCM7\\BF02476917.html:text/html},
journal = {The Bulletin of Mathematical Biophysics},
month = dec,
timestamp = {2017-08-21T15:00:59Z},
year = {1967},
}
@Article{Elad2001,
author = {Elad, M. and Bruckstein, A.M.},
title = {On sparse signal representations},
doi = {10.1109/ICIP.2001.958936},
pages = {3--6},
volume = {1},
abstract = {An elementary proof of a basic uncertainty principle concerning pairs
of representations of $\mathbb{R}^N$ vectors in different orthonormal bases is
provided. The result, slightly stronger than stated before, has a
direct impact on the uniqueness property of the sparse representation
of such vectors using pairs of orthonormal bases as overcomplete
dictionaries. The main contribution in this paper is the improvement
of an important result due to Donoho and Huo (1999) concerning the
replacement of the $\ell_0$ optimization problem by a linear programming
minimization when searching for the unique sparse representation},
journal = {Proceedings of the 2001 International Conference on Image Processing (ICIP)},
keywords = {Cities and towns,Computer science,Dictionaries,Equations,linear programming,linear programming minimization,minimisation,optimization problem,orthonormal bases,overcomplete dictionaries,Signal generators,signal processing,signal representation,signal representations,sparse representation,Uncertainty,uncertainty principle,uniqueness property,unique sparse representation search,Vectors},
owner = {Fardin},
timestamp = {2016-07-10T07:14:04Z},
year = {2001},
}
@Article{Oostenveld2011,
author = {Oostenveld, Robert and Fries, Pascal and Maris, Eric and Schoffelen, Jan-Mathijs},
title = {{{FieldTrip}}: {{Open Source Software}} for {{Advanced Analysis}} of {{MEG}}, {{EEG}}, and {{Invasive Electrophysiological Data}}},
doi = {10.1155/2011/156869},
url = {https://www.hindawi.com/journals/cin/2011/156869/},
language = {en},
volume = {2011},
urldate = {2017-12-25},
abstract = {This paper describes FieldTrip, an open source software package that we developed for the analysis of MEG, EEG, and other electrophysiological data. The software is implemented as a MATLAB toolbox and includes a complete set of consistent and user-friendly high-level functions that allow experimental neuroscientists to analyze experimental data. It includes algorithms for simple and advanced analysis, such as time-frequency analysis using multitapers, source reconstruction using dipoles, distributed sources and beamformers, connectivity analysis, and nonparametric statistical permutation tests at the channel and source level. The implementation as toolbox allows the user to perform elaborate and structured analyses of large data sets using the MATLAB command line and batch scripting. Furthermore, users and developers can easily extend the functionality and implement new algorithms. The modular design facilitates the reuse in other software packages.},
file = {Snapshot:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\XGP3I2D4\\156869.html:application/xhtml+xml},
journal = {Computational Intelligence and Neuroscience},
pmid = {21253357},
shorttitle = {{{FieldTrip}}},
timestamp = {2017-12-25T16:41:37Z},
year = {2011},
}
@Article{Wright2009a,
author = {Wright, J. and Yang, A. Y. and Ganesh, A. and Sastry, S. S. and Ma, Y.},
title = {Robust Face Recognition via Sparse Representation},
doi = {10.1109/TPAMI.2008.79},
issn = {0162-8828},
number = {2},
pages = {210--227},
volume = {31},
abstract = {We consider the problem of automatically recognizing human faces from frontal views with varying expression and illumination, as well as occlusion and disguise. We cast the recognition problem as one of classifying among multiple linear regression models and argue that new theory from sparse signal representation offers the key to addressing this problem. Based on a sparse representation computed by $\ell_1$-minimization, we propose a general classification algorithm for (image-based) object recognition. This new framework provides new insights into two crucial issues in face recognition: feature extraction and robustness to occlusion. For feature extraction, we show that if sparsity in the recognition problem is properly harnessed, the choice of features is no longer critical. What is critical, however, is whether the number of features is sufficiently large and whether the sparse representation is correctly computed. Unconventional features such as downsampled images and random projections perform just as well as conventional features such as eigenfaces and Laplacianfaces, as long as the dimension of the feature space surpasses a certain threshold, predicted by the theory of sparse representation. This framework can handle errors due to occlusion and corruption uniformly by exploiting the fact that these errors are often sparse with respect to the standard (pixel) basis. The theory of sparse representation helps predict how much occlusion the recognition algorithm can handle and how to choose the training images to maximize robustness to occlusion. We conduct extensive experiments on publicly available databases to verify the efficacy of the proposed algorithm and corroborate the above claims.},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {Algorithms,Artificial Intelligence,Automated,Biometry,Classification algorithms,Classifier design and evaluation,Cluster Analysis,compressed sensing,Computer-Assisted,downsampled images,eigenfaces,ell^1-minimization,Face,Face and gesture recognition,face recognition,Feature evaluation and selection,feature extraction,Humans,illumination,image-based object recognition,Image Enhancement,Image Interpretation,Image recognition,Laplacianfaces,Lighting,Linear regression,multiple linear regression model,object recognition,occlusion,occlusion and corruption,Outlier rejection,Pattern Recognition,random processes,random projections,regression analysis,Reproducibility of Results,robust face recognition,Robustness,Sensitivity and Specificity,signal representation,signal representations,sparse representation,sparse signal representation,Subtraction Technique,validation and outlier rejection},
month = feb,
timestamp = {2016-09-30T11:28:15Z},
year = {2009},
}
@Article{Davis1997,
author = {Davis, G. and Mallat, S. and Avellaneda, M.},
title = {Adaptive greedy approximations},
doi = {10.1007/BF02678430},
issn = {0176-4276},
language = {English},
number = {1},
pages = {57--98},
volume = {13},
abstract = {The problem of optimally approximating a function with a linear expansion
over a redundant dictionary of waveforms is NP-hard. The greedy matching
pursuit algorithm and its orthogonalized variant produce suboptimal
function expansions by iteratively choosing dictionary waveforms
that best match the function's structures. A matching pursuit provides
a means of quickly computing compact, adaptive function approximations.
Numerical experiments show that the approximation errors from matching
pursuits initially decrease rapidly, but the asymptotic decay rate
of the errors is slow. We explain this behavior by showing that matching
pursuits are chaotic, ergodic maps. The statistical properties of
the approximation errors of a pursuit can be obtained from the invariant
measure of the pursuit. We characterize these measures using group
symmetries of dictionaries and by constructing a stochastic differential
equation model. We derive a notion of the coherence of a signal with
respect to a dictionary from our characterization of the approximation
errors of a pursuit. The dictionary elements selected during the
initial iterations of a pursuit correspond to a function's coherent
structures. The tail of the expansion, on the other hand, corresponds
to a noise which is characterized by the invariant measure of the
pursuit map. When using a suitable dictionary, the expansion of a
function into its coherent structures yields a compact approximation.
We demonstrate a denoising algorithm based on coherent function expansions.},
journal = {Constructive Approximation},
keywords = {41A10,Adaptive approximations,denoising,Greedy algorithms,matching pursuit,overcomplete signal representation,Time frequency analysis},
owner = {afdidehf},
timestamp = {2016-09-30T10:44:14Z},
year = {1997},
}
@Article{Sarvas1987,
author = {Sarvas, J.},
title = {Basic Mathematical and Electromagnetic Concepts of the Biomagnetic Inverse Problem},
doi = {10.1088/0031-9155/32/1/004},
issn = {0031-9155},
language = {en},
number = {1},
pages = {11--22},
urldate = {2016-10-14},
volume = {32},
abstract = {Basic mathematical and physical concepts of the biomagnetic inverse problem are reviewed with some new approaches. The forward problem is discussed for both homogeneous and inhomogeneous media. Geselowitz' formulae and a surface integral equation are presented to handle a piecewise homogeneous conductor. The special cases of a spherically symmetric conductor and a horizontally layered medium are discussed in detail. The non-uniqueness of the solution of the magnetic inverse problem is discussed and the difficulty caused by the contribution of the electric potential to the magnetic field outside the conductor is studied. As practical methods of solving the inverse problem, a weighted least-squares search with confidence limits and the method of minimum norm estimate are discussed.},
journal = {Physics in Medicine and Biology},
timestamp = {2016-10-14T12:01:39Z},
year = {1987},
}
@Article{Davies2009a,
author = {Davies, M. E. and Gribonval, R.},
title = {$\ell_p$ minimization and sparse approximation failure for compressible signals},
journal = {Sampling Theory and Applications (SAMPTA)},
owner = {afdidehf},
timestamp = {2016-09-30T11:07:47Z},
year = {2009},
}
@Article{Donoho2006a,
author = {Donoho, D.L. and Elad, M. and Temlyakov, V.N.},
title = {Stable recovery of sparse overcomplete representations in the presence of noise},
doi = {10.1109/TIT.2005.860430},
issn = {0018-9448},
number = {1},
pages = {6--18},
volume = {52},
abstract = {Overcomplete representations are attracting interest in signal processing
theory, particularly due to their potential to generate sparse representations
of signals. However, in general, the problem of finding sparse representations
must be unstable in the presence of noise. This paper establishes
the possibility of stable recovery under a combination of sufficient
sparsity and favorable structure of the overcomplete system. Considering
an ideal underlying signal that has a sufficiently sparse representation,
it is assumed that only a noisy version of it can be observed. Assuming
further that the overcomplete system is incoherent, it is shown that
the optimally sparse approximation to the noisy data differs from
the optimally sparse decomposition of the ideal noiseless signal
by at most a constant multiple of the noise level. As this optimal-sparsity
method requires heavy (combinatorial) computational effort, approximation
algorithms are considered. It is shown that similar stability is
also available using the basis and the matching pursuit algorithms.
Furthermore, it is shown that these methods result in sparse approximation
of the noisy data that contains only terms also appearing in the
unique sparsest representation of the ideal noiseless sparse signal.},
annote = {read},
journal = {IEEE Transactions on Information Theory},
keywords = {approximation theory,Basis pursuit,Dictionaries,greedy approximation,greedy approximation algorithm,incoherent dictionary,iterative methods,Kruskal rank,Linear algebra,matching pursuit,Matching pursuit algorithms,Noise generators,Noise level,noisy data,optimal sparse decomposition,overcomplete representation,signal denoising,signal processing,Signal processing algorithms,signal processing theory,signal representation,signal representations,sparse overcomplete representation,sparse representation,Stability,stable recovery,stepwise regression,superresolution,superresolution signal,time-frequency analysis,Vectors},
month = jan,
owner = {Fardin},
timestamp = {2016-09-29T16:17:03Z},
year = {2006},
}
@Article{Afdideh2016,
author = {Afdideh, F. and Phlypo, R. and Jutten, C.},
title = {Recovery Guarantees for Mixed Norm $\ell_{p_1,p_2}$ Block Sparse Representations},
doi = {10.1109/EUSIPCO.2016.7760274},
pages = {378--382},
abstract = {In this work, we propose theoretical and algorithmic-independent recovery
conditions which guarantee the uniqueness of block sparse recovery
in general dictionaries through a general mixed norm optimization
problem. These conditions are derived using the proposed block uncertainty
principles and block null space property, based on some newly defined
characterizations of block spark, and (p, p)-block mutual incoherence.
We show that there is improvement in the recovery condition when
exploiting the block structure of the representation. In addition,
the proposed recovery condition extends the similar results for block
sparse setting by generalizing the criterion for determining the
active blocks, generalizing the block sparse recovery condition,
and relaxing some constraints on blocks such as linear independency
of the columns.},
annote = {2016 24th European Signal Processing Conference (EUSIPCO)},
journal = {24th European Signal Processing Conference (EUSIPCO)},
keywords = {algorithmic-independent recovery condition,block mutual incoherence constant,Block Mutual Incoherence Constant (BMIC),block null space property,Block Spark,block spark characterization,block sparse recovery condition,Block-sparse recovery conditions,Block-sparsity,block uncertainty,Block Uncertainty Principle (BUP),Dictionaries,Europe,mixed norm lp1-p2 block sparse representation,mixed norm optimization problem,optimisation,Optimization,signal processing,Signal processing algorithms,signal representation,Sparks,Uncertainty},
month = aug,
timestamp = {2017-06-23T11:39:51Z},
year = {2016},
}
@InProceedings{Donoho2006d,
author = {Donoho, D. L. and Tanner, J.},
booktitle = {2006 40th Annual Conference on Information Sciences and Systems},
title = {Thresholds for the Recovery of Sparse Solutions via L1 Minimization},
doi = {10.1109/CISS.2006.286462},
pages = {202--206},
abstract = {The ubiquitous least squares method for systems of linear equations returns solutions which typically have all non-zero entries. However, solutions with the least number of non-zeros allow for greater insight. An exhaustive search for the sparsest solution is intractable, NP-hard. Recently, a great deal of research showed that linear programming can find the sparsest solution for certain 'typical' systems of equations, provided the solution is sufficiently sparse. In this note we report recent progress determining conditions under which the sparsest solution to large systems is available by linear programming. Our work shows that there are sharp thresholds on sparsity below which these methods will succeed and above which they fail; it evaluates those thresholds precisely and hints at several interesting applications.},
keywords = {Cities and towns,compressed sensing,Equations,L1 minimization,least squares approximations,Least squares methods,linear programming,Mathematics,Minimization methods,NP-hard,Sampling methods,sparse matrices,sparse solution,Statistics,ubiquitous least squares method},
month = mar,
timestamp = {2016-09-30T10:46:41Z},
year = {2006},
}
@PhdThesis{Dossal2005,
author = {Dossal, C.},
title = {Estimation de fonctions g{\'e}om{\'e}triques et d{\'e}convolution},
school = {{\'E}cole Polytechnique},
year = {2005},
}
@Article{Gribonval2003a,
author = {Gribonval, R. and Nielsen, M.},
title = {Sparse representations in unions of bases},
doi = {10.1109/TIT.2003.820031},
issn = {0018-9448},
number = {12},
pages = {3320--3325},
volume = {49},
abstract = {The purpose of this correspondence is to generalize a result by Donoho
and Huo and Elad and Bruckstein on sparse representations of signals
in a union of two orthonormal bases for $\mathbb{R}^N$. We consider general (redundant)
dictionaries for $\mathbb{R}^N$, and derive sufficient conditions for having
unique sparse representations of signals in such dictionaries. The
special case where the dictionary is given by the union of $L \geq 2$ orthonormal
bases for $\mathbb{R}^N$ is studied in more detail. In particular, it is proved
that the result of Donoho and Huo, concerning the replacement of
the $\ell_0$ optimization problem with a linear programming problem when
searching for sparse representations, has an analog for dictionaries
that may be highly redundant.},
annote = {read Information Theory, IEEE Transactions on},
journal = {IEEE Transactions on Information Theory},
keywords = {Dictionaries,Grassmannian frame,indeterminacy,linear programming,minimisation,mutually incoherent base,nonlinear approximation,NSP,null space property,redundant dictionary,sparse matrices,sparse representation,Sufficient conditions,Vectors},
month = dec,
owner = {Fardin},
timestamp = {2016-09-29T16:24:26Z},
year = {2003},
}
@InProceedings{Ziaei2010,
author = {Ziaei, A. and Pezeshki, A. and Bahmanpour, S. and Azimi-Sadjadi, M.R.},
booktitle = {2010 Conference Record of the Forty Fourth Asilomar Conference on Signals, Systems and Computers (ASILOMAR)},
title = {Compressed sensing of different size block-sparse signals: Efficient recovery},
doi = {10.1109/ACSSC.2010.5757679},
pages = {818--821},
abstract = {This paper considers compressed sensing of different size block-sparse
signals, i.e. signals with nonzero elements occurring in blocks with
different lengths. A new sufficient condition for mixed l2/l1-optimization
algorithm is derived to successfully recover k-sparse signals. We
show that if the signal possesses k-block sparse structure, then
via the mixed l2/l1-optimization algorithm, better reconstruction results
can be achieved in comparison with the conventional l1-optimization
algorithm and fixed-size mixed l2/l1-optimization algorithm. The
significance of the results presented in this paper lies in the fact
that making explicit use of different block-sparsity can yield better
reconstruction properties than treating the signal as being sparse
in the conventional sense, thereby ignoring the structure in the
signal.},
annote = {read},
keywords = {block-sparse signals,Block-sparsity,Coherence,compressed sensing,Dictionaries,Error analysis,Matching pursuit algorithms,Minimization,mixed l2/l1-optimization algorithm,mixed-optimization algorithm,signal processing,sparse matrices},
month = nov,
owner = {afdidehf},
timestamp = {2016-07-08T11:56:47Z},
year = {2010},
}
@InProceedings{Acar2003,
author = {Acar, C.E. and Gencer, N.G.},
booktitle = {Proceedings of the 25th Annual International Conference of the IEEE Engineering in Medicine and Biology Society},
title = {Sensitivity of EEG and MEG to conductivity perturbations},
doi = {10.1109/IEMBS.2003.1280508},
pages = {2834--2837},
volume = {3},
abstract = {Solution of the electro-magnetic source imaging (EMSI) problem requires
an accurate representation of the head using a numerical model. Some
of the errors in source estimation are due to the differences between
this model and the actual head. This study investigates the effects
of conductivity perturbations, that is, changing the conductivity
of a small region by a small amount, on the EEG and MEG measurements.
By computing the change in measurements for perturbations throughout
the volume, it is possible to obtain a spatial distribution of sensitivity.
Using this information, it is possible, for a given source configuration,
to identify the regions to which a measurement is most sensitive.
In this work, two mathematical expressions for efficient computation
of the sensitivity distribution are presented. The formulation is
implemented for a numerical head model using the finite element method
(FEM). 3D sensitivity distributions are computed and analyzed for
selected dipoles and sensors. It was observed that the voltage measurements
are sensitive to the skull, the regions near the dipole and the electrodes.
The magnetic field measurements are mostly sensitive to regions near
the dipole. It could also be possible to use the computed sensitivity
matrices to estimate or update the conductivity of the tissues from
EEG and MEG measurements.},
keywords = {bioelectric phenomena,biological tissues,biomedical electrodes,biosensors,Brain modeling,Conductivity measurement,conductivity perturbations,dipoles,Distributed computing,EEG,electroencephalography,electro-magnetic source imaging,Estimation error,finite element analysis,finite element method,Finite element methods,magnetic field measurement,magnetic field measurements,Magnetic heads,magnetoencephalography,medical image processing,MEG,numerical head model,Numerical models,physiological models,sensitivity distribution,Sensors,source estimation,voltage measurement,voltage measurements,Volume measurement},
month = sep,
owner = {afdidehf},
timestamp = {2016-07-10T08:10:37Z},
year = {2003},
}
@Article{Zelnik-Manor2012,
author = {Zelnik-Manor, L. and Rosenblum, K. and Eldar, Y.C.},
title = {Dictionary Optimization for Block-Sparse Representations},
doi = {10.1109/TSP.2012.2187642},
issn = {1053-587X},
number = {5},
pages = {2386--2395},
volume = {60},
abstract = {Recent work has demonstrated that using a carefully designed dictionary
instead of a predefined one, can improve the sparsity in jointly
representing a class of signals. This has motivated the derivation
of learning methods for designing a dictionary which leads to the
sparsest representation for a given set of signals. In some applications,
the signals of interest can have further structure, so that they
can be well approximated by a union of a small number of subspaces
(e.g., face recognition and motion segmentation). This implies the
existence of a dictionary which enables block-sparse representations
of the input signals once its atoms are properly sorted into blocks.
In this paper, we propose an algorithm for learning a block-sparsifying
dictionary of a given set of signals. We do not require prior knowledge
on the association of signals into groups (subspaces). Instead, we
develop a method that automatically detects the underlying block
structure given the maximal size of those groups. This is achieved
by iteratively alternating between updating the block structure of
the dictionary and updating the dictionary atoms to better fit the
data. Our experiments show that for block-sparse data the proposed
algorithm significantly improves the dictionary recovery ability
and lowers the representation error compared to dictionary learning
methods that do not employ block structure.},
journal = {IEEE Transactions on Signal Processing},
keywords = {Algorithm design and analysis,block-sparse representation,block-sparsifying dictionary,block sparsity,Cost function,Dictionaries,dictionary design,dictionary learning method,dictionary optimization,Learning systems,Matching pursuit algorithms,optimisation,signal reconstruction,signal representation,Sparse coding,Vectors},
month = may,
owner = {afdidehf},
timestamp = {2016-07-08T12:11:27Z},
year = {2012},
}
@Article{Donoho1989,
author = {Donoho, D.L. and Stark, P.B.},
title = {Uncertainty Principles and Signal Recovery},
number = {3},
pages = {906--931},
volume = {49},
abstract = {The uncertainty principle can easily be generalized to cases where
the sets of concentration are not intervals. Such generalizations
are presented for continuous and discrete-time functions, and for
several measures of concentration (e.g., $L_2 $ and $L_1 $
measures). The generalizations explain interesting phenomena in signal
recovery problems where there is an interplay of missing data, sparsity,
and bandlimiting.},
journal = {SIAM Journal on Applied Mathematics},
keywords = {bandlimiting timelimiting,L1-methods,signal recovery,sparse spike trains,stable recovery,uncertainty principle,unique recovery},
owner = {afdidehf},
timestamp = {2017-06-23T13:12:09Z},
year = {1989},
}
@Article{Yan1991,
author = {Yan, Y. and Nunez, P. L. and Hart, R. T.},
title = {Finite-Element Model of the Human Head: Scalp Potentials Due to Dipole Sources},
doi = {10.1007/BF02442317},
issn = {0140-0118, 1741-0444},
language = {en},
number = {5},
pages = {475--481},
urldate = {2017-08-22},
volume = {29},
abstract = {Three-dimensional finite-element models provide a method to study the relationship between human scalp potentials and neural current sources inside the brain. A new formulation of dipole-like current sources is developed here. Finite-element analyses based on this formulation are carried out for both a three-concentric-spheres model and a human-head model. Differences in calculated scalp potentials between these two models are studied in the context of the forward and inverse problems in EEG. The effects of the eye orbit structure on surface potential distribution are also studied.},
file = {Snapshot:\\\\filesrv4\\home$\\afdidehf\\.windows\\Application Data\\Zotero\\Zotero\\Profiles\\wi3wq94h.default\\zotero\\storage\\DHKDQG8Q\\BF02442317.html:text/html},
journal = {Medical and Biological Engineering and Computing},
month = sep,
shorttitle = {Finite-Element Model of the Human Head},
timestamp = {2017-08-22T09:45:07Z},
year = {1991},
}
@Article{Natarajan1995,
author = {Natarajan, B. K.},
title = {Sparse Approximate Solutions to Linear Systems},
number = {2},
pages = {227--234},
volume = {24},
abstract = {The following problem is considered: given a matrix $A$ in
$\mathbb{R}^{m \times n}$ ($m$ rows and $n$ columns), a vector $b$ in
$\mathbb{R}^m$, and $\epsilon > 0$, compute a vector $x$ satisfying
$\|Ax - b\|_2 \leq \epsilon$ if such exists, such that $x$ has the fewest
number of non-zero entries over all such vectors. It is shown that
the problem is NP-hard, but that the well-known greedy heuristic
is good in that it computes a solution with at most
$\lceil 18\,\mathrm{Opt}(\epsilon/2)\,\|\tilde{A}^+\|_2^2 \ln(\|b\|_2/\epsilon)\rceil$
non-zero entries, where $\mathrm{Opt}(\epsilon/2)$ is the optimum
number of nonzero entries at error $\epsilon/2$, $\tilde{A}$ is the matrix obtained
by normalizing each column of $A$ with respect to the $L_2$ norm, and
$\tilde{A}^+$ is its pseudo-inverse.},
journal = {SIAM Journal on Computing},
keywords = {Linear systems,sparse solutions},
month = apr,
owner = {afdidehf},
timestamp = {2017-06-23T13:18:32Z},
year = {1995},
}
@Article{Blumensath2009,
author = {Blumensath, T. and Davies, M.E.},
title = {Sampling Theorems for Signals From the Union of Finite-Dimensional Linear Subspaces},
doi = {10.1109/TIT.2009.2013003},
issn = {0018-9448},
number = {4},
pages = {1872--1882},
volume = {55},
abstract = {Compressed sensing is an emerging signal acquisition technique that
enables signals to be sampled well below the Nyquist rate, given
that the signal has a sparse representation in an orthonormal basis.
In fact, sparsity in an orthonormal basis is only one possible signal
model that allows for sampling strategies below the Nyquist rate.
In this paper, we consider a more general signal model and assume
signals that live on or close to the union of linear subspaces of
low dimension. We present sampling theorems for this model that are
in the same spirit as the Nyquist-Shannon sampling theorem in that
they connect the number of required samples to certain model parameters.
Contrary to the Nyquist-Shannon sampling theorem, which gives a necessary
and sufficient condition for the number of required samples as well
as a simple linear algorithm for signal reconstruction, the model
studied here is more complex. We therefore concentrate on two aspects
of the signal model, the existence of one to one maps to lower dimensional
observation spaces and the smoothness of the inverse map. We show
that almost all linear maps are one to one when the observation space
is at least of the same dimension as the largest dimension of the
convex hull of the union of any two subspaces in the model. However,
we also show that in order for the inverse map to have certain smoothness
properties such as a given finite Lipschitz constant, the required
observation dimension necessarily depends logarithmically on the
number of subspaces in the signal model. In other words, while unique
linear sampling schemes require a small number of samples depending
only on the dimension of the subspaces involved, in order to have
stable sampling methods, the number of samples depends necessarily
logarithmically on the number of subspaces in the model. These results
are then applied to two examples, the standard compressed sensing
signal model in which the signal has a sparse representation
in an orthonormal basis and to a sparse signal model with additional
tree structure.},
journal = {IEEE Transactions on Information Theory},
keywords = {additional tree structure,Bandwidth,compressed sensing,compressed sensing signal model,Councils,embedding and restricted isometry,finite-dimensional linear subspaces,Focusing,Image reconstruction,linear sampling schemes,Nyquist criterion,Nyquist rate,Nyquist-Shannon sampling theorem,Sampling methods,sampling theorems,signal acquisition,signal processing,signal reconstruction,signal sampling,sparse representation,Sufficient conditions,Tree data structures,trees (mathematics),unions of linear subspaces},
month = apr,
owner = {afdidehf},
timestamp = {2016-09-30T11:25:16Z},
year = {2009},
}
@InCollection{Vaiter2015,
author = {Vaiter, Samuel and Peyr{\'e}, Gabriel and Fadili, Jalal},
booktitle = {Sampling {{Theory}}, a {{Renaissance}}},
title = {Low {{Complexity Regularization}} of {{Linear Inverse Problems}}},
doi = {10.1007/978-3-319-19749-4_3},
editor = {Pfander, G{\"o}tz E.},
isbn = {978-3-319-19748-7 978-3-319-19749-4},
language = {en},
pages = {103--153},
publisher = {{Springer International Publishing}},
series = {Applied and Numerical Harmonic Analysis},
urldate = {2016-09-30},
abstract = {Inverse problems and regularization theory is a central theme in imaging sciences, statistics, and machine learning. The goal is to reconstruct an unknown vector from partial, indirect, and possibly noisy measurements of it. A now standard method for recovering the unknown vector is to solve a convex optimization problem that enforces some prior knowledge about its structure. This chapter delivers a review of recent advances in the field where the regularization prior promotes solutions conforming to some notion of simplicity/low complexity. These priors encompass as popular examples sparsity and group sparsity (to capture the compressibility of natural signals and images), total variation and analysis sparsity (to promote piecewise regularity), and low rank (as natural extension of sparsity to matrix-valued data). Our aim is to provide a unified treatment of all these regularizations under a single umbrella, namely the theory of partial smoothness. This framework is very general and accommodates all low complexity regularizers just mentioned, as well as many others. Partial smoothness turns out to be the canonical way to encode low-dimensional models that can be linear spaces or more general smooth manifolds. This review is intended to serve as a one stop shop toward the understanding of the theoretical properties of the so-regularized solutions. It covers a large spectrum including (i) recovery guarantees and stability to noise, both in terms of $\ell_2$-stability and model (manifold) identification; (ii) sensitivity analysis to perturbations of the parameters involved (in particular the observations), with applications to unbiased risk estimation; (iii) convergence properties of the forward-backward proximal splitting scheme that is particularly well suited to solve the corresponding large-scale regularized optimization problem.},
copyright = {\textcopyright{}2015 Springer International Publishing Switzerland},
keywords = {Appl.Mathematics/Computational Methods of Engineering,Approximations and Expansions,Functions of a Complex Variable,Information and Communication; Circuits,Signal; Image and Speech Processing},
timestamp = {2016-10-07T14:20:53Z},
year = {2015},
}
@Article{Amaldi1998,
author = {Amaldi, Edoardo and Kann, Viggo},
title = {On the approximability of minimizing nonzero variables or unsatisfied relations in linear systems},
doi = {10.1016/S0304-3975(97)00115-1},
issn = {0304-3975},
number = {1--2},
pages = {237--260},
urldate = {2016-03-30},
volume = {209},
abstract = {We investigate the computational complexity of two closely related classes of combinatorial optimization problems for linear systems which arise in various fields such as machine learning, operations research and pattern recognition. In the first class (Min ULR) one wishes, given a possibly infeasible system of linear relations, to find a solution that violates as few relations as possible while satisfying all the others. In the second class (Min RVLS) the linear system is supposed to be feasible and one looks for a solution with as few nonzero variables as possible. For both Min ULR and Min RVLS the four basic types of relational operators =, ⩾, > and ≠ are considered. While Min RVLS with equations was mentioned to be NP-hard in (Garey and Johnson, 1979), we established in (Amaldi, 1992; Amaldi and Kann, 1995) that Min ULR with equalities and inequalities are NP-hard even when restricted to homogeneous systems with bipolar coefficients. The latter problems have been shown hard to approximate in (Arora et al., 1993). In this paper we determine strong bounds on the approximability of various variants of Min RVLS and Min ULR, including constrained ones where the variables are restricted to take binary values or where some relations are mandatory while others are optional. The various NP-hard versions turn out to have different approximability properties depending on the type of relations and the additional constraints, but none of them can be approximated within any constant factor, unless P = NP. Particular attention is devoted to two interesting special cases that occur in discriminant analysis and machine learning. In particular, we disprove a conjecture of van Horn and Martinez (1992) regarding the existence of a polynomial-time algorithm to design linear classifiers (or perceptrons) that involve a close-to-minimum number of features.},
journal = {Theoretical Computer Science},
keywords = {Approximability bounds,Designing linear classifiers,Linear systems,Nonzero variables,Unsatisfied relations},
month = dec,
timestamp = {2016-09-30T10:42:52Z},
year = {1998},
}
@Article{Gencer2004,
author = {Gen{\c c}er, Nevzat G. and Acar, Can E.},
title = {Sensitivity of {{EEG}} and {{MEG}} Measurements to Tissue Conductivity},
doi = {10.1088/0031-9155/49/5/004},
issn = {0031-9155},
language = {en},
number = {5},
pages = {701},
urldate = {2016-11-22},
volume = {49},
abstract = {Monitoring the electrical activity inside the human brain using electrical and magnetic field measurements requires a mathematical head model. Using this model the potential distribution in the head and magnetic fields outside the head are computed for a given source distribution. This is called the forward problem of the electro-magnetic source imaging. Accurate representation of the source distribution requires a realistic geometry and an accurate conductivity model. Deviation from the actual head is one of the reasons for the localization errors. In this study, the mathematical basis for the sensitivity of voltage and magnetic field measurements to perturbations from the actual conductivity model is investigated. Two mathematical expressions are derived relating the changes in the potentials and magnetic fields to conductivity perturbations. These equations show that measurements change due to secondary sources at the perturbation points. A finite element method (FEM) based formulation is developed for computing the sensitivity of measurements to tissue conductivities efficiently. The sensitivity matrices are calculated for both a concentric spheres model of the head and a realistic head model. The rows of the sensitivity matrix show that the sensitivity of a voltage measurement is greater to conductivity perturbations on the brain tissue in the vicinity of the dipole, the skull and the scalp beneath the electrodes. The sensitivity values for perturbations in the skull and brain conductivity are comparable and they are, in general, greater than the sensitivity for the scalp conductivity. The effects of the perturbations on the skull are more pronounced for shallow dipoles, whereas, for deep dipoles, the measurements are more sensitive to the conductivity of the brain tissue near the dipole. The magnetic measurements are found to be more sensitive to perturbations near the dipole location. The sensitivity to perturbations in the brain tissue is much greater when the primary source is tangential and it decreases as the dipole depth increases. The resultant linear system of equations can be used to update the initially assumed conductivity distribution for the head. They may be further exploited to image the conductivity distribution of the head from EEG and/or MEG measurements. This may be a fast and promising new imaging modality.},
journal = {Physics in Medicine and Biology},
timestamp = {2016-11-22T12:42:32Z},
year = {2004},
}
@Article{Blumensath2008,
author = {Blumensath, Thomas and Davies, Mike E.},
title = {Iterative {{Thresholding}} for {{Sparse Approximations}}},
doi = {10.1007/s00041-008-9035-z},
issn = {1069-5869, 1531-5851},
language = {en},
number = {5-6},
pages = {629--654},
urldate = {2016-10-07},
volume = {14},
abstract = {Sparse signal expansions represent or approximate a signal using a small number of elements from a large collection of elementary waveforms. Finding the optimal sparse expansion is known to be NP-hard in general, and non-optimal strategies such as Matching Pursuit, Orthogonal Matching Pursuit, Basis Pursuit and Basis Pursuit De-noising are often called upon. These methods show good performance in practical situations; however, they do not operate on the $\ell_0$-penalised cost functions that are often at the heart of the problem. In this paper we study two iterative algorithms that minimise the cost functions of interest. Furthermore, each iteration of these strategies has computational complexity similar to a Matching Pursuit iteration, making the methods applicable to many real-world problems. However, the optimisation problem is non-convex and the strategies are only guaranteed to find local solutions, so good initialisation becomes paramount. We here study two approaches. The first approach uses the proposed algorithms to refine the solutions found with other methods, replacing the typically used conjugate gradient solver. The second strategy adapts the algorithms and we show on one example that this adaptation can be used to achieve results that lie between those obtained with Matching Pursuit and those found with Orthogonal Matching Pursuit, while retaining the computational complexity of the Matching Pursuit algorithm.},
journal = {Journal of Fourier Analysis and Applications},
month = sep,
timestamp = {2016-10-07T11:27:57Z},
year = {2008},
}
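% Note (free text between entries; ignored by BibTeX). A minimal sketch of the
% iterative hard-thresholding update studied in Blumensath2008, assuming the
% spectral norm of A is at most one so that the plain gradient step is stable;
% the sparsity level k and iteration count are illustrative parameters:
%
%   import numpy as np
%
%   def iht(A, b, k, iters=200):
%       """Keep the k largest-magnitude entries after each gradient step."""
%       x = np.zeros(A.shape[1])
%       for _ in range(iters):
%           g = x + A.T @ (b - A @ x)           # gradient step on 0.5*||b - Ax||^2
%           keep = np.argsort(np.abs(g))[-k:]   # indices of the k largest entries
%           x = np.zeros_like(g)
%           x[keep] = g[keep]                   # hard threshold to sparsity k
%       return x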
@Article{Hadamard1902,
author = {Hadamard, Jacques},
title = {Sur les probl{\`e}mes aux d{\'e}riv{\'e}es partielles et leur signification physique},
pages = {49--52},
volume = {13},
journal = {Princeton University Bulletin},
keywords = {Ill-posed,problem},
timestamp = {2017-04-19T14:24:06Z},
year = {1902},
}
@InProceedings{Elhamifar2011,
author = {Elhamifar, E. and Vidal, R.},
booktitle = {2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
title = {Robust classification using structured sparse representation},
doi = {10.1109/CVPR.2011.5995664},
pages = {1873--1879},
abstract = {In many problems in computer vision, data in multiple classes lie in multiple low-dimensional subspaces of a high-dimensional ambient space. However, most of the existing classification methods do not explicitly take this structure into account. In this paper, we consider the problem of classification in the multi-subspace setting using sparse representation techniques. We exploit the fact that the dictionary of all the training data has a block structure where the training data in each class form few blocks of the dictionary. We cast the classification as a structured sparse recovery problem where our goal is to find a representation of a test example that uses the minimum number of blocks from the dictionary. We formulate this problem using two different classes of non-convex optimization programs. We propose convex relaxations for these two non-convex programs and study conditions under which the relaxations are equivalent to the original problems. In addition, we show that the proposed optimization programs can be modified properly to also deal with corrupted data. To evaluate the proposed algorithms, we consider the problem of automatic face recognition. We show that casting the face recognition problem as a structured sparse recovery problem can improve the results of the state-of-the-art face recognition algorithms, especially when we have a relatively small number of training data for each class. In particular, we show that the new class of convex programs can improve the state-of-the-art face recognition results by 10% with only 25% of the training data. In addition, we show that the algorithms are robust to occlusion, corruption, and disguise.},
keywords = {automatic face recognition,computer vision,concave programming,convex relaxations,Dictionaries,Face,face recognition,image classification,image representation,multi-subspace setting,nonconvex optimization programs,Optimization,robust classification,structured sparse representation,Training data,Vectors},
month = jun,
timestamp = {2016-09-30T11:45:25Z},
year = {2011},
}
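% Note (free text between entries; ignored by BibTeX). Elhamifar2011 solves
% convex relaxations, which are not reproduced here; as a stand-in, a greedy
% block-OMP sketch illustrates the block-sparse model in which whole blocks of
% the dictionary are selected at once (all names are illustrative):
%
%   import numpy as np
%
%   def block_omp(A, b, blocks, k):
%       """Greedy block-sparse recovery: select up to k whole blocks.
%       blocks is a list of integer index arrays partitioning A's columns."""
%       chosen, residual = [], b.astype(float).copy()
%       cols, x_s = np.array([], dtype=int), np.zeros(0)
%       for _ in range(k):
%           scores = [np.linalg.norm(A[:, blk].T @ residual) for blk in blocks]
%           best = int(np.argmax(scores))
%           if best in chosen:
%               break                       # no new block improves the fit
%           chosen.append(best)
%           cols = np.concatenate([blocks[i] for i in chosen])
%           # refit jointly on all selected blocks, then update the residual
%           x_s, *_ = np.linalg.lstsq(A[:, cols], b, rcond=None)
%           residual = b - A[:, cols] @ x_s
%       x = np.zeros(A.shape[1])
%       x[cols] = x_s
%       return x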
@Article{Zhao2015a,
author = {Zhao, Junxi and Song, Rongfang and Zhao, Jie and Zhu, Wei-Ping},
title = {New Conditions for Uniformly Recovering Sparse Signals via Orthogonal Matching Pursuit},
doi = {10.1016/j.sigpro.2014.06.010},
issn = {0165-1684},
pages = {106--113},
urldate = {2016-09-23},
volume = {106},
abstract = {Recently, a lot of work has been done on conditions for guaranteeing sparse signal recovery using orthogonal matching pursuit (OMP). However, none of the existing conditions is both necessary and sufficient in terms of the so-called restricted isometric property, coherence, cumulative coherence (Babel function), or other verifiable quantities in the literature. Motivated by this observation, we propose a new measure of a matrix, named union cumulative coherence, and present both sufficient and necessary conditions under which the OMP algorithm can uniformly recover sparse signals for all sensing matrices. The proposed condition guarantees a uniform recovery of sparse signals using OMP, and reveals the capability of OMP in sparse recovery. We demonstrate by examples that the proposed condition can be used to determine the recoverable sparse signals via OMP more effectively than the conditions existing in the literature. Furthermore, sparse recovery from noisy measurements is also considered in terms of the proposed union cumulative coherence.},
annote = {read},
journal = {Signal Processing},
keywords = {compressive sensing,Cumulative coherence,orthogonal matching pursuit,Restricted isometric property,sparse signal},
month = jan,
timestamp = {2016-09-30T13:51:59Z},
year = {2015},
}
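% Note (free text between entries; ignored by BibTeX). The union cumulative
% coherence of Zhao2015a is not reproduced here, but the standard quantities
% the abstract compares against are easy to compute. A sketch of mutual
% coherence and the cumulative coherence (Babel function) mu1(k):
%
%   import numpy as np
%
%   def coherence_measures(A, k):
%       """Mutual coherence and Babel function mu1(k) of a matrix A,
%       after normalizing its columns to unit l2 norm."""
%       A = A / np.linalg.norm(A, axis=0)
%       G = np.abs(A.T @ A)                    # absolute Gram matrix
%       np.fill_diagonal(G, 0.0)               # drop self inner products
%       mu = G.max()                           # mutual coherence
%       mu1_k = np.sort(G, axis=1)[:, -k:].sum(axis=1).max()  # Babel function
%       return mu, mu1_k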
@InProceedings{Elhamifar2010,
author = {Elhamifar, E. and Vidal, R.},
booktitle = {2010 IEEE International Conference on Acoustics, Speech and Signal Processing},
title = {Clustering disjoint subspaces via sparse representation},
doi = {10.1109/ICASSP.2010.5495317},
pages = {1926--1929},
abstract = {Given a set of data points drawn from multiple low-dimensional linear subspaces of a high-dimensional space, we consider the problem of clustering these points according to the subspaces they belong to. Our approach exploits the fact that each data point can be written as a sparse linear combination of all the other points. When the subspaces are independent, the sparse coefficients can be found by solving a linear program. However, when the subspaces are disjoint, but not independent, the problem becomes more challenging. In this paper, we derive theoretical bounds relating the principal angles between the subspaces and the distribution of the data points across all the subspaces under which the coefficients are guaranteed to be sparse. The clustering of the data is then easily obtained from the sparse coefficients. We illustrate the validity of our results through simulation experiments.},
keywords = {Application software,Clustering methods,computer vision,data clustering,disjoint subspace clustering,disjoint subspaces,Image processing,Image segmentation,pattern clustering,signal processing,sparse coefficients,sparse linear combination,sparse matrices,sparse representation,sparsity,statistical analysis,subspace angles,subspace clustering,video sequences},
month = mar,
timestamp = {2016-09-30T13:27:54Z},
year = {2010},
}
@Article{Donoho2005b,
author = {Donoho, David L.},
title = {High-Dimensional Centrally Symmetric Polytopes with Neighborliness Proportional to Dimension},
doi = {10.1007/s00454-005-1220-0},
issn = {0179-5376, 1432-0444},
language = {en},
number = {4},
pages = {617--652},
urldate = {2016-05-15},
volume = {35},
journal = {Discrete and Computational Geometry},
keywords = {Combinatorics,Computational Mathematics and Numerical Analysis},
month = dec,
timestamp = {2016-10-07T14:42:32Z},
year = {2005},
}
@Article{Elhamifar2013,
author = {Elhamifar, E. and Vidal, R.},
title = {Sparse Subspace Clustering: Algorithm, Theory, and Applications},
doi = {10.1109/TPAMI.2013.57},
issn = {0162-8828},
number = {11},
pages = {2765--2781},
volume = {35},
abstract = {Many real-world problems deal with collections of high-dimensional data, such as images, videos, text, and web documents, DNA microarray data, and more. Often, such high-dimensional data lie close to low-dimensional structures corresponding to several classes or categories to which the data belong. In this paper, we propose and study an algorithm, called sparse subspace clustering, to cluster data points that lie in a union of low-dimensional subspaces. The key idea is that, among the infinitely many possible representations of a data point in terms of other points, a sparse representation corresponds to selecting a few points from the same subspace. This motivates solving a sparse optimization program whose solution is used in a spectral clustering framework to infer the clustering of the data into subspaces. Since solving the sparse optimization program is in general NP-hard, we consider a convex relaxation and show that, under appropriate conditions on the arrangement of the subspaces and the distribution of the data, the proposed minimization program succeeds in recovering the desired sparse representations. The proposed algorithm is efficient and can handle data points near the intersections of subspaces. Another key advantage of the proposed algorithm with respect to the state of the art is that it can deal directly with data nuisances, such as noise, sparse outlying entries, and missing entries, by incorporating the model of the data into the sparse optimization program. We demonstrate the effectiveness of the proposed algorithm through experiments on synthetic data as well as the two real-world problems of motion segmentation and face clustering.},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {$\ell_1$-minimization,Algorithms,Artificial Intelligence,Biometry,clustering,Clustering algorithms,computational complexity,computer vision,convex programming,convex relaxation,data point clustering,data point representation,data structures,Face,face clustering,general NP-hard problem,high-dimensional data,high-dimensional data collection,Humans,Image Interpretation; Computer-Assisted,intrinsic low-dimensionality,minimisation,minimization program,motion segmentation,Noise,Optimization,pattern clustering,Pattern Recognition; Automated,principal angles,Sample Size,sparse matrices,sparse optimization program,sparse representation,sparse subspace clustering algorithm,spectral clustering,spectral clustering framework,subspaces,synthetic data,Vectors},
month = nov,
shorttitle = {Sparse Subspace Clustering},
timestamp = {2016-09-30T13:27:00Z},
year = {2013},
}
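% Note (free text between entries; ignored by BibTeX). A compact sketch of the
% SSC pipeline described in the Elhamifar2013 abstract: sparse
% self-representation followed by spectral clustering. scikit-learn's Lasso
% stands in for the paper's sparse optimization program, and alpha is an
% illustrative hyper-parameter, not the paper's choice:
%
%   import numpy as np
%   from sklearn.cluster import SpectralClustering
%   from sklearn.linear_model import Lasso
%
%   def ssc(X, n_clusters, alpha=0.01):
%       """X has shape (d, N), one data point per column."""
%       N = X.shape[1]
%       C = np.zeros((N, N))
%       for i in range(N):
%           others = np.delete(np.arange(N), i)   # exclude the point itself
%           model = Lasso(alpha=alpha, fit_intercept=False, max_iter=5000)
%           model.fit(X[:, others], X[:, i])      # sparse self-representation
%           C[others, i] = model.coef_
%       W = np.abs(C) + np.abs(C).T               # symmetric affinity matrix
%       return SpectralClustering(n_clusters=n_clusters,
%                                 affinity='precomputed').fit_predict(W)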
@Article{Chen2006,
author = {Chen, Jie and Huo, X.},
title = {Theoretical Results on Sparse Representations of Multiple-Measurement Vectors},
doi = {10.1109/TSP.2006.881263},
issn = {1053-587X},
number = {12},
pages = {4634--4643},
volume = {54},
abstract = {The sparse representation of a multiple-measurement vector (MMV) is a relatively new problem in sparse representation. Efficient methods have been proposed. Although many theoretical results are available for the simpler single-measurement-vector (SMV) case, the theoretical analysis regarding MMV is lacking. In this paper, some known results for SMV are generalized to MMV. Some of these new results take advantage of the additional information in the formulation of MMV. We consider uniqueness under both an $\ell_0$-norm-like criterion and an $\ell_1$-norm-like criterion. The consequent equivalence between the $\ell_0$-norm approach and the $\ell_1$-norm approach indicates a computationally efficient way of finding the sparsest representation in a redundant dictionary. For greedy algorithms, it is proven that under certain conditions, orthogonal matching pursuit (OMP) can find the sparsest representation of an MMV with computational efficiency, just as in SMV. Simulations show that the predictions made by the proved theorems tend to be very conservative; this is consistent with some recent advances in probabilistic analysis based on random matrix theory. The connections will be discussed.},
journal = {IEEE Transactions on Signal Processing},
keywords = {Analytical models,Basis pursuit,Computational efficiency,Computational modeling,Dictionaries,Equations,Greedy algorithms,iterative methods,l0-norm-like criterion,l1-norm-like criterion,Magnetic analysis,Matching pursuit algorithms,matrix algebra,multiple-measurement vector (MMV),multiple-measurement vectors,orthogonal matching pursuit,orthogonal matching pursuit (OMP),Predictive models,probabilistic analysis,random matrix theory,redundant dictionary,signal representation,single-measurement vector,sparse matrices,sparse representation,sparse representations,statistical analysis,time-frequency analysis},
month = dec,
owner = {Fardin},
timestamp = {2016-09-30T11:20:03Z},
year = {2006},
}
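% Note (free text between entries; ignored by BibTeX). Chen2006 proves
% conditions under which OMP succeeds on an MMV; a common way to run OMP on
% multiple measurement vectors is the simultaneous-OMP variant sketched below,
% ranking atoms by their total correlation across channels (names illustrative):
%
%   import numpy as np
%
%   def somp(A, B, k):
%       """Simultaneous OMP for A X ~ B, with B of shape (m, L)."""
%       support, R = [], B.astype(float).copy()
%       for _ in range(k):
%           scores = np.linalg.norm(A.T @ R, axis=1)   # row-wise l2 over channels
%           scores[support] = -np.inf                  # never re-pick an atom
%           support.append(int(np.argmax(scores)))
%           # joint least-squares refit on the support, then update residuals
%           X_s, *_ = np.linalg.lstsq(A[:, support], B, rcond=None)
%           R = B - A[:, support] @ X_s
%       X = np.zeros((A.shape[1], B.shape[1]))
%       X[support, :] = X_s
%       return X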
@InProceedings{Kwon2012,
author = {Kwon, H. and Rao, B.D.},
booktitle = {2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
title = {On the benefits of the block-sparsity structure in sparse signal recovery},
doi = {10.1109/ICASSP.2012.6288716},
pages = {3685--3688},
abstract = {We study the problem of support recovery of block-sparse signals,
where nonzero entries occur in clusters, via random noisy measurements.
By drawing an analogy between the problem of block-sparse signal recovery
and the problem of communication over Gaussian multi-input and single-output
multiple access channel, we derive the sufficient and necessary condition
under which exact support recovery is possible. Based on the results,
we show that block-sparse signals can reduce the number of measurements
required for exact support recovery, by at least `1/(block size)',
compared to conventional or scalar-sparse signals. The minimum gain