-
Notifications
You must be signed in to change notification settings - Fork 1
/
BiblioLearning.bib
833 lines (745 loc) · 46.3 KB
/
BiblioLearning.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
@inproceedings{poupard2019large,
  title     = {Large-scale unsupervised clustering of {Orca} vocalizations: a model for describing orca communication systems},
  author    = {Poupard, Marion and Best, Paul and Schl{\"u}ter, Jan and Symonds, Helena and Spong, Paul and Lengagne, Thierry and Soriano, Thierry and Glotin, Herv{\'e}},
  booktitle = {2nd Int. Workshop on Vocal Interactivity in-and-between Humans, Animals and Robots},
  year      = {2019}
}
@inproceedings{2020-Acusticum-STEREO_TO_FIVE_CHANNELS_BOMBYX_SONOBUOYS__FROM_FOUR_YEARS_CETACEAN_MONITORING_TO_REAL_TIME_WHALE_SHIP_ANTI_COLLISION_SYSTEM,
  title     = {Stereo to Five-Channels {BOMBYX} Sonobuoys: From Four Years Cetacean Monitoring to Real-Time Whale-Ship Anti-Collision System},
  author    = {Best, Paul and Marzetti, Sebastian and Poupard, Marion and Ferrari, Maxence and Paris, S{\'e}bastien and Marxer, Ricard and Philipe, Olivier and Gies, Valentin and Barchasz, Valentin and Glotin, Herv{\'e}},
  booktitle = {e-Forum Acusticum 2020},
  year      = {2020}
}
@inproceedings{2016-Springer-LifeCLEF_2016__multimedia_life_species_identification_challenges,
  title        = {{LifeCLEF} 2016: multimedia life species identification challenges},
  author       = {Joly, Alexis and Go{\"e}au, Herv{\'e} and Glotin, Herv{\'e} and Spampinato, Concetto and Bonnet, Pierre and Vellinga, Willem-Pier and Champ, Julien and Planqu{\'e}, Robert and Palazzo, Simone and M{\"u}ller, Henning},
  booktitle    = {International Conference of the Cross-Language Evaluation Forum for European Languages},
  pages        = {286--310},
  year         = {2016},
  organization = {Springer}
}
@inproceedings{2018-SLTW-SpeakerRecognitionFromRawWaveformWithSincNet,
  author    = {Ravanelli, Mirco and Bengio, Yoshua},
  year      = {2018},
  month     = dec,
  pages     = {1021--1028},
  title     = {Speaker Recognition from Raw Waveform with {SincNet}},
  booktitle = {IEEE Spoken Language Technology Workshop},
  doi       = {10.1109/SLT.2018.8639585}
}
@inproceedings{2021-SLTW-InterpretableAndLearnableSuperResolutionTimeFrequencyRepresentation,
  author    = {Balestriero, Randall and Glotin, Herv{\'e} and Baraniuk, Richard G.},
  year      = {2021},
  month     = feb,
  volume    = {145},
  pages     = {1--35},
  title     = {Interpretable and Learnable Super-Resolution Time-Frequency Representation},
  booktitle = {2nd Annual Conference on Mathematical and Scientific Machine Learning},
}
@article{2017-IEEE-EfficientProcessingOfDeepNeuralNetworks,
  author  = {Sze, Vivienne and Chen, Yu-Hsin and Yang, Tien-Ju and Emer, Joel},
  year    = {2017},
  month   = mar,
  title   = {Efficient Processing of Deep Neural Networks: A Tutorial and Survey},
  volume  = {105},
  journal = {Proceedings of the IEEE},
  doi     = {10.1109/JPROC.2017.2761740}
}
@article{2020-JSA-ASurveyOnHardwareAcceleratorsAndOptimizationTechniquesForRNNs,
  author  = {Mittal, Sparsh and Umesh, Sumanth},
  year    = {2020},
  month   = jul,
  title   = {A Survey on Hardware Accelerators and Optimization Techniques for {RNNs}},
  journal = {Journal of Systems Architecture},
  doi     = {10.1016/j.sysarc.2020.101839}
}
@article{2018-Loihi-IEEE_Micro,
  author  = {Davies, Mike and Srinivasa, Narayan and Lin, Tsung-Han and Chinya, Gautham and Joshi, Prasad and Lines, Andrew and Wild, Andreas and Wang, Hong and others},
  year    = {2018},
  month   = jan,
  pages   = {82--99},
  title   = {Loihi: A Neuromorphic Manycore Processor with On-Chip Learning},
  volume  = {38},
  number  = {1},
  journal = {IEEE Micro},
  doi     = {10.1109/MM.2018.112130359}
}
@article{2020-Convolution-Kernel-Operations-on-a-Two-Dimensional-Spin-Memristor-Cross-Array,
  author  = {Zhu, Saike and others},
  journal = {Sensors},
  title   = {Convolution Kernel Operations on a Two-Dimensional Spin Memristor Cross Array},
  year    = {2020},
  volume  = {21},
}
@article{2012-Neuroscience-How_many_neurons_do_you_have__Some_dogmas_of_quantitative_neuroscience_under_revision,
  author   = {Lent, Roberto and Azevedo, Frederico A. C. and Andrade-Moraes, Carlos H. and Pinto, Ana V. O.},
  title    = {How many neurons do you have? Some dogmas of quantitative neuroscience under revision},
  journal  = {European Journal of Neuroscience},
  volume   = {35},
  number   = {1},
  pages    = {1--9},
  keywords = {brain development, brain evolution, glial cell number, neuron number},
  doi      = {10.1111/j.1460-9568.2011.07923.x},
  url      = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1460-9568.2011.07923.x},
  eprint   = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.1460-9568.2011.07923.x},
  abstract = {Abstract Owing to methodological shortcomings and a certain conservatism that consolidates wrong assumptions in the literature, some dogmas have become established and reproduced in papers and textbooks, derived from quantitative features of the brain. The first dogma states that the cerebral cortex is the pinnacle of brain evolution – based on the observations that its volume is greater in more ‘intelligent’ species, and that cortical surface area grows more than any other brain region, to reach the largest proportion in higher primates and humans. The second dogma claims that the human brain contains 100 billion neurons, plus 10-fold more glial cells. These round numbers have become widely adopted, although data provided by different authors have led to a broad range of 75–125 billion neurons in the whole brain. The third dogma derives from the second, and states that our brain is structurally special, an outlier as compared with other primates. Being so large and convoluted, it is a special construct of nature, unrelated to evolutionary scaling. Finally, the fourth dogma appeared as a tentative explanation for the considerable growth of the brain throughout development and evolution – being modular in structure, the brain (and particularly the cerebral cortex) grows by tangential addition of modules that are uniform in neuronal composition. In this review, we sought to examine and challenge these four dogmas, and propose other interpretations or simply their replacement with alternative views.},
  year     = {2012}
}
@article{2019-TNNLS-RegularizingDeepNeuralNetworksByEnhancingDiversityinFeatureExtraction,
  author  = {Ayinde, Babajide O. and Inanc, Tamer and Zurada, Jacek M.},
  journal = {IEEE Transactions on Neural Networks and Learning Systems},
  title   = {Regularizing Deep Neural Networks by Enhancing Diversity in Feature Extraction},
  year    = {2019},
  volume  = {30},
  number  = {9},
  pages   = {2650--2661},
  doi     = {10.1109/TNNLS.2018.2885972}
}
@inproceedings{2020-IEEE-VTC-DeepLearningBasedDiversityCombiningForGenericNoiseAndInterference,
  author    = {Ahmed, Imtiaz and Allen, Evan J.},
  booktitle = {2020 IEEE 91st Vehicular Technology Conference (VTC2020-Spring)},
  title     = {Deep Learning Based Diversity Combining for Generic Noise and Interference},
  year      = {2020},
  pages     = {1--4},
  doi       = {10.1109/VTC2020-Spring48590.2020.9129375}
}
@inproceedings{2015-EUSIPCO-DoesDiversityImproveDeepLearning,
  author    = {Alvear-Sandoval, R. F. and Figueiras-Vidal, A. R.},
  booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},
  title     = {Does diversity improve deep learning?},
  year      = {2015},
  pages     = {2496--2500},
  doi       = {10.1109/EUSIPCO.2015.7362834}
}
@inproceedings{2020-ICDE-EfficientDiversityDrivenEnsembleForDeepNeuralNetworks,
  author    = {Zhang, Wentao and Jiang, Jiawei and Shao, Yingxia and Cui, Bin},
  booktitle = {2020 IEEE 36th International Conference on Data Engineering (ICDE)},
  title     = {Efficient Diversity-Driven Ensemble for Deep Neural Networks},
  year      = {2020},
  pages     = {73--84},
  doi       = {10.1109/ICDE48307.2020.00014}
}
@article{2015-IJSSIS-WSN_based_thermal_modeling_a_new_indoor_energy_efficient_solution,
  author   = {Zhao, Yi and Gies, Valentin and Ginoux, Jean-Marc},
  title    = {{WSN} based thermal modeling: a new indoor energy efficient solution},
  journal  = {International Journal on Smart Sensing and Intelligent Systems},
  volume   = {8},
  number   = {2},
  pages    = {869--895},
  keywords = {Wireless Sensors Network, Back-propagation Neural Network, Thermal Modeling, Linear Approximations, Effective Indoor Thermal Time Constant},
  doi      = {10.21307/ijssis-2017-787},
  url      = {https://www.exeley.com/in_jour_smart_sensing_and_intelligent_systems/doi/10.21307/ijssis-2017-787},
  year     = {2015},
  month    = jun
}
@inproceedings{2013-IJCNN-Multi_pattern_cross_training_An_ANN_model_training_method_using_WSN_sensor_data,
  author    = {Zhao, Yi and Gies, Valentin and Teles, Ademir Felipe and Ginoux, Jean Marc},
  booktitle = {The 2013 International Joint Conference on Neural Networks (IJCNN)},
  title     = {Multi-pattern cross training: An {ANN} model training method using {WSN} sensor data},
  year      = {2013},
  pages     = {1--6},
  doi       = {10.1109/IJCNN.2013.6706767}
}
@article{2000-Nervous_System-Types_of_neurons_in_the_enteric_nervous_system,
  title    = {Types of neurons in the enteric nervous system},
  journal  = {Journal of the Autonomic Nervous System},
  volume   = {81},
  number   = {1},
  pages    = {87--96},
  year     = {2000},
  issn     = {0165-1838},
  doi      = {10.1016/S0165-1838(00)00127-2},
  url      = {https://www.sciencedirect.com/science/article/pii/S0165183800001272},
  author   = {Furness, J. B.},
  keywords = {Enteric nervous system, Neurochemistry, Intestine},
  abstract = {This paper, written for the symposium in honour of more than 40 years’ contribution to autonomic research by Professor Geoffrey Burnstock, highlights the progress made in understanding the organisation of the enteric nervous system over this time. Forty years ago, the prevailing view was that the neurons within the gut wall were post-ganglionic neurons of parasympathetic pathways. This view was replaced as evidence accrued that the neurons are part of the enteric nervous system and are involved in reflex and integrative activities that can occur even in the absence of neuronal influence from extrinsic sources. Work in Burnstock’s laboratory led to the discovery of intrinsic inhibitory neurons with then novel pharmacology of transmission, and precipitated investigation of neuron types in the enteric nervous system. All the types of neurons in the enteric nervous system of the small intestine of the guinea-pig have now been identified in terms of their morphologies, projections, primary neurotransmitters and physiological identification. In this region there are 14 functionally defined neuron types, each with a characteristic combination of morphological, neurochemical and biophysical properties. The nerve circuits underlying effects on motility, blood flow and secretion that are mediated through the enteric nervous system are constructed from these neurons. The circuits for simple motility reflexes are now known, and progress has been made in analysing those involved in local control of blood flow and transmucosal fluid movement in the small intestine.}
}
@article{1997-Neuroanatomy-Types_of_neurons__synaptic_connections_and_chemical_characteristics_of_cells_immunoreactive_for_calbindin_D28K__parvalbumin_and_calretinin_in_the_neocortex,
  title    = {Types of neurons, synaptic connections and chemical characteristics of cells immunoreactive for calbindin-{D28K}, parvalbumin and calretinin in the neocortex},
  journal  = {Journal of Chemical Neuroanatomy},
  volume   = {14},
  number   = {1},
  pages    = {1--19},
  year     = {1997},
  issn     = {0891-0618},
  doi      = {10.1016/S0891-0618(97)10013-8},
  url      = {https://www.sciencedirect.com/science/article/pii/S0891061897100138},
  author   = {DeFelipe, Javier},
  keywords = {Cerebral cortex, Immunocytochemistry, Colocalization, Calcium-binding proteins, GABA, Neuropeptides, Receptors},
  abstract = {This article provides a general account of types of neurons, synaptic connections and chemical characteristics (colocalization studies) of cells immunoreactive for the three main calcium-binding proteins found in the neocortex, namely, calbindin-D28K, parvalbumin and calretinin. The main conclusion is two-fold. First, all, or the majority, of calbindin-, parvalbumin- and calretinin-immunoreactive cells are smooth nonpyramidal neurons (interneurons) which participate in a variety of complex cortical circuits that may differ depending on the species, cortical area or layer where they are located. Second, in general, different types of nonpyramidal neurons are stained for each of these calcium-binding proteins and display different chemical characteristics regarding a variety of neurotransmitters (or related compounds), cell surface markers and receptors. However, a certain overlap exits, which also shows regional and species differences.}
}
@inbook{2019-Classification_Algorithm_Validation,
  author    = {Nandi, A. K. and Ahmed, H.},
  booktitle = {Condition Monitoring with Vibration Signals: Compressive Sampling and Learning Algorithms for Rotating Machines},
  title     = {Classification Algorithm Validation},
  year      = {2019},
  pages     = {307--319},
  keywords  = {Training;Testing;Data models;Vibrations;Classification algorithms;Task analysis;Condition monitoring},
  doi       = {10.1002/9781119544678.ch15},
  publisher = {IEEE},
  url       = {https://ieeexplore.ieee.org/document/8958927},
}
@unpublished{ng2012cs229,
  author   = {Ng, Andrew},
  title    = {{CS229} Lecture notes - Supervised learning},
  note     = {Stanford University, CS229 lecture notes},
  keywords = {learning logistic regression supervised},
  year     = {2012}
}
@article{2017-Elsevier-An_improved_overlapping_k-means_clustering_method_for_medical_applications,
  author    = {Khanmohammadi, Sina and Adibeig, Naiier and Shanehbandy, Samaneh},
  title     = {An improved overlapping k-means clustering method for medical applications},
  journal   = {Expert Systems with Applications},
  volume    = {67},
  pages     = {12--18},
  publisher = {Elsevier},
  year      = {2017}
}
@article{2015-ImprovingDistributionalSimilarityWithLessonsLearned,
  title   = {Improving Distributional Similarity with Lessons Learned from Word Embeddings},
  author  = {Levy, O. and Goldberg, Y. and Dagan, I.},
  journal = {Transactions of the Association for Computational Linguistics},
  volume  = {3},
  year    = {2015},
  url     = {https://www.aclweb.org/anthology/Q15-1016},
  doi     = {10.1162/tacl_a_00134},
  pages   = {211--225},
}
@article{2006-ArtificialNeuralNetworksVSLDA,
  title   = {Artificial neural network vs linear discriminant analysis in credit ratings forecast: A comparative study of prediction performances},
  author  = {Kumar, K. and Bhattacharya, S.},
  journal = {Review of Accounting and Finance},
  volume  = {5},
  number  = {3},
  year    = {2006},
  pages   = {216--227},
}
@article{1993-LDAvsANN,
  author  = {Yoon, Youngohc and Swales, George and Margavio, Thomas},
  year    = {1993},
  month   = jan,
  pages   = {51--60},
  title   = {A Comparison of Discriminant Analysis Versus Artificial Neural Networks},
  volume  = {44},
  journal = {Journal of the Operational Research Society},
  doi     = {10.1057/jors.1993.6}
}
@article{2017-ReservoirComputingMemristor,
  author  = {Du, Chao and Cai, Fuxi and Zidan, Mohamed and Ma, Wen and Lee, Seung Hwan and Lu, Wei},
  year    = {2017},
  month   = dec,
  pages   = {2204--2210},
  title   = {Reservoir computing using dynamic memristors for temporal information processing},
  volume  = {8},
  journal = {Nature Communications},
  doi     = {10.1038/s41467-017-02337-y}
}
@article{1997-ClassicalvsANN,
  author  = {Newey, V.},
  year    = {1997},
  month   = aug,
  pages   = {5--8},
  title   = {Classical versus artificial neural network analysis},
  volume  = {10},
  journal = {Ultrasound in Obstetrics \& Gynecology},
  doi     = {10.1046/j.1469-0705.1997.10010005.x}
}
@article{2012-LDAvsANN,
  author  = {Castro, M. C. F.},
  year    = {2012},
  month   = jan,
  pages   = {351--355},
  title   = {Linear discriminant analysis versus artificial neural network as classifiers for elbow angular position recognition purposes},
  journal = {BIOSIGNALS 2012 - Proceedings of the International Conference on Bio-Inspired Systems and Signal Processing}
}
@article{2017-EmbeddedDeepNeuralNetworkProcessing,
  author   = {Verhelst, M. and Moons, B.},
  journal  = {IEEE Solid-State Circuits Magazine},
  title    = {Embedded Deep Neural Network Processing: Algorithmic and Processor Techniques Bring Deep Learning to {IoT} and Edge Devices},
  year     = {2017},
  volume   = {9},
  number   = {4},
  pages    = {55--65},
  abstract = {Deep learning has recently become immensely popular for image recognition, as well as for other recognition and pattern matching tasks in, e.g., speech processing, natural language processing, and so forth. The online evaluation of deep neural networks, however, comes with significant computational complexity, making it, until recently, feasible only on power-hungry server platforms in the cloud. In recent years, we see an emerging trend toward embedded processing of deep learning networks in edge devices: mobiles, wearables, and Internet of Things (IoT) nodes. This would enable us to analyze data locally in real time, which is not only favorable in terms of latency but also mitigates privacy issues. Yet evaluating the powerful but large deep neural networks with power budgets in the milliwatt or even microwatt range requires a significant improvement in processing energy efficiency.},
  keywords = {image matching;Internet of Things;learning (artificial intelligence);neural nets;embedded deep neural network;algorithmic techniques;processor techniques;deep learning;IoT;edge devices;image recognition;pattern matching;Feature extraction;Biological neural networks;Training data;Machine learning;Image recognition;Tutorials},
  doi      = {10.1109/MSSC.2017.2745818},
  month    = {Fall},
}
@article{Bianchi2019,
  author   = {Bianchi, V. and Bassoli, M. and Lombardo, G. and Fornacciari, P. and Mordonini, M. and De Munari, I.},
  journal  = {IEEE Internet of Things Journal},
  title    = {{IoT} Wearable Sensor and Deep Learning: An Integrated Approach for Personalized Human Activity Recognition in a Smart Home Environment},
  year     = {2019},
  volume   = {6},
  number   = {5},
  pages    = {8553--8562},
  keywords = {assisted living;biomedical equipment;cloud computing;convolutional neural nets;geriatrics;home computing;image recognition;Internet of Things;learning (artificial intelligence);patient diagnosis;patient monitoring;sensors;wearable computers;wireless LAN;common home router;convolutional neural network network;daily activity monitor;IoT wearable sensor;personalized human activity recognition;smart home environment;continuous monitoring;human behaviors;ambient assisted living;medical diagnosis;elderly care;innovative HAR system;wearable devices;deep learning techniques;inertial measurement unit;Wi-Fi section;AAL;well-being management;rehabilitation;entertainment;surveillance;IMU;cloud service;Internet;CNN;embedded devices;low-cost devices;Wearable sensors;Activity recognition;Deep learning;Monitoring;Feature extraction;Internet of Things;Cloud computing;Activity recognition;Internet of Things (IoT);machine learning;wearable sensor},
  doi      = {10.1109/JIOT.2019.2920283},
  issn     = {2372-2541},
  month    = oct,
}
@misc{2020-Interpretable_Super_Resolution,
  title         = {Interpretable Super-Resolution via a Learned Time-Series Representation},
  author        = {Balestriero, Randall and Glotin, Herv{\'e} and Baraniuk, Richard G.},
  year          = {2020},
  eprint        = {2006.07713},
  archivePrefix = {arXiv},
  primaryClass  = {eess.SP},
}
@misc{2019-Online_K-means_Clustering,
  title         = {Online k-means Clustering},
  author        = {Vincent Cohen-Addad and Benjamin Guedj and Varun Kanade and Guy Rom},
  year          = {2019},
  eprint        = {1909.06861},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG}
}
@inproceedings{2004-MLConf-K-means_clustering_via_principal_component_analysis,
  title     = {K-means clustering via principal component analysis},
  author    = {Ding, Chris and He, Xiaofeng},
  booktitle = {Proceedings of the Twenty-First International Conference on Machine Learning},
  pages     = {29},
  year      = {2004}
}
@book{2020-Book-Joshi-Machine_Learning_and_Artificial_Intelligence,
  title     = {Machine Learning and Artificial Intelligence},
  author    = {Joshi, Ameet V.},
  isbn      = {978-3-030-26622-6},
  url       = {https://www.springer.com/gp/book/9783030266219},
  year      = {2020},
  publisher = {Springer International Publishing}
}
@article{2016-Nature-Mastering_the_Game_of_Go_with_Deep_Neural_Networks_and_Tree_Search,
  author    = {Silver, David and Huang, Aja and Maddison, Chris J. and Guez, Arthur and Sifre, Laurent and van den Driessche, George and Schrittwieser, Julian and Antonoglou, Ioannis and Panneershelvam, Veda and Lanctot, Marc and Dieleman, Sander and Grewe, Dominik and Nham, John and Kalchbrenner, Nal and Sutskever, Ilya and Lillicrap, Timothy and Leach, Madeleine and Kavukcuoglu, Koray and Graepel, Thore and Hassabis, Demis},
  doi       = {10.1038/nature16961},
  journal   = {Nature},
  month     = jan,
  number    = {7587},
  pages     = {484--489},
  publisher = {Nature Publishing Group},
  title     = {Mastering the Game of {Go} with Deep Neural Networks and Tree Search},
  volume    = {529},
  year      = {2016}
}
@article{1958-Psychological_Review-The_Perceptron:_A_Probabilistic_Model_For_Information_Storage_And_Organization_In_The_Brain,
  author  = {Rosenblatt, Frank},
  doi     = {10.1037/h0042519},
  journal = {Psychological Review},
  title   = {The Perceptron: A Probabilistic Model for Information Storage and Organization in the Brain},
  volume  = {65},
  number  = {6},
  pages   = {386--408},
  year    = {1958}
}
@book{1969-MIT-Perceptrons:_An_Introduction_to_Computational_Geometry,
  address   = {Cambridge, MA, USA},
  author    = {Minsky, Marvin and Papert, Seymour},
  keywords  = {linear-classification neural-networks seminal},
  publisher = {MIT Press},
  title     = {Perceptrons: An Introduction to Computational Geometry},
  year      = {1969}
}
@phdthesis{1974-PhD_Thesis-Beyond_Regression:_New_Tools_for_Prediction_and_Analysis_in_the_Behavioral_Sciences,
  author = {Werbos, Paul J.},
  school = {Harvard University},
  title  = {Beyond Regression: New Tools for Prediction and Analysis in the Behavioral Sciences},
  year   = {1974}
}
@inproceedings{1988-IEEE_ICNN-Backpropagation:_past_and_future,
  author    = {Werbos, Paul J.},
  booktitle = {IEEE 1988 International Conference on Neural Networks},
  title     = {Backpropagation: past and future},
  year      = {1988},
  pages     = {343--353 vol.1},
}
@inbook{2012-Springer-Stochastic_Gradient_Descent_Tricks,
  author    = {Bottou, L{\'e}on},
  title     = {Stochastic Gradient Descent Tricks},
  booktitle = {Neural Networks: Tricks of the Trade: Second Edition},
  year      = {2012},
  publisher = {Springer Berlin Heidelberg},
  address   = {Berlin, Heidelberg},
  pages     = {421--436},
  abstract  = {Chapter 1 strongly advocates the stochastic back-propagation method to train neural networks. This is in fact an instance of a more general technique called stochastic gradient descent (SGD). This chapter provides background material, explains why SGD is a good learning algorithm when the training set is large, and provides useful recommendations.},
  isbn      = {978-3-642-35289-8},
  doi       = {10.1007/978-3-642-35289-8_25},
  url       = {https://doi.org/10.1007/978-3-642-35289-8_25}
}
@book{1996-Book-Numerical_Methods_for_Unconstrained_Optimization_and_Nonlinear_Equations,
  author    = {Dennis, J. E. and Schnabel, Robert B.},
  title     = {Numerical Methods for Unconstrained Optimization and Nonlinear Equations},
  series    = {Classics in Applied Mathematics},
  volume    = {16},
  year      = {1996},
  isbn      = {0898713641},
  publisher = {Society for Industrial and Applied Mathematics}
}
@article{2016-CoRR-An_overview_of_gradient_descent_optimization_algorithms,
  author        = {Ruder, Sebastian},
  title         = {An overview of gradient descent optimization algorithms},
  journal       = {CoRR},
  volume        = {abs/1609.04747},
  year          = {2016},
  url           = {http://arxiv.org/abs/1609.04747},
  archivePrefix = {arXiv},
  eprint        = {1609.04747}
}
@misc{2014-arXiv-Adam_A_Method_for_Stochastic_Optimization,
  title         = {Adam: A Method for Stochastic Optimization},
  author        = {Kingma, Diederik P. and Ba, Jimmy Lei},
  year          = {2014},
  eprint        = {1412.6980},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  note          = {Published at the International Conference on Learning Representations (ICLR) 2015}
}
@article{2003-Pattern_Recognition-The_global_k-means_clustering_algorithm,
  title    = {The global k-means clustering algorithm},
  journal  = {Pattern Recognition},
  volume   = {36},
  number   = {2},
  pages    = {451--461},
  year     = {2003},
  note     = {Biometrics},
  issn     = {0031-3203},
  doi      = {10.1016/S0031-3203(02)00060-2},
  url      = {http://www.sciencedirect.com/science/article/pii/S0031320302000602},
  author   = {Likas, Aristidis and Vlassis, Nikos and Verbeek, Jakob J.},
  keywords = {Clustering, k-Means algorithm, Global optimization, k-d Trees, Data mining},
  abstract = {We present the global k-means algorithm which is an incremental approach to clustering that dynamically adds one cluster center at a time through a deterministic global search procedure consisting of N (with N being the size of the data set) executions of the k-means algorithm from suitable initial positions. We also propose modifications of the method to reduce the computational load without significantly affecting solution quality. The proposed clustering methods are tested on well-known data sets and they compare favorably to the k-means algorithm with random restarts.}
}
@article{2002-ToPAaMI_An_efficient_k-means_clustering_algorithm_analysis_and_implementation,
  author  = {Kanungo, T. and Mount, D. M. and Netanyahu, N. S. and Piatko, C. D. and Silverman, R. and Wu, A. Y.},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  title   = {An efficient k-means clustering algorithm: analysis and implementation},
  year    = {2002},
  volume  = {24},
  number  = {7},
  pages   = {881--892},
}
@inproceedings{2012-JMLR-Autoencoders_Unsupervised_Learning_and_Deep_Architectures,
  title     = {Autoencoders, Unsupervised Learning, and Deep Architectures},
  author    = {Pierre Baldi},
  editor    = {Isabelle Guyon and Gideon Dror and Vincent Lemaire and Graham Taylor and Daniel Silver},
  series    = {Proceedings of Machine Learning Research},
  volume    = {27},
  pages     = {37--49},
  year      = {2012},
  month     = {02 Jul},
  address   = {Bellevue, Washington, USA},
  publisher = {JMLR Workshop and Conference Proceedings},
  pdf       = {http://proceedings.mlr.press/v27/baldi12a/baldi12a.pdf},
  url       = {http://proceedings.mlr.press/v27/baldi12a.html},
  abstract  = {Autoencoders play a fundamental role in unsupervised learning and in deep architectures for transfer learning and other tasks. In spite of their fundamental role, only linear autoencoders over the real numbers have been solved analytically. Here we present a general mathematical framework for the study of both linear and non-linear autoencoders. The framework allows one to derive an analytical treatment for the most non-linear autoencoder, the Boolean autoencoder. Learning in the Boolean autoencoder is equivalent to a clustering problem that can be solved in polynomial time when the number of clusters is small and becomes NP complete when the number of clusters is large. The framework sheds light on the different kinds of autoencoders, their learning complexity, their horizontal and vertical composability in deep architectures, their critical points, and their fundamental connections to clustering, Hebbian learning, and information theory.}
}
@article{2015-Nature-Deep_Learning,
  title     = {Deep learning},
  abstract  = {Deep learning allows computational models that are composed of multiple processing layers to learn representations of data with multiple levels of abstraction. These methods have dramatically improved the state-of-the-art in speech recognition, visual object recognition, object detection and many other domains such as drug discovery and genomics. Deep learning discovers intricate structure in large data sets by using the backpropagation algorithm to indicate how a machine should change its internal parameters that are used to compute the representation in each layer from the representation in the previous layer. Deep convolutional nets have brought about breakthroughs in processing images, video, speech and audio, whereas recurrent nets have shone light on sequential data such as text and speech.},
  author    = {LeCun, Yann and Bengio, Yoshua and Hinton, Geoffrey},
  year      = {2015},
  month     = may,
  doi       = {10.1038/nature14539},
  language  = {English (US)},
  volume    = {521},
  pages     = {436--444},
  journal   = {Nature},
  issn      = {0028-0836},
  publisher = {Nature Publishing Group},
  number    = {7553},
}
@article{2012-ImageNetClassificationWithDeepConvolutionalNeuralNetworks,
  author        = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E.},
  year          = {2012},
  title         = {{ImageNet} Classification with Deep Convolutional Neural Networks},
  volume        = {25},
  pages         = {1097--1105},
  journal       = {Advances in Neural Information Processing Systems},
  internal-note = {Duplicate of entry 2012-Curran-ImageNet_Classification_with_Deep_Convolutional_Neural_Networks. Removed DOI 10.1145/3065386, which refers to the 2017 Communications of the ACM reprint, not this 2012 NIPS paper.}
}
@inproceedings{2015-RESNET-DeepResidualLearningImageRecognition,
  author    = {He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
  booktitle = {2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  title     = {Deep Residual Learning for Image Recognition},
  year      = {2016},
  pages     = {770--778},
  doi       = {10.1109/CVPR.2016.90},
}
@inproceedings{2013-VisualizingUnderstandingCNN,
  author        = {Zeiler, Matthew D. and Fergus, Rob},
  title         = {Visualizing and Understanding Convolutional Networks},
  booktitle     = {Computer Vision -- ECCV 2014},
  series        = {Lecture Notes in Computer Science},
  volume        = {8689},
  pages         = {818--833},
  year          = {2014},
  publisher     = {Springer},
  doi           = {10.1007/978-3-319-10590-1_53},
  internal-note = {Originally typed as @article with year 2013; the paper was published at ECCV 2014 (LNCS 8689) -- verify against publisher record.}
}
@article{2014-Article-Deep_Learning_Methods_and_Applications,
  author     = {Deng, Li and Yu, Dong},
  title      = {Deep Learning: Methods and Applications},
  year       = {2014},
  issue_date = {June 2014},
  publisher  = {Now Publishers Inc.},
  address    = {Hanover, MA, USA},
  volume     = {7},
  number     = {3--4},
  issn       = {1932-8346},
  doi        = {10.1561/2000000039},
  abstract   = {This monograph provides an overview of general deep learning methodology and its applications to a variety of signal and information processing tasks. The application areas are chosen with the following three criteria in mind: (1) expertise or knowledge of the authors; (2) the application areas that have already been transformed by the successful use of deep learning technology, such as speech recognition and computer vision; and (3) the application areas that have the potential to be impacted significantly by deep learning and that have been experiencing research growth, including natural language and text processing, information retrieval, and multimodal information processing empowered by multi-task deep learning.},
  journal    = {Foundations and Trends in Signal Processing},
  month      = jun,
  pages      = {197--387},
  numpages   = {191},
  keywords   = {Autoencoders, Unsupervised learning, Hybrid deep networks, Language models, Multi-modal processing, Deep neural networks, Multi-task learning, Deep stacking networks, Machine learning, Deep learning, Object recognition, Natural language processing, Computer vision, Supervised learning, Artificial intelligence, Neural networks}
}
@article{2006-Neural_Computation-A_Fast_Learning_Algorithm_for_Deep_Belief_Nets,
  author   = {Hinton, Geoffrey E. and Osindero, Simon and Teh, Yee-Whye},
  title    = {A Fast Learning Algorithm for Deep Belief Nets},
  journal  = {Neural Computation},
  volume   = {18},
  number   = {7},
  pages    = {1527--1554},
  year     = {2006},
  doi      = {10.1162/neco.2006.18.7.1527},
  note     = {PMID: 16764513},
  abstract = {We show how to use “complementary priors” to eliminate the explaining-away effects that make inference difficult in densely connected belief nets that have many hidden layers. Using complementary priors, we derive a fast, greedy algorithm that can learn deep, directed belief networks one layer at a time, provided the top two layers form an undirected associative memory. The fast, greedy algorithm is used to initialize a slower learning procedure that fine-tunes the weights using a contrastive version of the wake-sleep algorithm. After fine-tuning, a network with three hidden layers forms a very good generative model of the joint distribution of handwritten digit images and their labels. This generative model gives better digit classification than the best discriminative learning algorithms. The low-dimensional manifolds on which the digits lie are modeled by long ravines in the free-energy landscape of the top-level associative memory, and it is easy to explore these ravines by using the directed connections to display what the associative memory has in mind.}
}
@incollection{1998-MIT-Convolutional_Networks_for_Images_Speech_and_Time_Series,
  author    = {LeCun, Yann and Bengio, Yoshua},
  title     = {Convolutional Networks for Images, Speech, and Time Series},
  editor    = {Arbib, Michael A.},
  year      = {1998},
  isbn      = {0262511029},
  publisher = {MIT Press},
  address   = {Cambridge, MA, USA},
  booktitle = {The Handbook of Brain Theory and Neural Networks},
  pages     = {255--258},
  numpages  = {4}
}
@incollection{2012-Curran-ImageNet_Classification_with_Deep_Convolutional_Neural_Networks,
  title     = {{ImageNet} Classification with Deep Convolutional Neural Networks},
  author    = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E.},
  booktitle = {Advances in Neural Information Processing Systems 25},
  editor    = {F. Pereira and C. J. C. Burges and L. Bottou and K. Q. Weinberger},
  pages     = {1097--1105},
  year      = {2012},
  publisher = {Curran Associates, Inc.},
  url       = {http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf}
}
@misc{2014-arXiv-Do_Deep_Nets_Really_Need_to_be_Deep,
  author        = {Lei Jimmy Ba and Rich Caruana},
  title         = {Do Deep Nets Really Need to be Deep?},
  year          = {2014},
  archivePrefix = {arXiv},
  eprint        = {1312.6184},
  primaryClass  = {cs.LG}
}
@misc{2013-arXiv-Big_Neural_Networks_Waste_Capacity,
  author        = {Yann N. Dauphin and Yoshua Bengio},
  title         = {Big Neural Networks Waste Capacity},
  year          = {2013},
  archivePrefix = {arXiv},
  eprint        = {1301.3583},
  primaryClass  = {cs.LG}
}
@misc{2012-arXiv-Improving_neural_networks_by_preventing_co_adaptation_of_feature_detectors,
  author        = {Geoffrey E. Hinton and Nitish Srivastava and Alex Krizhevsky and Ilya Sutskever and Ruslan R. Salakhutdinov},
  title         = {Improving neural networks by preventing co-adaptation of feature detectors},
  year          = {2012},
  archivePrefix = {arXiv},
  eprint        = {1207.0580},
  primaryClass  = {cs.NE}
}
@inproceedings{2010-ICML-Rectified_linear_units_improve_restricted_boltzmann_machines,
  author    = {Nair, Vinod and Hinton, Geoffrey E.},
  title     = {Rectified Linear Units Improve Restricted Boltzmann Machines},
  year      = {2010},
  isbn      = {9781605589077},
  publisher = {Omnipress},
  address   = {Madison, WI, USA},
  abstract  = {Restricted Boltzmann machines were developed using binary stochastic hidden units. These can be generalized by replacing each binary unit by an infinite number of copies that all have the same weights but have progressively more negative biases. The learning and inference rules for these "Stepped Sigmoid Units" are unchanged. They can be approximated efficiently by noisy, rectified linear units. Compared with binary units, these units learn features that are better for object recognition on the NORB dataset and face verification on the Labeled Faces in the Wild dataset. Unlike binary units, rectified linear units preserve information about relative intensities as information travels through multiple layers of feature detectors.},
  booktitle = {Proceedings of the 27th International Conference on International Conference on Machine Learning},
  pages     = {807--814},
  numpages  = {8},
  location  = {Haifa, Israel},
  series    = {ICML'10}
}
@article{2018-IEEE-Access-Benchmark_Analysis_of_Representative_Deep_Neural_Network,
  author  = {Bianco, Simone and Cadene, Remi and Celona, Luigi and Napoletano, Paolo},
  journal = {IEEE Access},
  title   = {Benchmark Analysis of Representative Deep Neural Network Architectures},
  year    = {2018},
  volume  = {6},
  pages   = {64270--64277},
  doi     = {10.1109/ACCESS.2018.2877890},
}
@article{2019-Neural-Networks-Recent_advances_in_physical_reservoir_computing,
  title    = {Recent advances in physical reservoir computing: A review},
  journal  = {Neural Networks},
  volume   = {115},
  pages    = {100--123},
  year     = {2019},
  issn     = {0893-6080},
  doi      = {10.1016/j.neunet.2019.03.005},
  url      = {http://www.sciencedirect.com/science/article/pii/S0893608019300784},
  author   = {Gouhei Tanaka and Toshiyuki Yamane and Jean Benoit H{\'e}roux and Ryosho Nakane and Naoki Kanazawa and Seiji Takeda and Hidetoshi Numata and Daiju Nakano and Akira Hirose},
  keywords = {Neural networks, Machine learning, Reservoir computing, Nonlinear dynamical systems, Neuromorphic device}
}
@article{2016-FullyAnalogPhotonicReservoirComputer,
  author  = {Duport, Fran{\c{c}}ois and Smerieri, Anteo and Akrout, Akram and Haelterman, Marc and Massar, Serge},
  year    = {2016},
  month   = mar,
  pages   = {22381},
  title   = {Fully analogue photonic reservoir computer},
  volume  = {6},
  journal = {Scientific Reports},
  doi     = {10.1038/srep22381}
}
@article{2017-ReservoirComputingWithAnEnsembleOfTimeDelayReservoirs,
  author        = {Ort{\'i}n, Silvia and Pesquera, Luis},
  year          = {2017},
  month         = jun,
  title         = {Reservoir Computing with an Ensemble of Time-Delay Reservoirs},
  volume        = {9},
  journal       = {Cognitive Computation},
  doi           = {10.1007/s12559-017-9463-7},
  internal-note = {Page range missing -- look up via the DOI and fill in.}
}
@inproceedings{2018-ICASSP-Phoneme_Based_Embedded_Segmental_K-Means_for_Unsupervised_Term_Discovery,
  author    = {Bhati, Saurabhchand and Kamper, Herman and Sri Rama Murty, K.},
  booktitle = {2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {Phoneme Based Embedded Segmental K-Means for Unsupervised Term Discovery},
  year      = {2018},
  pages     = {5169--5173},
  doi       = {10.1109/ICASSP.2018.8462264}
}
@inproceedings{2015-ICMLA-Simplicity_of_Kmeans_Versus_Deepness_of_Deep_Learning_A_Case_of_Unsupervised_Feature_Learning_with_Limited_Data,
  author    = {M. {Dundar} and Q. {Kou} and B. {Zhang} and Y. {He} and B. {Rajwa}},
  booktitle = {2015 IEEE 14th International Conference on Machine Learning and Applications (ICMLA)},
  title     = {Simplicity of Kmeans Versus Deepness of Deep Learning: A Case of Unsupervised Feature Learning with Limited Data},
  year      = {2015},
  pages     = {883--888},
  doi       = {10.1109/ICMLA.2015.78}
}
@inproceedings{2017-ASRU-An_embedded_segmental_K-means_model_for_unsupervised_segmentation_and_clustering_of_speech,
  author    = {Kamper, Herman and Livescu, Karen and Goldwater, Sharon},
  booktitle = {2017 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)},
  title     = {An embedded segmental K-means model for unsupervised segmentation and clustering of speech},
  year      = {2017},
  pages     = {719--726},
  doi       = {10.1109/ASRU.2017.8269008}
}
@inproceedings{2018-ISSCC-A_1μW_voice_activity_detector_using_analog_feature_extraction_and_digital_deep_neural_network,
  author        = {M. {Yang} and C. {Yeh} and Y. {Zhou} and J. P. {Cerqueira} and A. A. {Lazar} and M. {Seok}},
  booktitle     = {2018 IEEE International Solid - State Circuits Conference - (ISSCC)},
  title         = {A 1{\textmu}W voice activity detector using analog feature extraction and digital deep neural network},
  year          = {2018},
  pages         = {346--348},
  doi           = {10.1109/ISSCC.2018.8310326},
  internal-note = {Citation key contains a non-ASCII character; key left unchanged to avoid breaking existing citations, but consider renaming and updating all cite commands.}
}
@article{1991-SD-Approximation_capabilities_of_multilayer_feedforward_networks_HORNIK,
  title    = {Approximation capabilities of multilayer feedforward networks},
  journal  = {Neural Networks},
  volume   = {4},
  number   = {2},
  pages    = {251--257},
  year     = {1991},
  issn     = {0893-6080},
  doi      = {10.1016/0893-6080(91)90009-T},
  url      = {https://www.sciencedirect.com/science/article/pii/089360809190009T},
  author   = {Kurt Hornik},
  keywords = {Multilayer feedforward networks, Activation function, Universal approximation capabilities, Input environment measure, {$L^{p}(\mu)$} approximation, Uniform approximation, Sobolev spaces, Smooth approximation},
  abstract = {We show that standard multilayer feedforward networks with as few as a single hidden layer and arbitrary bounded and nonconstant activation function are universal approximators with respect to Lp(μ) performance criteria, for arbitrary finite input environment measures μ, provided only that sufficiently many hidden units are available. If the activation function is continuous, bounded and nonconstant, then continuous mappings can be learned uniformly over compact input sets. We also give very general conditions ensuring that networks with sufficiently smooth activation functions are capable of arbitrarily accurate approximation to a function and its derivatives.}
}
@article{2009-SPIKING_NEURAL_NETWORKS-GHOSH_DASTIDAR_SAMANWOY_ADELI_HOJJAT,
  author   = {Ghosh-Dastidar, Samanwoy and Adeli, Hojjat},
  title    = {Spiking Neural Networks},
  journal  = {International Journal of Neural Systems},
  volume   = {19},
  number   = {04},
  pages    = {295--308},
  year     = {2009},
  doi      = {10.1142/S0129065709002002},
  note     = {PMID: 19731402},
  abstract = {Most current Artificial Neural Network (ANN) models are based on highly simplified brain dynamics. They have been used as powerful computational tools to solve complex pattern recognition, function estimation, and classification problems. ANNs have been evolving towards more powerful and more biologically realistic models. In the past decade, Spiking Neural Networks (SNNs) have been developed which comprise of spiking neurons. Information transfer in these neurons mimics the information transfer in biological neurons, i.e., via the precise timing of spikes or a sequence of spikes. To facilitate learning in such networks, new learning algorithms based on varying degrees of biological plausibility have also been developed recently. Addition of the temporal dimension for information encoding in SNNs yields new insight into the dynamics of the human brain and could result in compact representations of large neural networks. As such, SNNs have great potential for solving complicated time-dependent pattern recognition problems because of their inherent dynamic representation. This article presents a state-of-the-art review of the development of spiking neurons and SNNs, and provides insight into their evolution as the third generation neural networks.}
}
@inproceedings{2001-IJCNN-Supervised_learning_with_spiking_neural_networks,
  author    = {Xin, Jianguo and Embrechts, Mark J.},
  booktitle = {IJCNN'01. International Joint Conference on Neural Networks. Proceedings (Cat. No.01CH37222)},
  title     = {Supervised learning with spiking neural networks},
  year      = {2001},
  volume    = {3},
  pages     = {1772--1777},
  doi       = {10.1109/IJCNN.2001.938430}
}
@inproceedings{2011-IJCNN-Simulation_of_a_memristor_based_spiking_neural_network_immune_to_device_variations,
  author    = {Querlioz, Damien and Bichler, Olivier and Gamrat, Christian},
  booktitle = {The 2011 International Joint Conference on Neural Networks},
  title     = {Simulation of a memristor-based spiking neural network immune to device variations},
  year      = {2011},
  pages     = {1775--1781},
  doi       = {10.1109/IJCNN.2011.6033439}
}
@article{2016-TNNLS-A_Spiking_Neural_Network_System_for_Robust_Sequence_Recognition,
  author  = {Yu, Qiang and Yan, Rui and Tang, Huajin and Tan, Kay Chen and Li, Haizhou},
  journal = {IEEE Transactions on Neural Networks and Learning Systems},
  title   = {A Spiking Neural Network System for Robust Sequence Recognition},
  year    = {2016},
  volume  = {27},
  number  = {3},
  pages   = {621--635},
  doi     = {10.1109/TNNLS.2015.2416771},
}
@article{2019-NN-Deep_learning_in_spiking_neural_networks,
  title    = {Deep learning in spiking neural networks},
  journal  = {Neural Networks},
  volume   = {111},
  pages    = {47--63},
  year     = {2019},
  issn     = {0893-6080},
  doi      = {10.1016/j.neunet.2018.12.002},
  url      = {https://www.sciencedirect.com/science/article/pii/S0893608018303332},
  author   = {Amirhossein Tavanaei and Masoud Ghodrati and Saeed Reza Kheradpisheh and Timoth{\'e}e Masquelier and Anthony Maida},
  keywords = {Deep learning, Spiking neural network, Biological plausibility, Machine learning, Power-efficient architecture},
  abstract = {In recent years, deep learning has revolutionized the field of machine learning, for computer vision in particular. In this approach, a deep (multilayer) artificial neural network (ANN) is trained, most often in a supervised manner using backpropagation. Vast amounts of labeled training examples are required, but the resulting classification accuracy is truly impressive, sometimes outperforming humans. Neurons in an ANN are characterized by a single, static, continuous-valued activation. Yet biological neurons use discrete spikes to compute and transmit information, and the spike times, in addition to the spike rates, matter. Spiking neural networks (SNNs) are thus more biologically realistic than ANNs, and are arguably the only viable option if one wants to understand how the brain computes at the neuronal description level. The spikes of biological neurons are sparse in time and space, and event-driven. Combined with bio-plausible local learning rules, this makes it easier to build low-power, neuromorphic hardware for SNNs. However, training deep SNNs remains a challenge. Spiking neurons’ transfer function is usually non-differentiable, which prevents using backpropagation. Here we review recent supervised and unsupervised methods to train deep SNNs, and compare them in terms of accuracy and computational cost. The emerging picture is that SNNs still lag behind ANNs in terms of accuracy, but the gap is decreasing, and can even vanish on some tasks, while SNNs typically require many fewer operations and are the better candidates to process spatio-temporal data.}
}