@comment{BrainComputation.bib — bibliography database for the Brain Computation book/course. (GitHub page chrome and rendered line-number gutter removed from the extracted file.)}
@article{RN3403,
author = {Albus, J. S.},
title = {A theory of cerebellar function},
journal = {Mathematical Biosciences},
volume = {10},
pages = {25--61},
DOI = {10.1016/0025-5564(71)90051-4},
year = {1971},
type = {Journal Article}
}
@article{RN6672,
author = {Amari, Shun-ichi},
title = {Natural Gradient Works Efficiently in Learning},
journal = {Neural Computation},
volume = {10},
number = {2},
pages = {251--276},
ISSN = {0899-7667, 1530-888X},
DOI = {10.1162/089976698300017746},
year = {1998},
type = {Journal Article}
}
@article{RN3991,
author = {Aston-Jones, G. and Cohen, J. D.},
title = {An integrative theory of locus coeruleus-norepinephrine function: adaptive gain and optimal performance},
journal = {Annual Review of Neuroscience},
volume = {28},
pages = {403--450},
note = {0147-006X (Print)
Journal Article
Research Support, N.I.H., Extramural
Research Support, U.S. Gov't, Non-P.H.S.
Research Support, U.S. Gov't, P.H.S.
Review},
abstract = {Historically, the locus coeruleus-norepinephrine (LC-NE) system has been implicated in arousal, but recent findings suggest that this system plays a more complex and specific role in the control of behavior than investigators previously thought. We review neurophysiological and modeling studies in monkey that support a new theory of LC-NE function. LC neurons exhibit two modes of activity, phasic and tonic. Phasic LC activation is driven by the outcome of task-related decision processes and is proposed to facilitate ensuing behaviors and to help optimize task performance (exploitation). When utility in the task wanes, LC neurons exhibit a tonic activity mode, associated with disengagement from the current task and a search for alternative behaviors (exploration). Monkey LC receives prominent, direct inputs from the anterior cingulate (ACC) and orbitofrontal cortices (OFC), both of which are thought to monitor task-related utility. We propose that these frontal areas produce the above patterns of LC activity to optimize utility on both short and long timescales.},
keywords = {Action Potentials/physiology
Adaptation, Physiological/*physiology
Animals
Brain Mapping
Cognition/physiology
Humans
Locus Coeruleus/*physiology
Neural Networks (Computer)
Norepinephrine/*physiology
*Systems Integration
Time Factors},
DOI = {10.1146/annurev.neuro.28.061604.135709},
url = {http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&dopt=Citation&list_uids=16022602},
year = {2005},
type = {Journal Article}
}
@article{RN133,
author = {Barraclough, D. J. and Conroy, M. L. and Lee, D.},
title = {Prefrontal cortex and decision making in a mixed-strategy game},
journal = {Nature Neuroscience},
volume = {7},
number = {4},
pages = {404--410},
note = {Barraclough, Dominic J
Conroy, Michelle L
Lee, Daeyeol
eng
EY01319/EY/NEI NIH HHS/
NS44270/NS/NINDS NIH HHS/
Nat Neurosci. 2004 Apr;7(4):404-10. Epub 2004 Mar 7.},
abstract = {In a multi-agent environment, where the outcomes of one's actions change dynamically because they are related to the behavior of other beings, it becomes difficult to make an optimal decision about how to act. Although game theory provides normative solutions for decision making in groups, how such decision-making strategies are altered by experience is poorly understood. These adaptive processes might resemble reinforcement learning algorithms, which provide a general framework for finding optimal strategies in a dynamic environment. Here we investigated the role of prefrontal cortex (PFC) in dynamic decision making in monkeys. As in reinforcement learning, the animal's choice during a competitive game was biased by its choice and reward history, as well as by the strategies of its opponent. Furthermore, neurons in the dorsolateral prefrontal cortex (DLPFC) encoded the animal's past decisions and payoffs, as well as the conjunction between the two, providing signals necessary to update the estimates of expected reward. Thus, PFC might have a key role in optimizing decision-making strategies.},
keywords = {Animals
Choice Behavior/*physiology
Electrophysiology
Evoked Potentials/physiology
*Game Theory
Macaca mulatta
Male
Mental Processes/*physiology
Neurons/physiology
Practice (Psychology)
Prefrontal Cortex/cytology/*physiology
*Probability Learning},
ISSN = {1097-6256 (Print)
1097-6256 (Linking)},
DOI = {10.1038/nn1209},
url = {http://www.ncbi.nlm.nih.gov/pubmed/15004564},
year = {2004},
type = {Journal Article}
}
@article{RN136,
author = {Barto, A. G. and Sutton, R. S. and Anderson, C. W.},
title = {Neuronlike adaptive elements that can solve difficult learning control problems},
journal = {IEEE Transactions on Systems, Man, and Cybernetics},
volume = {13},
number = {5},
pages = {834--846},
DOI = {10.1109/TSMC.1983.6313077},
year = {1983},
type = {Journal Article}
}
@article{RN183,
author = {Bell, A. J. and Sejnowski, T. J.},
title = {An information-maximization approach to blind separation and blind deconvolution},
journal = {Neural Computation},
volume = {7},
pages = {1129--1159},
DOI = {10.1162/neco.1995.7.6.1129},
year = {1995},
type = {Journal Article}
}
@article{RN184,
author = {Bell, Anthony J. and Sejnowski, Terrence J.},
title = {The ``independent components'' of natural scenes are edge filters},
journal = {Vision Research},
volume = {37},
number = {23},
pages = {3327--3338},
ISSN = {0042-6989},
DOI = {10.1016/s0042-6989(97)00121-1},
year = {1997},
type = {Journal Article}
}
@article{RN6732,
author = {Bengio, Y. and Courville, A. and Vincent, P.},
title = {Representation learning: a review and new perspectives},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {35},
number = {8},
pages = {1798--1828},
note = {Bengio, Yoshua
Courville, Aaron
Vincent, Pascal
eng
Research Support, Non-U.S. Gov't
Review
IEEE Trans Pattern Anal Mach Intell. 2013 Aug;35(8):1798-828. doi: 10.1109/TPAMI.2013.50.},
abstract = {The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning.},
keywords = {Algorithms
Artificial Intelligence/*trends
Humans
Neural Networks (Computer)},
ISSN = {1939-3539 (Electronic)
0098-5589 (Linking)},
DOI = {10.1109/TPAMI.2013.50},
url = {https://www.ncbi.nlm.nih.gov/pubmed/23787338},
year = {2013},
type = {Journal Article}
}
@article{RN223,
author = {Bialek, W. and Rieke, F. and de Ruyter van Steveninck, R. R. and Warland, D.},
title = {Reading a neural code},
journal = {Science},
volume = {252},
number = {5014},
pages = {1854--1857},
note = {Bialek, W
Rieke, F
de Ruyter van Steveninck, R R
Warland, D
eng
Research Support, Non-U.S. Gov't
Research Support, U.S. Gov't, Non-P.H.S.
Research Support, U.S. Gov't, P.H.S.
1991/06/28
Science. 1991 Jun 28;252(5014):1854-7. doi: 10.1126/science.2063199.},
abstract = {Traditional approaches to neural coding characterize the encoding of known stimuli in average neural responses. Organisms face nearly the opposite task--extracting information about an unknown time-dependent stimulus from short segments of a spike train. Here the neural code was characterized from the point of view of the organism, culminating in algorithms for real-time stimulus estimation based on a single example of the spike train. These methods were applied to an identified movement-sensitive neuron in the fly visual system. Such decoding experiments determined the effective noise level and fault tolerance of neural computation, and the structure of the decoding algorithms suggested a simple model for real-time analog signal processing with spiking neurons.},
keywords = {Algorithms
Animals
Diptera
Mathematics
*Models, Neurological
Neurons/*physiology
Neurons, Afferent/*physiology
Photoreceptor Cells/physiology
Visual Perception},
ISSN = {0036-8075 (Print)
0036-8075 (Linking)},
DOI = {10.1126/science.2063199},
url = {https://www.ncbi.nlm.nih.gov/pubmed/2063199},
year = {1991},
type = {Journal Article}
}
@book{RN7347,
   author    = {Bishop, Christopher M.},
   title     = {Pattern Recognition and Machine Learning},
   publisher = {Springer},
   address   = {New York},
   ISBN      = {978-0-387-31073-2},
   url       = {https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/},
   year      = {2006},
   type      = {Book}
}
@article{RN5201,
author = {Bogacz, R.},
title = {A tutorial on the free-energy framework for modelling perception and learning},
journal = {Journal of Mathematical Psychology},
volume = {76},
number = {Pt B},
pages = {198--211},
note = {Bogacz, Rafal
eng
2017/03/17
J Math Psychol. 2017 Feb;76(Pt B):198-211. doi: 10.1016/j.jmp.2015.11.003.},
abstract = {This paper provides an easy to follow tutorial on the free-energy framework for modelling perception developed by Friston, which extends the predictive coding model of Rao and Ballard. These models assume that the sensory cortex infers the most likely values of attributes or features of sensory stimuli from the noisy inputs encoding the stimuli. Remarkably, these models describe how this inference could be implemented in a network of very simple computational elements, suggesting that this inference could be performed by biological networks of neurons. Furthermore, learning about the parameters describing the features and their uncertainty is implemented in these models by simple rules of synaptic plasticity based on Hebbian learning. This tutorial introduces the free-energy framework using very simple examples, and provides step-by-step derivations of the model. It also discusses in more detail how the model could be implemented in biological neural circuits. In particular, it presents an extended version of the model in which the neurons only sum their inputs, and synaptic plasticity only depends on activity of pre-synaptic and post-synaptic neurons.},
ISSN = {0022-2496 (Print)
0022-2496 (Linking)},
DOI = {10.1016/j.jmp.2015.11.003},
url = {https://www.ncbi.nlm.nih.gov/pubmed/28298703},
year = {2017},
type = {Journal Article}
}
@article{RN11965,
author = {Bogacz, R.},
title = {Dopamine role in learning and action inference},
journal = {eLife},
volume = {9},
pages = {e53262},
note = {Bogacz, Rafal
eng
MC_UU_00003/1/MRC_/Medical Research Council/United Kingdom
MC_UU_12024/5/MRC_/Medical Research Council/United Kingdom
BB/S006338/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom
Research Support, Non-U.S. Gov't
England
2020/07/08
Elife. 2020 Jul 7;9:e53262. doi: 10.7554/eLife.53262.},
abstract = {This paper describes a framework for modelling dopamine function in the mammalian brain. It proposes that both learning and action planning involve processes minimizing prediction errors encoded by dopaminergic neurons. In this framework, dopaminergic neurons projecting to different parts of the striatum encode errors in predictions made by the corresponding systems within the basal ganglia. The dopaminergic neurons encode differences between rewards and expectations in the goal-directed system, and differences between the chosen and habitual actions in the habit system. These prediction errors trigger learning about rewards and habit formation, respectively. Additionally, dopaminergic neurons in the goal-directed system play a key role in action planning: They compute the difference between a desired reward and the reward expected from the current motor plan, and they facilitate action planning until this difference diminishes. Presented models account for dopaminergic responses during movements, effects of dopamine depletion on behaviour, and make several experimental predictions.
In the brain, chemicals such as dopamine allow nerve cells to 'talk' to each other and to relay information from and to the environment. Dopamine, in particular, is released when pleasant surprises are experienced: this helps the organism to learn about the consequences of certain actions. If a new flavour of ice-cream tastes better than expected, for example, the release of dopamine tells the brain that this flavour is worth choosing again. However, dopamine has an additional role in controlling movement. When the cells that produce dopamine die, for instance in Parkinson's disease, individuals may find it difficult to initiate deliberate movements. Here, Rafal Bogacz aimed to develop a comprehensive framework that could reconcile the two seemingly unrelated roles played by dopamine. The new theory proposes that dopamine is released when an outcome differs from expectations, which helps the organism to adjust and minimise these differences. In the ice-cream example, the difference is between how good the treat is expected to taste, and how tasty it really is. By learning to select the same flavour repeatedly, the brain aligns expectation and the result of the choice. This ability would also apply when movements are planned. In this case, the brain compares the desired reward with the predicted results of the planned actions. For example, while planning to get a spoonful of ice-cream, the brain compares the pleasure expected from the movement that is currently planned, and the pleasure of eating a full spoon of the treat. If the two differ, for example because no movement has been planned yet, the brain releases dopamine to form a better version of the action plan. The theory was then tested using a computer simulation of nerve cells that release dopamine; this showed that the behaviour of the virtual cells closely matched that of their real-life counterparts. This work offers a comprehensive description of the fundamental role of dopamine in the brain. 
The model now needs to be verified through experiments on living nerve cells; ultimately, it could help doctors and researchers to develop better treatments for conditions such as Parkinson's disease or ADHD, which are linked to a lack of dopamine.
eng},
keywords = {Animals
Basal Ganglia/physiology
Corpus Striatum/*physiology
Dopamine/*metabolism
Dopaminergic Neurons/*physiology
Learning/*physiology
Mammals/*physiology
Models, Biological
*Reward
active inference
computational biology
dopamine
human
mouse
neuroscience
rat
reinforcement learning
rhesus macaque
systems biology},
ISSN = {2050-084X (Electronic)
2050-084X (Linking)},
DOI = {10.7554/eLife.53262},
url = {https://www.ncbi.nlm.nih.gov/pubmed/32633715},
year = {2020},
type = {Journal Article}
}
@article{RN411,
author = {Courville, A. C. and Daw, N. D. and Touretzky, D. S.},
title = {Bayesian theories of conditioning in a changing world},
journal = {Trends in Cognitive Sciences},
volume = {10},
number = {7},
pages = {294--300},
note = {Courville, Aaron C
Daw, Nathaniel D
Touretzky, David S
eng
England
Trends Cogn Sci. 2006 Jul;10(7):294-300. Epub 2006 Jun 21.},
abstract = {The recent flowering of Bayesian approaches invites the re-examination of classic issues in behavior, even in areas as venerable as Pavlovian conditioning. A statistical account can offer a new, principled interpretation of behavior, and previous experiments and theories can inform many unexplored aspects of the Bayesian enterprise. Here we consider one such issue: the finding that surprising events provoke animals to learn faster. We suggest that, in a statistical account of conditioning, surprise signals change and therefore uncertainty and the need for new learning. We discuss inference in a world that changes and show how experimental results involving surprise can be interpreted from this perspective, and also how, thus understood, these phenomena help constrain statistical theories of animal and human learning.},
keywords = {Animals
Association Learning
*Bayes Theorem
*Behavior, Animal
*Conditioning, Classical
Extinction, Psychological
Humans
Inhibition (Psychology)
Problem Solving
Reinforcement Schedule
*Social Change
Social Environment
Stochastic Processes
Uncertainty},
ISSN = {1364-6613 (Print)
1364-6613 (Linking)},
DOI = {10.1016/j.tics.2006.05.004},
url = {http://www.ncbi.nlm.nih.gov/pubmed/16793323},
year = {2006},
type = {Journal Article}
}
@article{RN460,
author = {Daw, N. D. and Gershman, S. J. and Seymour, B. and Dayan, P. and Dolan, R. J.},
title = {Model-based influences on humans' choices and striatal prediction errors},
journal = {Neuron},
volume = {69},
number = {6},
pages = {1204--1215},
note = {Daw, Nathaniel D
Gershman, Samuel J
Seymour, Ben
Dayan, Peter
Dolan, Raymond J
eng
1R01MH087882-01/MH/NIMH NIH HHS/
R01 MH087882-02/MH/NIMH NIH HHS/
Wellcome Trust/United Kingdom
Neuron. 2011 Mar 24;69(6):1204-15. doi: 10.1016/j.neuron.2011.02.027.},
abstract = {The mesostriatal dopamine system is prominently implicated in model-free reinforcement learning, with fMRI BOLD signals in ventral striatum notably covarying with model-free prediction errors. However, latent learning and devaluation studies show that behavior also shows hallmarks of model-based planning, and the interaction between model-based and model-free values, prediction errors, and preferences is underexplored. We designed a multistep decision task in which model-based and model-free influences on human choice behavior could be distinguished. By showing that choices reflected both influences we could then test the purity of the ventral striatal BOLD signal as a model-free report. Contrary to expectations, the signal reflected both model-free and model-based predictions in proportions matching those that best explained choice behavior. These results challenge the notion of a separate model-free learner and suggest a more integrated computational architecture for high-level human decision-making.},
keywords = {Adult
Basal Ganglia/*physiology
Brain Mapping
Choice Behavior/*physiology
Dopamine/*metabolism
Female
Humans
Logistic Models
Magnetic Resonance Imaging
Male
Models, Neurological
Neurons/physiology
Neuropsychological Tests
*Reinforcement (Psychology)},
ISSN = {1097-4199 (Electronic)
0896-6273 (Linking)},
DOI = {10.1016/j.neuron.2011.02.027},
url = {http://www.ncbi.nlm.nih.gov/pubmed/21435563},
year = {2011},
type = {Journal Article}
}
@book{RN7345,
author = {Dayan, Peter and Abbott, Laurence F.},
title = {Theoretical Neuroscience: Computation and Mathematical Modeling of Neural Systems},
publisher = {MIT Press},
address = {Cambridge, MA},
url = {http://www.gatsby.ucl.ac.uk/~dayan/book/},
year = {2001},
type = {Book}
}
@techreport{RN5582,
author = {Doersch, Carl},
title = {Tutorial on variational autoencoders},
institution = {arXiv},
number = {arXiv:1606.05908},
year = {2016},
type = {Report}
}
@article{RN2997,
author = {Doya, K.},
title = {What are the computations of the cerebellum, the basal ganglia, and the cerebral cortex?},
journal = {Neural Networks},
volume = {12},
pages = {961--974},
note = {Neural Networks},
keywords = {basal ganglia},
DOI = {10.1016/S0893-6080(99)00046-5},
year = {1999},
type = {Journal Article}
}
@article{RN3028,
author = {Doya, K.},
title = {Complementary roles of basal ganglia and cerebellum in learning and motor control},
journal = {Current Opinion in Neurobiology},
volume = {10},
number = {6},
pages = {732--739},
note = {Doya, K
eng
Review
England
2001/03/10
Curr Opin Neurobiol. 2000 Dec;10(6):732-9. doi: 10.1016/s0959-4388(00)00153-7.},
abstract = {The classical notion that the basal ganglia and the cerebellum are dedicated to motor control has been challenged by the accumulation of evidence revealing their involvement in non-motor, cognitive functions. From a computational viewpoint, it has been suggested that the cerebellum, the basal ganglia, and the cerebral cortex are specialized for different types of learning: namely, supervised learning, reinforcement learning and unsupervised learning, respectively. This idea of learning-oriented specialization is helpful in understanding the complementary roles of the basal ganglia and the cerebellum in motor control and cognitive functions.},
keywords = {Animals
Basal Ganglia/cytology/*physiology
Cerebellum/cytology/*physiology
Humans
Learning/*physiology
Motor Neurons/physiology
Movement/*physiology},
ISSN = {0959-4388 (Print)
0959-4388 (Linking)},
DOI = {10.1016/s0959-4388(00)00153-7},
url = {https://www.ncbi.nlm.nih.gov/pubmed/11240282},
year = {2000},
type = {Journal Article}
}
@article{RN3152,
author = {Doya, K.},
title = {Metalearning and Neuromodulation},
journal = {Neural Networks},
volume = {15},
pages = {495--506},
note = {Neural Networks},
DOI = {10.1016/S0893-6080(02)00044-8},
year = {2002},
type = {Journal Article}
}
@article{RN765,
author = {Doya, K.},
title = {Reinforcement learning: Computational theory and biological mechanisms},
journal = {HFSP Journal},
volume = {1},
number = {1},
pages = {30--40},
note = {Doya, Kenji
eng
France
HFSP J. 2007 May;1(1):30-40. doi: 10.2976/1.2732246. Epub 2007 May 8.},
abstract = {Reinforcement learning is a computational framework for an active agent to learn behaviors on the basis of a scalar reward signal. The agent can be an animal, a human, or an artificial system such as a robot or a computer program. The reward can be food, water, money, or whatever measure of the performance of the agent. The theory of reinforcement learning, which was developed in an artificial intelligence community with intuitions from animal learning theory, is now giving a coherent account on the function of the basal ganglia. It now serves as the "common language" in which biologists, engineers, and social scientists can exchange their problems and findings. This article reviews the basic theoretical framework of reinforcement learning and discusses its recent and future contributions toward the understanding of animal behaviors and human decision making.},
ISSN = {1955-2068 (Print)
1955-205X (Linking)},
DOI = {10.2976/1.2732246},
url = {http://www.ncbi.nlm.nih.gov/pubmed/19404458},
year = {2007},
type = {Journal Article}
}
@article{RN768,
author = {Doya, K.},
title = {Modulators of decision making},
journal = {Nature Neuroscience},
volume = {11},
number = {4},
pages = {410--416},
note = {Doya, Kenji
eng
Nat Neurosci. 2008 Apr;11(4):410-6. doi: 10.1038/nn2077.},
abstract = {Human and animal decisions are modulated by a variety of environmental and intrinsic contexts. Here I consider computational factors that can affect decision making and review anatomical structures and neurochemical systems that are related to contextual modulation of decision making. Expectation of a high reward can motivate a subject to go for an action despite a large cost, a decision that is influenced by dopamine in the anterior cingulate cortex. Uncertainty of action outcomes can promote risk taking and exploratory choices, in which norepinephrine and the orbitofrontal cortex appear to be involved. Predictable environments should facilitate consideration of longer-delayed rewards, which depends on serotonin in the dorsal striatum and dorsal prefrontal cortex. This article aims to sort out factors that affect the process of decision making from the viewpoint of reinforcement learning theory and to bridge between such computational needs and their neurophysiological substrates.},
keywords = {Algorithms
Animals
*Computational Biology
*Decision Making
Environment
Gyrus Cinguli/*physiology
Humans
*Models, Neurological
Models, Psychological
Neurotransmitter Agents/physiology
*Probability Learning
Reward},
ISSN = {1097-6256 (Print)
1097-6256 (Linking)},
DOI = {10.1038/nn2077},
url = {http://www.ncbi.nlm.nih.gov/pubmed/18368048},
year = {2008},
type = {Journal Article}
}
@book{RN3237,
editor = {Doya, K. and Ishii, S. and Pouget, A. and Rao, R. P. N.},
title = {Bayesian Brain: Probabilistic Approaches to Neural Coding},
publisher = {MIT Press},
address = {Cambridge, MA},
year = {2007},
type = {Book}
}
@article{RN9413,
author = {Doya, Kenji and Miyazaki, Kayoko W. and Miyazaki, Katsuhiko},
title = {Serotonergic modulation of cognitive computations},
journal = {Current Opinion in Behavioral Sciences},
volume = {38},
pages = {116--123},
ISSN = {2352-1546},
DOI = {10.1016/j.cobeha.2021.02.003},
year = {2021},
type = {Journal Article}
}
@article{RN779,
author = {Elfwing, S. and Uchibe, E. and Doya, K. and Christensen, H. I.},
title = {Darwinian embodied evolution of the learning ability for survival},
journal = {Adaptive Behavior},
volume = {19},
number = {2},
pages = {101--120},
ISSN = {1059-7123, 1741-2633},
DOI = {10.1177/1059712310397633},
year = {2011},
type = {Journal Article}
}
@article{RN12253,
author = {Ellery, A.},
title = {Tutorial Review of Bio-Inspired Approaches to Robotic Manipulation for Space Debris Salvage},
journal = {Biomimetics (Basel)},
volume = {5},
number = {2},
pages = {19},
note = {Ellery, Alex
eng
Review
Switzerland
2020/05/16
Biomimetics (Basel). 2020 May 12;5(2):19. doi: 10.3390/biomimetics5020019.},
abstract = {We present a comprehensive tutorial review that explores the application of bio-inspired approaches to robot control systems for grappling and manipulating a wide range of space debris targets. Current robot manipulator control systems exploit limited techniques which can be supplemented by additional bio-inspired methods to provide a robust suite of robot manipulation technologies. In doing so, we review bio-inspired control methods because this will be the key to enabling such capabilities. In particular, force feedback control may be supplemented with predictive forward models and software emulation of viscoelastic preflexive joint behaviour. This models human manipulation capabilities as implemented by the cerebellum and muscles/joints respectively. In effect, we are proposing a three-level control strategy based on biomimetic forward models for predictive estimation, traditional feedback control and biomimetic muscle-like preflexes. We place emphasis on bio-inspired forward modelling suggesting that all roads lead to this solution for robust and adaptive manipulator control. This promises robust and adaptive manipulation for complex tasks in salvaging space debris.},
keywords = {cerebellum
on-orbit servicing
predictive forward models
preflex
space debris mitigation
space salvage
viscoelastic muscle},
ISSN = {2313-7673 (Electronic)
2313-7673 (Linking)},
DOI = {10.3390/biomimetics5020019},
url = {https://www.ncbi.nlm.nih.gov/pubmed/32408615},
year = {2020},
type = {Journal Article}
}
@article{RN12252,
author = {Engel, A. K. and Fries, P. and Singer, W.},
title = {Dynamic predictions: oscillations and synchrony in top-down processing},
journal = {Nat Rev Neurosci},
volume = {2},
number = {10},
pages = {704--716},
note = {Engel, A K
Fries, P
Singer, W
eng
Review
England
2001/10/05
Nat Rev Neurosci. 2001 Oct;2(10):704-16. doi: 10.1038/35094565.},
abstract = {Classical theories of sensory processing view the brain as a passive, stimulus-driven device. By contrast, more recent approaches emphasize the constructive nature of perception, viewing it as an active and highly selective process. Indeed, there is ample evidence that the processing of stimuli is controlled by top-down influences that strongly shape the intrinsic dynamics of thalamocortical networks and constantly create predictions about forthcoming sensory events. We discuss recent experiments indicating that such predictions might be embodied in the temporal structure of both stimulus-evoked and ongoing activity, and that synchronous oscillations are particularly important in this process. Coherence among subthreshold membrane potential fluctuations could be exploited to express selective functional relationships during states of expectancy or attention, and these dynamic patterns could allow the grouping and selection of distributed neuronal responses for further processing.},
keywords = {Animals
Brain/*physiology
Haplorhini
Humans
Mental Processes
Models, Neurological
Motor Activity/physiology
Neurons/*physiology
Oscillometry
Pattern Recognition, Visual
Time Factors
Visual Cortex/physiology},
ISSN = {1471-003X (Print)
1471-003X (Linking)},
DOI = {10.1038/35094565},
url = {https://www.ncbi.nlm.nih.gov/pubmed/11584308},
year = {2001},
type = {Journal Article}
}
@article{RN4762,
author = {Fermin, Alan and Yoshida, Takehiko and Ito, Makoto and Yoshimoto, Junichiro and Doya, Kenji},
title = {Neural mechanisms for model-free and model-based reinforcement strategies in humans performing a multi-step navigation task},
journal = {Neuroscience Research},
volume = {68},
pages = {E285--E286},
ISSN = {0168-0102},
DOI = {10.1016/j.neures.2010.07.1269},
year = {2010},
type = {Journal Article}
}
@techreport{RN7373,
author = {Fernando, Chrisantha and Banarse, Dylan and Blundell, Charles and Zwols, Yori and Ha, David and Rusu, Andrei A. and Pritzel, Alexander and Wierstra, Daan},
title = {{PathNet}: Evolution channels gradient descent in super neural networks},
DOI = {10.48550/arXiv.1701.08734},
year = {2017},
type = {Report}
}
@article{RN7478,
author = {Friston, K.},
title = {A theory of cortical responses},
journal = {Philos Trans R Soc Lond B Biol Sci},
volume = {360},
number = {1456},
pages = {815--836},
note = {Friston, Karl
eng
Wellcome Trust/United Kingdom
Research Support, Non-U.S. Gov't
Review
England
Philos Trans R Soc Lond B Biol Sci. 2005 Apr 29;360(1456):815-36. doi: 10.1098/rstb.2005.1622.},
abstract = {This article concerns the nature of evoked brain responses and the principles underlying their generation. We start with the premise that the sensory brain has evolved to represent or infer the causes of changes in its sensory inputs. The problem of inference is well formulated in statistical terms. The statistical fundaments of inference may therefore afford important constraints on neuronal implementation. By formulating the original ideas of Helmholtz on perception, in terms of modern-day statistical theories, one arrives at a model of perceptual inference and learning that can explain a remarkable range of neurobiological facts.It turns out that the problems of inferring the causes of sensory input (perceptual inference) and learning the relationship between input and cause (perceptual learning) can be resolved using exactly the same principle. Specifically, both inference and learning rest on minimizing the brain's free energy, as defined in statistical physics. Furthermore, inference and learning can proceed in a biologically plausible fashion. Cortical responses can be seen as the brain's attempt to minimize the free energy induced by a stimulus and thereby encode the most likely cause of that stimulus. Similarly, learning emerges from changes in synaptic efficacy that minimize the free energy, averaged over all stimuli encountered. The underlying scheme rests on empirical Bayes and hierarchical models of how sensory input is caused. The use of hierarchical models enables the brain to construct prior expectations in a dynamic and context-sensitive fashion. This scheme provides a principled way to understand many aspects of cortical organization and responses. The aim of this article is to encompass many apparently unrelated anatomical, physiological and psychophysical attributes of the brain within a single theoretical perspective. 
In terms of cortical architectures, the theoretical treatment predicts that sensory cortex should be arranged hierarchically, that connections should be reciprocal and that forward and backward connections should show a functional asymmetry (forward connections are driving, whereas backward connections are both driving and modulatory). In terms of synaptic physiology, it predicts associative plasticity and, for dynamic models, spike-timing-dependent plasticity. In terms of electrophysiology, it accounts for classical and extra classical receptive field effects and long-latency or endogenous components of evoked cortical responses. It predicts the attenuation of responses encoding prediction error with perceptual learning and explains many phenomena such as repetition suppression, mismatch negativity (MMN) and the P300 in electroencephalography. In psychophysical terms, it accounts for the behavioural correlates of these physiological phenomena, for example, priming and global precedence. The final focus of this article is on perceptual learning as measured with the MMN and the implications for empirical studies of coupling among cortical areas using evoked sensory responses.},
keywords = {Biophysical Phenomena
Biophysics
Cerebral Cortex/*anatomy & histology/*physiology
Electrophysiology
Humans
Interneurons/cytology/physiology
Learning/*physiology
*Models, Neurological
*Models, Statistical
Perception/*physiology
Synapses/physiology},
ISSN = {0962-8436 (Print)
0962-8436 (Linking)},
DOI = {10.1098/rstb.2005.1622},
url = {https://www.ncbi.nlm.nih.gov/pubmed/15937014},
year = {2005},
type = {Journal Article}
}
@article{RN1008,
author = {Friston, K.},
title = {The free-energy principle: a unified brain theory?},
journal = {Nat Rev Neurosci},
volume = {11},
number = {2},
pages = {127--138},
note = {Friston, Karl
eng
088130/Wellcome Trust/United Kingdom
Wellcome Trust/United Kingdom
England
Nat Rev Neurosci. 2010 Feb;11(2):127-38. doi: 10.1038/nrn2787. Epub 2010 Jan 13.},
abstract = {A free-energy principle has been proposed recently that accounts for action, perception and learning. This Review looks at some key brain theories in the biological (for example, neural Darwinism) and physical (for example, information theory and optimal control theory) sciences from the free-energy perspective. Crucially, one key theme runs through each of these theories - optimization. Furthermore, if we look closely at what is optimized, the same quantity keeps emerging, namely value (expected reward, expected utility) or its complement, surprise (prediction error, expected cost). This is the quantity that is optimized under the free-energy principle, which suggests that several global brain theories might be unified within a free-energy framework.},
keywords = {Animals
Brain/*physiology
Cognition/physiology
Humans
Learning/physiology
Nerve Net/*physiology
Perception/physiology
*Psychological Theory},
ISSN = {1471-0048 (Electronic)
1471-003X (Linking)},
DOI = {10.1038/nrn2787},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20068583},
year = {2010},
type = {Journal Article}
}
@article{RN4155,
author = {Fukushima, K.},
title = {{Neocognitron}: a self-organizing neural network model for a mechanism of pattern recognition unaffected by shift in position},
journal = {Biol Cybern},
volume = {36},
number = {4},
pages = {193--202},
note = {Fukushima, K
eng
Germany
1980/01/01
Biol Cybern. 1980;36(4):193-202. doi: 10.1007/BF00344251.},
abstract = {A neural network model for a mechanism of visual pattern recognition is proposed in this paper. The network is self-organized by "learning without a teacher", and acquires an ability to recognize stimulus patterns based on the geometrical similarity (Gestalt) of their shapes without affected by their positions. This network is given a nickname "neocognitron". After completion of self-organization, the network has a structure similar to the hierarchy model of the visual nervous system proposed by Hubel and Wiesel. The network consists of an input layer (photoreceptor array) followed by a cascade connection of a number of modular structures, each of which is composed of two layers of cells connected in a cascade. The first layer of each module consists of "S-cells", which show characteristics similar to simple cells or lower order hypercomplex cells, and the second layer consists of "C-cells" similar to complex cells or higher order hypercomplex cells. The afferent synapses to each S-cell have plasticity and are modifiable. The network has an ability of unsupervised learning: We do not need any "teacher" during the process of self-organization, and it is only needed to present a set of stimulus patterns repeatedly to the input layer of the network. The network has been simulated on a digital computer. After repetitive presentation of a set of stimulus patterns, each stimulus pattern has become to elicit an output only from one of the C-cells of the last layer, and conversely, this C-cell has become selectively responsive only to that stimulus pattern. That is, none of the C-cells of the last layer responds to more than one stimulus pattern. The response of the C-cells of the last layer is not affected by the pattern's position at all. Neither is it affected by a small change in shape nor in size of the stimulus pattern.},
keywords = {Cognition/physiology
Computers
*Form Perception
Learning/*physiology
*Models, Neurological
Nerve Net/*physiology
*Nervous System Physiological Phenomena
*Pattern Recognition, Visual},
ISSN = {0340-1200 (Print)
0340-1200 (Linking)},
DOI = {10.1007/BF00344251},
url = {https://www.ncbi.nlm.nih.gov/pubmed/7370364},
year = {1980},
type = {Journal Article}
}
@article{RN5530,
author = {Funamizu, A. and Kuhn, B. and Doya, K.},
title = {Neural substrate of dynamic {Bayesian} inference in the cerebral cortex},
journal = {Nature Neuroscience},
volume = {19},
number = {12},
pages = {1682--1689},
note = {Funamizu, Akihiro
Kuhn, Bernd
Doya, Kenji
eng
2016/11/01 06:00
Nat Neurosci. 2016 Dec;19(12):1682-1689. doi: 10.1038/nn.4390. Epub 2016 Sep 19.},
abstract = {Dynamic Bayesian inference allows a system to infer the environmental state under conditions of limited sensory observation. Using a goal-reaching task, we found that posterior parietal cortex (PPC) and adjacent posteromedial cortex (PM) implemented the two fundamental features of dynamic Bayesian inference: prediction of hidden states using an internal state transition model and updating the prediction with new sensory evidence. We optically imaged the activity of neurons in mouse PPC and PM layers 2, 3 and 5 in an acoustic virtual-reality system. As mice approached a reward site, anticipatory licking increased even when sound cues were intermittently presented; this was disturbed by PPC silencing. Probabilistic population decoding revealed that neurons in PPC and PM represented goal distances during sound omission (prediction), particularly in PPC layers 3 and 5, and prediction improved with the observation of cue sounds (updating). Our results illustrate how cerebral cortex realizes mental simulation using an action-dependent dynamic model.},
ISSN = {1546-1726 (Electronic)
1097-6256 (Linking)},
DOI = {10.1038/nn.4390},
url = {https://www.ncbi.nlm.nih.gov/pubmed/27643432
http://www.nature.com/neuro/journal/v19/n12/pdf/nn.4390.pdf
http://www.nature.com/neuro/journal/v19/n12/full/nn.4390.html},
year = {2016},
type = {Journal Article}
}
@book{RN5898,
author = {Gerstner, Wulfram and Kistler, Werner M. and Naud, Richard and Paninski, Liam},
title = {Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition},
publisher = {Cambridge University Press},
address = {Cambridge, UK},
ISBN = {978-1-107-06083-8},
url = {https://neuronaldynamics.epfl.ch},
year = {2014},
type = {Book}
}
@article{RN3430,
author = {Ghahramani, Z. and Wolpert, D. M.},
title = {Modular decomposition in visuomotor learning},
journal = {Nature},
volume = {386},
pages = {392--395},
DOI = {10.1038/386392a0},
year = {1997},
type = {Journal Article}
}
@article{RN1063,
author = {Gl{\"a}scher, J. and Daw, N. and Dayan, P. and O'Doherty, J. P.},
title = {States versus rewards: dissociable neural prediction error signals underlying model-based and model-free reinforcement learning},
journal = {Neuron},
volume = {66},
number = {4},
pages = {585--595},
note = {Glascher, Jan
Daw, Nathaniel
Dayan, Peter
O'Doherty, John P
eng
R03 MH075763-02/MH/NIMH NIH HHS/
Neuron. 2010 May 27;66(4):585-95. doi: 10.1016/j.neuron.2010.04.016.},
abstract = {Reinforcement learning (RL) uses sequential experience with situations ("states") and outcomes to assess actions. Whereas model-free RL uses this experience directly, in the form of a reward prediction error (RPE), model-based RL uses it indirectly, building a model of the state transition and outcome structure of the environment, and evaluating actions by searching this model. A state prediction error (SPE) plays a central role, reporting discrepancies between the current model and the observed state transitions. Using functional magnetic resonance imaging in humans solving a probabilistic Markov decision task, we found the neural signature of an SPE in the intraparietal sulcus and lateral prefrontal cortex, in addition to the previously well-characterized RPE in the ventral striatum. This finding supports the existence of two unique forms of learning signal in humans, which may form the basis of distinct computational strategies for guiding behavior.},
keywords = {Adolescent
Adult
Choice Behavior/physiology
Female
Forecasting
Humans
Learning/physiology
Male
*Models, Neurological
Psychomotor Performance/*physiology
*Reinforcement (Psychology)
*Research Design
*Reward
Young Adult},
ISSN = {1097-4199 (Electronic)
0896-6273 (Linking)},
DOI = {10.1016/j.neuron.2010.04.016},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20510862},
year = {2010},
type = {Journal Article}
}
@book{RN7326,
author = {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron},
title = {Deep Learning},
publisher = {MIT Press},
url = {https://www.deeplearningbook.org},
year = {2016},
type = {Book}
}
@article{RN1091,
author = {Gray, Charles M. and K{\"o}nig, Peter and Engel, Andreas K. and Singer, Wolf},
title = {Oscillatory responses in cat visual cortex exhibit inter-columnar synchronization which reflects global stimulus properties},
journal = {Nature},
volume = {338},
pages = {334--337},
DOI = {10.1038/338334a0},
year = {1989},
type = {Journal Article}
}
@article{RN10280,
author = {Grossman, C. D. and Bari, B. A. and Cohen, J. Y.},
title = {Serotonin neurons modulate learning rate through uncertainty},
journal = {Curr Biol},
note = {Grossman, Cooper D
Bari, Bilal A
Cohen, Jeremiah Y
eng
England
Curr Biol. 2021 Dec 17. pii: S0960-9822(21)01682-1. doi: 10.1016/j.cub.2021.12.006.},
abstract = {Regulating how fast to learn is critical for flexible behavior. Learning about the consequences of actions should be slow in stable environments, but accelerate when that environment changes. Recognizing stability and detecting change are difficult in environments with noisy relationships between actions and outcomes. Under these conditions, theories propose that uncertainty can be used to modulate learning rates ("meta-learning"). We show that mice behaving in a dynamic foraging task exhibit choice behavior that varied as a function of two forms of uncertainty estimated from a meta-learning model. The activity of dorsal raphe serotonin neurons tracked both types of uncertainty in the foraging task as well as in a dynamic Pavlovian task. Reversible inhibition of serotonin neurons in the foraging task reproduced changes in learning predicted by a simulated lesion of meta-learning in the model. We thus provide a quantitative link between serotonin neuron activity, learning, and decision making.},
keywords = {decision making
dorsal raphe
learning
serotonin
uncertainty},
ISSN = {1879-0445 (Electronic)
0960-9822 (Linking)},
DOI = {10.1016/j.cub.2021.12.006},
url = {https://www.ncbi.nlm.nih.gov/pubmed/34936883},
year = {2021},
type = {Journal Article}
}
@article{RN6130,
author = {Hassabis, D. and Kumaran, D. and Summerfield, C. and Botvinick, M.},
title = {Neuroscience-inspired artificial intelligence},
journal = {Neuron},
volume = {95},
number = {2},
pages = {245--258},
note = {Hassabis, Demis
Kumaran, Dharshan
Summerfield, Christopher
Botvinick, Matthew
eng
Review
Neuron. 2017 Jul 19;95(2):245-258. doi: 10.1016/j.neuron.2017.06.011.},
abstract = {The fields of neuroscience and artificial intelligence (AI) have a long and intertwined history. In more recent times, however, communication and collaboration between the two fields has become less commonplace. In this article, we argue that better understanding biological brains could play a vital role in building intelligent machines. We survey historical interactions between the AI and neuroscience fields and emphasize current advances in AI that have been inspired by the study of neural computation in humans and other animals. We conclude by highlighting shared themes that may be key for advancing future research in both fields.},
keywords = {artificial intelligence
brain
cognition
learning
neural network},
ISSN = {1097-4199 (Electronic)
0896-6273 (Linking)},
DOI = {10.1016/j.neuron.2017.06.011},
url = {https://www.ncbi.nlm.nih.gov/pubmed/28728020},
year = {2017},
type = {Journal Article}
}
@article{RN1162,
author = {Hasselmo, M. E. and Sarter, M.},
title = {Modes and models of forebrain cholinergic neuromodulation of cognition},
journal = {Neuropsychopharmacology},
volume = {36},
number = {1},
pages = {52--73},
abstract = {As indicated by the profound cognitive impairments caused by cholinergic receptor antagonists, cholinergic neurotransmission has a vital role in cognitive function, specifically attention and memory encoding. Abnormally regulated cholinergic neurotransmission has been hypothesized to contribute to the cognitive symptoms of neuropsychiatric disorders. Loss of cholinergic neurons enhances the severity of the symptoms of dementia. Cholinergic receptor agonists and acetylcholinesterase inhibitors have been investigated for the treatment of cognitive dysfunction. Evidence from experiments using new techniques for measuring rapid changes in cholinergic neurotransmission provides a novel perspective on the cholinergic regulation of cognitive processes. This evidence indicates that changes in cholinergic modulation on a timescale of seconds is triggered by sensory input cues and serves to facilitate cue detection and attentional performance. Furthermore, the evidence indicates cholinergic induction of evoked intrinsic, persistent spiking mechanisms for active maintenance of sensory input, and planned responses. Models have been developed to describe the neuronal mechanisms underlying the transient modulation of cortical target circuits by cholinergic activity. These models postulate specific locations and roles of nicotinic and muscarinic acetylcholine receptors and that cholinergic neurotransmission is controlled in part by (cortical) target circuits. The available evidence and these models point to new principles governing the development of the next generation of cholinergic treatments for cognitive disorders.},
keywords = {Acetylcholine/*physiology
Action Potentials/physiology
Animals
Attention/physiology
Cholinergic Fibers/*physiology
Cognition/*physiology
Humans
*Models, Neurological
Neural Pathways/anatomy & histology/chemistry/physiology
Neurons/physiology
Perception/physiology
Prosencephalon/anatomy & histology/chemistry/*physiology},
ISSN = {1740-634X (Electronic)
0006-3223 (Linking)},
DOI = {10.1038/npp.2010.104},
url = {http://www.ncbi.nlm.nih.gov/pubmed/20668433},
year = {2011},
type = {Journal Article}
}
@techreport{RN1204,
author = {Hinton, Geoffrey},
title = {A practical guide to training restricted {Boltzmann} machines},
institution = {Department of Computer Science, University of Toronto},
number = {UTML TR 2010-003},
year = {2010},
type = {Report}
}
@article{RN7348,
author = {Hodgkin, A. L. and Huxley, A. F.},
title = {A quantitative description of membrane current and its application to conduction and excitation in nerve},
journal = {Journal of Physiology},
volume = {117},
pages = {500--544},
DOI = {10.1113/jphysiol.1952.sp004764},
year = {1952},
type = {Journal Article}
}
@article{RN6770,
author = {Horikawa, Tomoyasu and Kamitani, Yukiyasu},
title = {Generic decoding of seen and imagined objects using hierarchical visual features},
journal = {Nature Communications},
volume = {8},
pages = {15037},
ISSN = {2041-1723},
DOI = {10.1038/ncomms15037},
year = {2017},
type = {Journal Article}
}
@article{RN4035,
author = {Hubel, D. H. and Wiesel, T. N.},
title = {Receptive fields of single neurones in the cat's striate cortex},
journal = {Journal of Physiology},
volume = {148},
pages = {574--591},
note = {HUBEL, D H
WIESEL, T N
eng
England
J Physiol. 1959 Oct;148:574-91. doi: 10.1113/jphysiol.1959.sp006308.},
keywords = {Animals
Cats
Cerebral Cortex/*physiology
Neurons/*physiology
*Visual Cortex
*CEREBRAL CORTEX/physiology
*NEURONS/physiology},
ISSN = {0022-3751 (Print)
0022-3751 (Linking)},
DOI = {10.1113/jphysiol.1959.sp006308},
url = {https://www.ncbi.nlm.nih.gov/pubmed/14403679},
year = {1959},
type = {Journal Article}
}
@article{RN1237,
author = {Hubel, D. H. and Wiesel, T. N.},
title = {Receptive fields, binocular interaction and functional architecture in the cat's visual cortex},
journal = {Journal of Physiology},
volume = {160},
pages = {106--154},
note = {HUBEL, D H
WIESEL, T N
eng
England
J Physiol. 1962 Jan;160:106-54. doi: 10.1113/jphysiol.1962.sp006837.},
keywords = {Animals
Cats
Cerebral Cortex/*physiology
*Visual Cortex
*CEREBRAL CORTEX/physiology},
ISSN = {0022-3751 (Print)
0022-3751 (Linking)},
DOI = {10.1113/jphysiol.1962.sp006837},
url = {https://www.ncbi.nlm.nih.gov/pubmed/14449617},
year = {1962},
type = {Journal Article}
}
@book{RN12255,
author = {Ito, M.},
title = {The Cerebellum and Neural Control},
publisher = {Raven Press},
address = {New York},
url = {https://archive.org/details/cerebellumneural0000itom},
year = {1984},
type = {Book}
}
@article{RN3458,
author = {Ito, M.},
title = {Movement and thought: identical control mechanisms by the cerebellum},
journal = {Trends Neurosci},
volume = {16},
number = {11},
pages = {448--450; discussion 453--454},
note = {Ito, M
eng
Comment
Review
England
1993/11/01
Trends Neurosci. 1993 Nov;16(11):448-50; discussion 453-4. doi: 10.1016/0166-2236(93)90073-u.},
keywords = {Animals
Cerebellum/*physiology
Humans
Movement/*physiology
Thinking/*physiology},
ISSN = {0166-2236 (Print)
0166-2236 (Linking)},
DOI = {10.1016/0166-2236(93)90073-u},
url = {https://www.ncbi.nlm.nih.gov/pubmed/7507615},
year = {1993},
type = {Journal Article}
}
@article{RN11708,
author = {Ito, M.},
title = {Mechanisms of motor learning in the cerebellum},
journal = {Brain Res},
volume = {886},
number = {1-2},
pages = {237--245},
note = {Ito, M
eng
Review
Netherlands
2000/12/20
Brain Res. 2000 Dec 15;886(1-2):237-245. doi: 10.1016/s0006-8993(00)03142-5.},
abstract = {How the elaborate neuronal circuit in the cerebellum operates and is involved in motor learning is a question addressed in earnest in studies on the cerebellum. During the past four decades, experimental studies have revealed circuit and module structures of the cerebellum, established long-term depression (LTD) as a unique and characteristic type of synaptic plasticity in the cerebellum, and analysed signal contents of activates of cerebellar neurons related to motor learning. In the 1990s, these studies were developed to detailed analyses of the signal transduction underlying LTD, and to uncovering the involvement of the cerebellum in cognitive function. On the other hand, theoretical studies yielded epochal Marr-Albus network models of the cerebellum around 1970, and introduced control system principles explaining the essential roles of the cerebellum in motor learning as providing internal models, both forward and inverse. The author maintains the hypothesis that reorganisation of the neuronal circuit by error-driven induction of LTD constitutes the major memory and learning mechanisms of the cerebellum. In this article, we examine the validity of the hypothesis in light of currently available data in recent studies of the cerebellum.},
keywords = {Animals
Cerebellum/*physiology
Feedback/physiology
Learning/*physiology
Models, Neurological
Motor Neurons/physiology
Motor Skills/*physiology
Nerve Net/*physiology
Neural Inhibition/physiology
Neuronal Plasticity/physiology
Signal Transduction/physiology},
ISSN = {0006-8993 (Print)
0006-8993 (Linking)},
DOI = {10.1016/s0006-8993(00)03142-5},
url = {https://www.ncbi.nlm.nih.gov/pubmed/11119699},
year = {2000},
type = {Journal Article}