Imperial College London

Dr Dan Goodman

Faculty of Engineering, Department of Electrical and Electronic Engineering

Senior Lecturer

Contact

+44 (0)20 7594 6264
d.goodman
Website

Location

1001, Electrical Engineering, South Kensington Campus

Publications

Citation

BibTeX format

@article{Blundell:2018:10.3389/fninf.2018.00068,
author = {Blundell, I and Brette, R and Cleland, TA and Close, TG and Coca, D and Davison, AP and Diaz-Pier, S and Musoles, CF and Gleeson, P and Goodman, DFM and Hines, M and Hopkins, MW and Kumbhar, P and Lester, DR and Marin, B and Morrison, A and Mueller, E and Nowotny, T and Peyser, A and Plotnikov, D and Richmond, P and Rowley, A and Rumpe, B and Stimberg, M and Stokes, AB and Tomkins, A and Trensch, G and Woodman, M and Eppler, JM},
doi = {10.3389/fninf.2018.00068},
journal = {Frontiers in Neuroinformatics},
title = {Code generation in computational neuroscience: A review of tools and techniques},
url = {http://dx.doi.org/10.3389/fninf.2018.00068},
volume = {12},
year = {2018}
}
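The BibTeX record above can also be read programmatically rather than pasted into a reference manager. The following is a minimal sketch, assuming the third-party Python package bibtexparser (v1 API, where the top-level loads function exists) is installed; the citation key and author list are shortened here purely to keep the example compact, and all other fields are copied from the record above.

import bibtexparser

# Citation key and author list shortened for brevity; other fields are as in the record above.
BIBTEX = """
@article{Blundell2018,
author = {Blundell, I and Brette, R and Goodman, DFM and others},
doi = {10.3389/fninf.2018.00068},
journal = {Frontiers in Neuroinformatics},
title = {Code generation in computational neuroscience: A review of tools and techniques},
url = {http://dx.doi.org/10.3389/fninf.2018.00068},
volume = {12},
year = {2018}
}
"""

db = bibtexparser.loads(BIBTEX)    # parse the string into a BibDatabase
entry = db.entries[0]              # each entry is a plain dict with lowercased field names
print(entry["title"])
print(entry["journal"], entry["volume"], entry["year"])
print("doi:", entry["doi"])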

RIS format (EndNote, RefMan)

TY  - JOUR
AB - Advances in experimental techniques and computational power allowing researchers to gather anatomical and electrophysiological data at unprecedented levels of detail have fostered the development of increasingly complex models in computational neuroscience. Large-scale, biophysically detailed cell models pose a particular set of computational challenges, and this has led to the development of a number of domain-specific simulators. At the other level of detail, the ever growing variety of point neuron models increases the implementation barrier even for those based on the relatively simple integrate-and-fire neuron model. Independently of the model complexity, all modeling methods crucially depend on an efficient and accurate transformation of mathematical model descriptions into efficiently executable code. Neuroscientists usually publish model descriptions in terms of the mathematical equations underlying them. However, actually simulating them requires they be translated into code. This can cause problems because errors may be introduced if this process is carried out by hand, and code written by neuroscientists may not be very computationally efficient. Furthermore, the translated code might be generated for different hardware platforms, operating system variants or even written in different languages and thus cannot easily be combined or even compared. Two main approaches to addressing these issues have been followed. The first is to limit users to a fixed set of optimized models, which limits flexibility. The second is to allow model definitions in a high level interpreted language, although this may limit performance. Recently, a third approach has become increasingly popular: using code generation to automatically translate high level descriptions into efficient low level code to combine the best of previous approaches. This approach also greatly enriches efforts to standardize simulator-independent model description languages. In the past few years, a number
AU - Blundell,I
AU - Brette,R
AU - Cleland,TA
AU - Close,TG
AU - Coca,D
AU - Davison,AP
AU - Diaz-Pier,S
AU - Musoles,CF
AU - Gleeson,P
AU - Goodman,DFM
AU - Hines,M
AU - Hopkins,MW
AU - Kumbhar,P
AU - Lester,DR
AU - Marin,B
AU - Morrison,A
AU - Mueller,E
AU - Nowotny,T
AU - Peyser,A
AU - Plotnikov,D
AU - Richmond,P
AU - Rowley,A
AU - Rumpe,B
AU - Stimberg,M
AU - Stokes,AB
AU - Tomkins,A
AU - Trensch,G
AU - Woodman,M
AU - Eppler,JM
DO - 10.3389/fninf.2018.00068
PY - 2018///
SN - 1662-5196
TI - Code generation in computational neuroscience: A review of tools and techniques
T2 - Frontiers in Neuroinformatics
UR - http://dx.doi.org/10.3389/fninf.2018.00068
UR - http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000449250100001&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=1ba7043ffcc86c417c072aa74d649202
UR - http://hdl.handle.net/10044/1/64579
VL - 12
ER -
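The abstract's central idea, automatically translating a high-level mathematical model description into executable low-level code, can be illustrated with a deliberately small sketch. This is not code from the paper or from any of the reviewed simulators; the equation, variable names and forward-Euler scheme are invented for illustration only.

# Toy illustration of code generation: a high-level description string is translated
# into Python source for an update step and compiled at runtime instead of being
# written by hand. The model (exponential decay with time constant tau) is made up.
import textwrap

def generate_step(equation, dt):
    """Translate 'dX/dt = <expr>' into a compiled forward-Euler update function."""
    lhs, rhs = equation.split("=")
    state = lhs.strip()[1:].split("/")[0].strip()   # 'dv/dt' -> 'v'
    # For simplicity the generated function always takes a single parameter, tau.
    source = textwrap.dedent(f"""
        def step({state}, tau):
            return {state} + {dt} * ({rhs.strip()})
    """)
    namespace = {}
    exec(compile(source, "<generated>", "exec"), namespace)  # build the function
    return namespace["step"]

step = generate_step("dv/dt = -v / tau", dt=0.0001)
v = 1.0
for _ in range(10000):        # integrate 1 s in 0.1 ms steps
    v = step(v, tau=0.1)
print(v)                      # decays towards 0, roughly exp(-10) ~ 4.5e-5

Real code-generation pipelines of the kind surveyed in the paper go far beyond this, handling multiple state variables, numerical methods and hardware-specific targets, but the description-to-code translation step sketched here is the basic idea the abstract refers to.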