diff --git a/brian2/codegen/codeobject.py b/brian2/codegen/codeobject.py index 9fd522d78..4af1e6230 100644 --- a/brian2/codegen/codeobject.py +++ b/brian2/codegen/codeobject.py @@ -1,13 +1,12 @@ """ Module providing the base `CodeObject` and related functions. """ - import collections import copy import platform -import weakref -from brian2.core.functions import Function +from brian2.core.base import weakproxy_with_fallback +from brian2.core.functions import DEFAULT_FUNCTIONS, Function from brian2.core.names import Nameable from brian2.equations.unitcheck import check_units_statements from brian2.utils.logger import get_logger @@ -73,11 +72,7 @@ def __init__( name="codeobject*", ): Nameable.__init__(self, name=name) - try: - owner = weakref.proxy(owner) - except TypeError: - pass # if owner was already a weakproxy then this will be the error raised - self.owner = owner + self.owner = weakproxy_with_fallback(owner) self.code = code self.compiled_code = {} self.variables = variables @@ -86,6 +81,23 @@ def __init__( self.template_source = template_source self.compiler_kwds = compiler_kwds + def __getstate__(self): + state = self.__dict__.copy() + state["owner"] = self.owner.__repr__.__self__ + # Replace Function objects for standard functions by their name + state["variables"] = self.variables.copy() + for k, v in state["variables"].items(): + if isinstance(v, Function) and v is DEFAULT_FUNCTIONS[k]: + state["variables"][k] = k + return state + + def __setstate__(self, state): + state["owner"] = weakproxy_with_fallback(state["owner"]) + for k, v in state["variables"].items(): + if isinstance(v, str): + state["variables"][k] = DEFAULT_FUNCTIONS[k] + self.__dict__ = state + @classmethod def is_available(cls): """ diff --git a/brian2/core/variables.py b/brian2/core/variables.py index 6d0a293fa..db3a0c8aa 100644 --- a/brian2/core/variables.py +++ b/brian2/core/variables.py @@ -184,6 +184,15 @@ def __init__( #: Whether the variable is an array self.array = array + def __getstate__(self): + state = self.__dict__.copy() + state["owner"] = state["owner"].__repr__.__self__ # replace proxy + return state + + def __setstate__(self, state): + state["owner"] = weakproxy_with_fallback(state["owner"]) + self.__dict__ = state + @property def is_boolean(self): return np.issubdtype(self.dtype, np.bool_) @@ -1561,18 +1570,24 @@ def __repr__(self): return f"<{self.group_name}.{varname}: {values}>" + def __hash__(self): + return hash((self.group_name, self.name)) + # Get access to some basic properties of the underlying array @property def shape(self): - return self.get_item(slice(None), level=1).shape + if self.ndim == 1: + return (self.variable.size,) + else: + return self.variable.size @property def ndim(self): - return self.get_item(slice(None), level=1).ndim + return getattr(self.variable, "ndim", 1) @property def dtype(self): - return self.get_item(slice(None), level=1).dtype + return self.variable.dtype class Variables(Mapping): @@ -1607,6 +1622,15 @@ def __init__(self, owner, default_index="_idx"): # Note that by using functools.partial (instead of e.g. a lambda # function) above, this object remains pickable. 
+ def __getstate__(self): + state = self.__dict__.copy() + state["owner"] = state["owner"].__repr__.__self__ + return state + + def __setstate__(self, state): + state["owner"] = weakproxy_with_fallback(state["owner"]) + self.__dict__ = state + def __getitem__(self, item): return self._variables[item] diff --git a/brian2/devices/cpp_standalone/device.py b/brian2/devices/cpp_standalone/device.py index b4f340657..90a406a1e 100644 --- a/brian2/devices/cpp_standalone/device.py +++ b/brian2/devices/cpp_standalone/device.py @@ -12,7 +12,9 @@ import time import zlib from collections import Counter, defaultdict +from collections.abc import Mapping from distutils import ccompiler +from hashlib import md5 import numpy as np @@ -32,10 +34,12 @@ ) from brian2.devices.device import Device, all_devices, reset_device, set_device from brian2.groups.group import Group +from brian2.input import TimedArray from brian2.parsing.rendering import CPPNodeRenderer from brian2.synapses.synapses import Synapses from brian2.units import second -from brian2.units.fundamentalunits import Quantity +from brian2.units.fundamentalunits import Quantity, fail_for_dimension_mismatch +from brian2.utils.filelock import FileLock from brian2.utils.filetools import copy_directory, ensure_directory, in_directory from brian2.utils.logger import get_logger, std_silent from brian2.utils.stringtools import word_substitute @@ -167,6 +171,9 @@ def __init__(self): #: Whether the simulation has been run self.has_been_run = False + #: Whether apply_run_args has been called + self.run_args_applied = False + #: Whether a run should trigger a build self.build_on_run = False @@ -176,6 +183,9 @@ def __init__(self): #: The directory which contains the generated code and results self.project_dir = None + #: The directory which contains the results (relative to ``project_dir``) + self.results_dir = None + #: Whether to generate profiling information (stored in an instance #: variable to be accessible during CodeObject generation) self.enable_profiling = False @@ -187,6 +197,9 @@ def __init__(self): #: Dict of all static saved arrays self.static_arrays = {} + #: Dict of all TimedArray objects + self.timed_arrays = {} + self.code_objects = {} self.main_queue = [] self.runfuncs = {} @@ -289,6 +302,14 @@ def insert_code(self, slot, code): else: logger.warn(f"Ignoring device code, unknown slot: {slot}, code: {code}") + def apply_run_args(self): + if self.run_args_applied: + raise RuntimeError( + "The 'apply_run_args()' function can only be called once." + ) + self.insert_code("main", "set_from_command_line(args);") + self.run_args_applied = True + def static_array(self, name, arr): arr = np.atleast_1d(arr) assert len(arr), f"length for {name}: {len(arr)}" @@ -324,7 +345,7 @@ def get_array_name(self, var, access_data=True): else: raise TypeError(f"Do not have a name for variable of type {type(var)}.") - def get_array_filename(self, var, basedir="results"): + def get_array_filename(self, var, basedir=None): """ Return a file name for a variable. Parameters ---------- var : `ArrayVariable` The variable to get a filename for. basedir : str The base directory for the filename, defaults to ``'results'``. + DEPRECATED: Will raise an error if specified. Returns ------- filename : str A filename of the form - ``'results/'+varname+'_'+str(zlib.crc32(varname))``, where varname + ``varname+'_'+str(zlib.crc32(varname))``, where varname is the name returned by `get_array_name`. 
Notes ----- - The reason that the filename is not simply ``'results/' + varname`` is + The reason that the filename is not simply ``varname`` is that this could lead to file names that are not unique in file systems that are not case sensitive (e.g. on Windows). """ + if basedir is not None: + raise ValueError("Specifying 'basedir' is no longer supported.") varname = self.get_array_name(var, access_data=False) - return os.path.join( - basedir, f"{varname}_{str(zlib.crc32(varname.encode('utf-8')))}" - ) + return f"{varname}_{str(zlib.crc32(varname.encode('utf-8')))}" def add_array(self, var): # Note that a dynamic array variable is added to both the arrays and @@ -529,7 +551,7 @@ def get_value(self, var, access_data=True): # disk if self.has_been_run: dtype = var.dtype - fname = os.path.join(self.project_dir, self.get_array_filename(var)) + fname = os.path.join(self.results_dir, self.get_array_filename(var)) with open(fname, "rb") as f: data = np.fromfile(f, dtype=dtype) # This is a bit of an heuristic, but our 2d dynamic arrays are @@ -696,6 +718,9 @@ def code_object( if self.enable_profiling: self.profiled_codeobjects.append(codeobj.name) + for var in codeobj.variables.values(): + if isinstance(var, TimedArray): + self.timed_arrays[var] = var.name # We mark all writeable (i.e. not read-only) variables used by the code # as "dirty" to avoid that the cache contains incorrect values. This # might remove a number of variables from the cache unnecessarily, @@ -745,7 +770,13 @@ def check_openmp_compatible(self, nb_threads): ) def generate_objects_source( - self, writer, arange_arrays, synapses, static_array_specs, networks + self, + writer, + arange_arrays, + synapses, + static_array_specs, + networks, + timed_arrays, ): arr_tmp = self.code_object_class().templater.objects( None, @@ -763,6 +794,7 @@ def generate_objects_source( get_array_name=self.get_array_name, profiled_codeobjects=self.profiled_codeobjects, code_objects=list(self.code_objects.values()), + timed_arrays=timed_arrays, ) writer.write("objects.*", arr_tmp) @@ -1193,7 +1225,75 @@ def seed(self, seed=None): """ self.main_queue.append(("seed", seed)) - def run(self, directory, with_output, run_args): + def run( + self, directory=None, results_directory=None, with_output=True, run_args=None + ): + if directory is None: + directory = self.project_dir + if results_directory is None: + results_directory = self.results_dir + else: + if os.path.isabs(results_directory): + raise TypeError( + "The 'results_directory' argument needs to be a relative path but" + f" was '{results_directory}'." + ) + # Translate path to absolute path which ends with / + self.results_dir = os.path.join( + os.path.abspath(os.path.join(directory, results_directory)), "" + ) + ensure_directory(self.results_dir) + + if run_args is None: + run_args = [] + elif isinstance(run_args, Mapping): + list_rep = [] + for key, value in run_args.items(): + if isinstance(key, VariableView): + ( + name, + string_value, + value_name, + value_ar, + ) = self._prepare_variableview_run_arg(key, value) + elif isinstance(key, TimedArray): + ( + name, + string_value, + value_name, + value_ar, + ) = self._prepare_timed_array_run_arg(key, value) + else: + raise TypeError( + "The keys for 'run_args' need to be 'VariableView' objects," + " i.e. attributes of groups such as 'neurongroup.v', or a" + f" 'TimedArray'. Key has type '{type(key)}' instead." 
+ ) + if value_name: + fname = os.path.join(self.project_dir, "static_arrays", value_name) + # Make sure processes trying to write the same file don't clash + with FileLock(fname + ".lock"): + if not os.path.exists(fname): + value_ar.tofile(fname) + list_rep.append(f"{name}={string_value}") + + run_args = list_rep + + # Invalidate array cache for all variables set on the command line + for arg in run_args: + s = arg.split("=") + if len(s) == 2: + for var in self.array_cache: + if ( + hasattr(var.owner, "name") + and var.owner.name + "." + var.name == s[0] + ): + self.array_cache[var] = None + run_args = ["--results_dir", self.results_dir] + run_args + # Invalidate the cached end time of the clock and network, to deal with stopped simulations + for clock in self.clocks: + self.array_cache[clock.variables["t"]] = None + with in_directory(directory): # Set environment variables @@ -1209,7 +1309,7 @@ def run(self, directory, with_output, run_args): ) os.environ[key] = value if not with_output: - stdout = open("results/stdout.txt", "w") + stdout = open(os.path.join(self.results_dir, "stdout.txt"), "w") else: stdout = None if os.name == "nt": @@ -1226,16 +1326,18 @@ def run(self, directory, with_output, run_args): if stdout is not None: stdout.close() if x: - if os.path.exists("results/stdout.txt"): - with open("results/stdout.txt") as f: + stdout_fname = os.path.join(self.results_dir, "stdout.txt") + if os.path.exists(stdout_fname): + with open(stdout_fname) as f: print(f.read()) raise RuntimeError( "Project run failed (project directory:" f" {os.path.abspath(directory)})" ) self.has_been_run = True - if os.path.isfile("results/last_run_info.txt"): - with open("results/last_run_info.txt") as f: + run_info_fname = os.path.join(self.results_dir, "last_run_info.txt") + if os.path.isfile(run_info_fname): + with open(run_info_fname) as f: last_run_info = f.read() run_time, completed_fraction = last_run_info.split() self._last_run_time = float(run_time) @@ -1252,7 +1354,7 @@ def run(self, directory, with_output, run_args): already_checked = set() for owner in owners: try: - if owner.name in already_checked: + if not hasattr(owner, "name") or owner.name in already_checked: continue if isinstance(owner, Group): owner._check_for_invalid_states() @@ -1260,9 +1362,52 @@ def run(self, directory, with_output, run_args): except ReferenceError: pass + def _prepare_variableview_run_arg(self, key, value): + fail_for_dimension_mismatch(key.dim, value) # TODO: Give name of variable + value_ar = np.asarray(value, dtype=key.dtype) + if value_ar.ndim == 0 or value_ar.size == 1: + # single value, give value directly on command line + string_value = repr(value_ar.item()) + value_name = None + else: + if value_ar.ndim != 1 or ( + not key.variable.dynamic and value_ar.size != key.shape[0] + ): + raise TypeError( + "Incorrect size for variable" + f" '{key.group_name}.{key.name}'. Shape {key.shape} ≠" + f" {value_ar.shape}." 
+ ) + value_name = ( + f"init_{key.group_name}_{key.name}_{md5(value_ar.data).hexdigest()}.dat" + ) + string_value = os.path.join("static_arrays", value_name) + name = f"{key.group_name}.{key.name}" + return name, string_value, value_name, value_ar + + def _prepare_timed_array_run_arg(self, key, value): + fail_for_dimension_mismatch(key.dim, value) # TODO: Give name of variable + value_ar = np.asarray(value, dtype=key.values.dtype) + if value_ar.ndim == 0 or value_ar.size == 1: + # single value, give value directly on command line + string_value = repr(value_ar.item()) + value_name = None + elif value_ar.shape == key.values.shape: + value_name = f"init_{key.name}_values_{md5(value_ar.data).hexdigest()}.dat" + string_value = os.path.join("static_arrays", value_name) + else: + raise TypeError( + "Incorrect size for variable" + f" '{key.name}.values'. Shape {key.values.shape} ≠" + f" {value_ar.shape}." + ) + name = f"{key.name}.values" + return name, string_value, value_name, value_ar + def build( self, directory="output", + results_directory="results", compile=True, run=True, debug=False, @@ -1352,6 +1497,15 @@ def build( directory = tempfile.mkdtemp(prefix="brian_standalone_") self.project_dir = directory ensure_directory(directory) + if os.path.isabs(results_directory): + raise TypeError( + "The 'results_directory' argument needs to be a relative path but was " + f"'{results_directory}'." + ) + # Translate path to absolute path which ends with / + self.results_dir = os.path.join( + os.path.abspath(os.path.join(directory, results_directory)), "" + ) # Determine compiler flags and directories compiler, default_extra_compile_args = get_compiler_and_args() @@ -1466,6 +1620,7 @@ def build( self.synapses, self.static_array_specs, self.networks, + self.timed_arrays, ) self.generate_main_source(self.writer) self.generate_codeobj_source(self.writer) @@ -1488,7 +1643,7 @@ def build( if compile: self.compile_source(directory, compiler, debug, clean) if run: - self.run(directory, with_output, run_args) + self.run(directory, results_directory, with_output, run_args) time_measurements = { "'make clean'": self.timers["compile"]["clean"], "'make'": self.timers["compile"]["make"], @@ -1516,13 +1671,13 @@ def delete(self, code=True, data=True, directory=True, force=False): # Delete data if data: - results_dir = os.path.join(self.project_dir, "results") + results_dir = self.results_dir logger.debug(f"Deleting data files in '{results_dir}'") - fnames.append(os.path.join("results", "last_run_info.txt")) + fnames.append(os.path.join(results_dir, "last_run_info.txt")) if self.profiled_codeobjects: - fnames.append(os.path.join("results", "profiling_info.txt")) + fnames.append(os.path.join(results_dir, "profiling_info.txt")) for var in self.arrays: - fnames.append(self.get_array_filename(var)) + fnames.append(os.path.join(results_dir, self.get_array_filename(var))) # Delete code if code: @@ -1778,6 +1933,9 @@ def network_run( run_lines.append(f"{net.name}.add(&{clock.name}, NULL);") run_lines.extend(self.code_lines["before_network_run"]) + if not self.run_args_applied: + run_lines.append("set_from_command_line(args);") + self.run_args_applied = True run_lines.append( f"{net.name}.run({float(duration)!r}, {report_call}," f" {float(report_period)!r});" diff --git a/brian2/devices/cpp_standalone/templates/main.cpp b/brian2/devices/cpp_standalone/templates/main.cpp index d2ba3d481..5758fff70 100644 --- a/brian2/devices/cpp_standalone/templates/main.cpp +++ b/brian2/devices/cpp_standalone/templates/main.cpp @@ -24,8 
+24,27 @@ {{report_func|autoindent}} +void set_from_command_line(const std::vector<std::string> args) +{ + for (const auto& arg : args) { + // Split into two parts + size_t equal_sign = arg.find("="); + auto name = arg.substr(0, equal_sign); + auto value = arg.substr(equal_sign + 1, arg.length()); + brian::set_variable_by_name(name, value); + } +} int main(int argc, char **argv) { + std::vector<std::string> args(argv + 1, argv + argc); + if (args.size() >=2 && args[0] == "--results_dir") + { + brian::results_dir = args[1]; + #ifdef DEBUG + std::cout << "Setting results dir to '" << brian::results_dir << "'" << std::endl; + #endif + args.erase(args.begin(), args.begin()+2); + } {{'\n'.join(code_lines['before_start'])|autoindent}} brian_start(); {{'\n'.join(code_lines['after_start'])|autoindent}} diff --git a/brian2/devices/cpp_standalone/templates/network.cpp b/brian2/devices/cpp_standalone/templates/network.cpp index b2deacdeb..8374d00ec 100644 --- a/brian2/devices/cpp_standalone/templates/network.cpp +++ b/brian2/devices/cpp_standalone/templates/network.cpp @@ -140,10 +140,10 @@ void Network::compute_clocks() Clock* Network::next_clocks() { + if (clocks.empty()) + return NULL; // find minclock, clock with smallest t value Clock *minclock = *clocks.begin(); - if (!minclock) // empty list of clocks - return NULL; for(std::set<Clock*>::iterator i=clocks.begin(); i!=clocks.end(); i++) { diff --git a/brian2/devices/cpp_standalone/templates/objects.cpp b/brian2/devices/cpp_standalone/templates/objects.cpp index a6fc2d5a0..17a1afad3 100644 --- a/brian2/devices/cpp_standalone/templates/objects.cpp +++ b/brian2/devices/cpp_standalone/templates/objects.cpp @@ -1,5 +1,19 @@ {% macro cpp_file() %} +{% macro set_from_value(var_dtype, array_name) %} +{% if c_data_type(var_dtype) == 'double' %} +set_variable_from_value(name, {{array_name}}, var_size, (double)atof(s_value.c_str())); +{% elif c_data_type(var_dtype) == 'float' %} +set_variable_from_value(name, {{array_name}}, var_size, (float)atof(s_value.c_str())); +{% elif c_data_type(var_dtype) == 'int32_t' %} +set_variable_from_value(name, {{array_name}}, var_size, (int32_t)atoi(s_value.c_str())); +{% elif c_data_type(var_dtype) == 'int64_t' %} +set_variable_from_value(name, {{array_name}}, var_size, (int64_t)atol(s_value.c_str())); +{% elif c_data_type(var_dtype) == 'char' %} +set_variable_from_value(name, {{array_name}}, var_size, (char)atoi(s_value.c_str())); +{% endif %} +{%- endmacro %} + #include "objects.h" #include "synapses_classes.h" #include "brianlib/clocks.h" @@ -10,9 +24,14 @@ #include<vector> #include<iostream> #include<fstream> +#include<string> +#include<cctype> +#include<cstdlib> +#include<algorithm> namespace brian { +std::string results_dir = "results/"; // can be overwritten by --results_dir command line arg std::vector< rk_state* > _mersenne_twister_states; //////////////// networks ///////////////// {% for net in networks | sort(attribute='name') %} Network {{net.name}}; {% endfor %} +void set_variable_from_value(std::string varname, char* var_pointer, size_t size, char value) { + #ifdef DEBUG + std::cout << "Setting '" << varname << "' to " << (value == 1 ? 
"True" : "False") << std::endl; + #endif + std::fill(var_pointer, var_pointer+size, value); +} + +template void set_variable_from_value(std::string varname, T* var_pointer, size_t size, T value) { + #ifdef DEBUG + std::cout << "Setting '" << varname << "' to " << value << std::endl; + #endif + std::fill(var_pointer, var_pointer+size, value); +} + +template void set_variable_from_file(std::string varname, T* var_pointer, size_t data_size, std::string filename) { + ifstream f; + streampos size; + #ifdef DEBUG + std::cout << "Setting '" << varname << "' from file '" << filename << "'" << std::endl; + #endif + f.open(filename, ios::in | ios::binary | ios::ate); + size = f.tellg(); + if (size != data_size) { + std::cerr << "Error reading '" << filename << "': file size " << size << " does not match expected size " << data_size << std::endl; + return; + } + f.seekg(0, ios::beg); + if (f.is_open()) + f.read(reinterpret_cast(var_pointer), data_size); + else + std::cerr << "Could not read '" << filename << "'" << std::endl; + if (f.fail()) + std::cerr << "Error reading '" << filename << "'" << std::endl; +} + +//////////////// set arrays by name /////// +void set_variable_by_name(std::string name, std::string s_value) { + size_t var_size; + size_t data_size; + std::for_each(s_value.begin(), s_value.end(), [](char& c) // modify in-place + { + c = std::tolower(static_cast(c)); + }); + if (s_value == "true") + s_value = "1"; + else if (s_value == "false") + s_value = "0"; + // non-dynamic arrays + {% for var, varname in array_specs | dictsort(by='value') %} + {% if not var in dynamic_array_specs and not var.read_only %} + if (name == "{{var.owner.name}}.{{var.name}}") { + var_size = {{var.size}}; + data_size = {{var.size}}*sizeof({{c_data_type(var.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.dtype, get_array_name(var)) }} + } else { + // set from file + set_variable_from_file(name, {{get_array_name(var)}}, data_size, s_value); + } + return; + } + {% endif %} + {% endfor %} + // dynamic arrays (1d) + {% for var, varname in dynamic_array_specs | dictsort(by='value') %} + {% if not var.read_only %} + if (name == "{{var.owner.name}}.{{var.name}}") { + var_size = {{get_array_name(var, access_data=False)}}.size(); + data_size = var_size*sizeof({{c_data_type(var.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.dtype, "&" + get_array_name(var, False) + "[0]") }} + } else { + // set from file + set_variable_from_file(name, &{{get_array_name(var, False)}}[0], data_size, s_value); + } + return; + } + {% endif %} + {% endfor %} + {% for var, varname in timed_arrays | dictsort(by='value') %} + if (name == "{{varname}}.values") { + var_size = {{var.values.size}}; + data_size = var_size*sizeof({{c_data_type(var.values.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.values.dtype, varname + "_values") }} + + } else { + // set from file + set_variable_from_file(name, {{varname}}_values, data_size, s_value); + } + return; + } + {% endfor %} + std::cerr << "Cannot set unknown variable '" << name << "'." 
<< std::endl; + exit(1); +} //////////////// arrays /////////////////// {% for var, varname in array_specs | dictsort(by='value') %} {% if not var in dynamic_array_specs %} @@ -140,7 +258,7 @@ void _write_arrays() {% for var, varname in array_specs | dictsort(by='value') %} {% if not (var in dynamic_array_specs or var in dynamic_array_2d_specs) %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { outfile_{{varname}}.write(reinterpret_cast<char*>({{varname}}), {{var.size}}*sizeof({{get_array_name(var)}}[0])); @@ -154,7 +272,7 @@ void _write_arrays() {% for var, varname in dynamic_array_specs | dictsort(by='value') %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { if (! {{varname}}.empty() ) @@ -170,7 +288,7 @@ void _write_arrays() {% for var, varname in dynamic_array_2d_specs | dictsort(by='value') %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { for (int n=0; n<{{varname}}.n; n++) @@ -189,7 +307,7 @@ void _write_arrays() {% if profiled_codeobjects is defined and profiled_codeobjects %} // Write profiling info to disk ofstream outfile_profiling_info; - outfile_profiling_info.open("results/profiling_info.txt", ios::out); + outfile_profiling_info.open(results_dir + "profiling_info.txt", ios::out); if(outfile_profiling_info.is_open()) { {% for codeobj in profiled_codeobjects | sort %} @@ -203,7 +321,7 @@ void _write_arrays() {% endif %} // Write last run info to disk ofstream outfile_last_run_info; - outfile_last_run_info.open("results/last_run_info.txt", ios::out); + outfile_last_run_info.open(results_dir + "last_run_info.txt", ios::out); if(outfile_last_run_info.is_open()) { outfile_last_run_info << (Network::_last_run_time) << " " << (Network::_last_run_completed_fraction) << std::endl; @@ -260,6 +378,7 @@ void _dealloc_arrays() namespace brian { +extern std::string results_dir; // In OpenMP we need one state per thread extern std::vector< rk_state* > _mersenne_twister_states; @@ -273,6 +392,10 @@ extern Clock {{clock.name}}; {% endfor %} //////////////// networks ///////////////// {% for net in networks | sort(attribute='name') %} extern Network {{net.name}}; {% endfor %} + + +void set_variable_by_name(std::string, std::string); + //////////////// dynamic arrays /////////// {% for var, varname in dynamic_array_specs | dictsort(by='value') %} extern std::vector<{{c_data_type(var.dtype)}}> {{varname}}; diff --git a/brian2/devices/device.py b/brian2/devices/device.py index 70703da4d..15d2e003d 100644 --- a/brian2/devices/device.py +++ b/brian2/devices/device.py @@ -483,6 +483,17 @@ def __init__(self): self.randn_buffer_index = np.zeros(1, dtype=np.int32) self.rand_buffer_index = np.zeros(1, dtype=np.int32) + def __getstate__(self): + state = dict(self.__dict__) + # Python's pickle module cannot pickle a WeakKeyDictionary, we therefore + # convert it to a standard dictionary + state["arrays"] = dict(self.arrays) + return state + + def __setstate__(self, state): + self.__dict__ = state + self.__dict__["arrays"] = 
WeakKeyDictionary(self.__dict__["arrays"]) + def get_array_name(self, var, access_data=True): # if no owner is set, this is a temporary object (e.g. the array # of indices when doing G.x[indices] = ...). The name is not diff --git a/brian2/groups/group.py b/brian2/groups/group.py index 6c080c763..973198e9b 100644 --- a/brian2/groups/group.py +++ b/brian2/groups/group.py @@ -1206,6 +1206,15 @@ def __init__( self.generate_empty_code = generate_empty_code self.codeobj = None + def __getstate__(self): + state = self.__dict__.copy() + state["group"] = state["group"].__repr__.__self__ + return state + + def __setstate__(self, state): + state["group"] = weakproxy_with_fallback(state["group"]) + self.__dict__ = state + def update_abstract_code(self, run_namespace): """ Update the abstract code for the code object. Will be called in diff --git a/brian2/input/timedarray.py b/brian2/input/timedarray.py index d48887fd9..447b92703 100644 --- a/brian2/input/timedarray.py +++ b/brian2/input/timedarray.py @@ -358,7 +358,7 @@ def is_locally_constant(self, dt): "dt of the TimedArray is not an integer multiple of " "the group's dt, the TimedArray's return value can " "therefore not be considered constant over one " - "timestep, making linear integration impossible.", + "timestep, making exact integration impossible.", once=True, ) return False diff --git a/brian2/monitors/statemonitor.py b/brian2/monitors/statemonitor.py index 32601fcdb..ac7556d1c 100644 --- a/brian2/monitors/statemonitor.py +++ b/brian2/monitors/statemonitor.py @@ -276,6 +276,7 @@ def __init__( dimensions=second.dim, constant=False, dtype=self._clock.variables["t"].dtype, + read_only=True, ) self.variables.add_array( "N", dtype=np.int32, size=1, scalar=True, read_only=True diff --git a/brian2/synapses/synapses.py b/brian2/synapses/synapses.py index 7144da332..9acce79e9 100644 --- a/brian2/synapses/synapses.py +++ b/brian2/synapses/synapses.py @@ -980,10 +980,11 @@ def __init__( self._pathways = [] if isinstance(on_event, str): - events_dict = defaultdict(lambda: on_event) + self.default_event = on_event + events_dict = {} else: - events_dict = defaultdict(lambda: "spike") - events_dict.update(on_event) + self.default_event = "spike" + events_dict = dict(on_event) #: "Events" for all the pathways self.events = events_dict @@ -993,7 +994,10 @@ def __init__( if isinstance(argument, str): pathway_delay = delay.get(prepost, None) self._add_updater( - argument, prepost, delay=pathway_delay, event=self.events[prepost] + argument, + prepost, + delay=pathway_delay, + event=self.events.get(prepost, self.default_event), ) elif isinstance(argument, Mapping): for key, value in argument.items(): @@ -1010,7 +1014,7 @@ def __init__( prepost, objname=key, delay=pathway_delay, - event=self.events.get(key, self.default_event), ) # Check whether any delays were specified for pathways that don't exist diff --git a/brian2/tests/test_cpp_standalone.py b/brian2/tests/test_cpp_standalone.py index 52a7f18fc..0d4db56b8 100644 --- a/brian2/tests/test_cpp_standalone.py +++ b/brian2/tests/test_cpp_standalone.py @@ -2,9 +2,10 @@ import tempfile import pytest -from numpy.testing import assert_equal +from numpy.testing import assert_allclose, assert_equal from brian2 import * +from brian2.devices import device_module from brian2.devices.device import reinit_and_delete, reset_device, set_device from brian2.tests.utils import assert_allclose from brian2.utils.logger import catch_logs @@ -660,6 +661,308 @@ def test_constant_replacement(): assert 
G.y[0] == 42.0 +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_parameter_without_recompile(): + prefs.core.default_float_dtype = np.float32 + set_device("cpp_standalone", directory=None, with_output=True, debug=True) + on_off = TimedArray([True, False, True], dt=defaultclock.dt, name="on_off") + stim = TimedArray( + np.arange(30).reshape(3, 10) * nA, dt=defaultclock.dt, name="stim" + ) + G = NeuronGroup( + 10, + """ + x : 1 (constant) + v : volt (constant) + n : integer (constant) + b : boolean (constant) + s = int(on_off(t))*stim(t, i) : amp + """, + name="neurons", + ) + G.x = np.arange(10) + G.n = np.arange(10) + G.b = np.arange(10) % 2 == 0 + G.v = np.arange(10) * volt + mon = StateMonitor(G, "s", record=True) + run(3 * defaultclock.dt) + assert array_equal(G.x, np.arange(10)) + assert array_equal(G.n, np.arange(10)) + assert array_equal(G.b, np.arange(10) % 2 == 0) + assert array_equal(G.v, np.arange(10) * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # on_off(t) == False + [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + ] + ), + ) + + device.run( + run_args=[ + "neurons.x=5", + "neurons.v=3", + "neurons.n=17", + "neurons.b=True", + "on_off.values=True", + ] + ) + assert array_equal(G.x, np.ones(10) * 5) + assert array_equal(G.n, np.ones(10) * 17) + assert array_equal(G.b, np.ones(10, dtype=bool)) + assert array_equal(G.v, np.ones(10) * 3 * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], + [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + ] + ), + ) + ar = np.arange(10) * 2.0 + ar.astype(G.x.dtype).tofile(os.path.join(device.project_dir, "init_values_x1.dat")) + ar.astype(G.n.dtype).tofile(os.path.join(device.project_dir, "init_values_n1.dat")) + (np.arange(10) % 2 != 0).tofile( + os.path.join(device.project_dir, "init_values_b1.dat") + ) + ar.astype(G.v.dtype).tofile(os.path.join(device.project_dir, "init_values_v1.dat")) + ar2 = 2 * np.arange(30).reshape(3, 10) * nA + ar2.astype(stim.values.dtype).tofile( + os.path.join(device.project_dir, "init_stim_values.dat") + ) + device.run( + run_args=[ + "neurons.v=init_values_v1.dat", + "neurons.x=init_values_x1.dat", + "neurons.b=init_values_b1.dat", + "neurons.n=init_values_n1.dat", + "stim.values=init_stim_values.dat", + ] + ) + assert array_equal(G.x, ar) + assert array_equal(G.n, ar) + assert array_equal(G.b, np.arange(10) % 2 != 0) + assert array_equal(G.v, ar * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # on_off(t) == False + [40, 42, 44, 46, 48, 50, 52, 54, 56, 58], + ] + ), + ) + reset_device() + + +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_parameter_without_recompile_errors(): + set_device("cpp_standalone", directory=None, with_output=False) + G = NeuronGroup(10, "v:volt", name="neurons") + G.v = np.arange(10) * volt + + run(0 * ms) + + with pytest.raises(DimensionMismatchError): + device.run(run_args={G.v: 5}) + with pytest.raises(DimensionMismatchError): + device.run(run_args={G.v: 5 * siemens}) + with pytest.raises(TypeError): + device.run(run_args={G.v: np.arange(9) * volt}) + + reset_device() + + +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_parameter_without_recompile_dict_syntax(): + set_device("cpp_standalone", directory=None, with_output=False) + on_off = TimedArray([True, False, True], 
dt=defaultclock.dt, name="on_off") + stim = TimedArray( + np.arange(30).reshape(3, 10) * nA, dt=defaultclock.dt, name="stim" + ) + G = NeuronGroup( + 10, + """ + x : 1 (constant) + n : integer (constant) + b : boolean (constant) + v : volt (constant) + s = int(on_off(t))*stim(t, i) : amp + """, + name="neurons", + ) + G.x = np.arange(10) + G.n = np.arange(10) + G.b = np.arange(10) % 2 == 0 + G.v = np.arange(10) * volt + mon = StateMonitor(G, "s", record=True) + run(3 * defaultclock.dt) + assert array_equal(G.x, np.arange(10)) + assert array_equal(G.n, np.arange(10)) + assert array_equal(G.b, np.arange(10) % 2 == 0) + assert array_equal(G.v, np.arange(10) * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # on_off(t) == False + [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + ] + ), + ) + device.run(run_args={G.x: 5, G.v: 3 * volt, G.n: 17, G.b: True, on_off: True}) + assert array_equal(G.x, np.ones(10) * 5) + assert array_equal(G.n, np.ones(10) * 17) + assert array_equal(G.b, np.ones(10, dtype=bool)) + assert array_equal(G.v, np.ones(10) * 3 * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], + [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + ] + ), + ) + ar = np.arange(10) * 2.0 + ar2 = 2 * np.arange(30).reshape(3, 10) * nA + device.run( + run_args={ + G.x: ar, + G.v: ar * volt, + G.n: ar, + G.b: np.arange(10) % 2 != 0, + stim: ar2, + } + ) + assert array_equal(G.x, ar) + assert array_equal(G.n, ar) + assert array_equal(G.b, np.arange(10) % 2 != 0) + assert array_equal(G.v, ar * volt) + assert_allclose( + mon.s.T / nA, + np.array( + [ + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # on_off(t) == False + [40, 42, 44, 46, 48, 50, 52, 54, 56, 58], + ] + ), + ) + reset_device() + + +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_synapse_parameter_without_recompile_dict_syntax(): + set_device("cpp_standalone", directory=None, with_output=False) + G = NeuronGroup(10, "", name="neurons") + S = Synapses(G, G, "w:1", name="synapses") + S.connect(j="i") + S.w = np.arange(10) + run(0 * ms) + assert array_equal(S.w, np.arange(10)) + + device.run(run_args={S.w: 17}) + assert array_equal(S.w, np.ones(10) * 17) + + ar = np.arange(10) * 2.0 + device.run(run_args={S.w: ar}) + assert array_equal(S.w, ar) + + reset_device() + + +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_parameter_without_recompile_dependencies(): + set_device("cpp_standalone", directory=None, with_output=False) + G = NeuronGroup( + 10, + """ + v:volt + w:1 + """, + name="neurons", + ) + G.v = np.arange(10) * volt + device.apply_run_args() + G.w = "v/volt*2" + + run(0 * ms) + assert array_equal(G.v, np.arange(10)) + assert array_equal(G.w, np.arange(10) * 2) + + device.run(run_args=["neurons.v=5"]) + assert array_equal(G.v, np.ones(10) * 5 * volt) + assert array_equal(G.w, np.ones(10) * 5 * 2) + + ar = np.arange(10) * 2.0 + ar.astype(G.v.dtype).tofile(os.path.join(device.project_dir, "init_values_v2.dat")) + device.run(run_args=[f"neurons.v=init_values_v2.dat"]) + assert array_equal(G.v, ar * volt) + assert array_equal(G.w, ar * 2) + + reset_device() + + +class RunSim: + def __init__(self): + self.device = get_device() + self.G = NeuronGroup( + 10, + """ + v:volt + w:1 + x:1 + """, + name="neurons", + ) + run(0 * ms) + + def run_sim(self, idx): + # Ugly hack needed for windows + device_module.active_device = 
self.device + device.run( + results_directory=f"results_{idx}", + run_args={ + self.G.v: idx * volt, + self.G.w: np.arange(10), # Same values for all processes + self.G.x: np.arange(10) * idx, # Different values + }, + ) + return self.G.v[:], self.G.w[:], self.G.x[:] + + +@pytest.mark.cpp_standalone +@pytest.mark.standalone_only +def test_change_parameters_multiprocessing(): + set_device("cpp_standalone", directory=None) + sim = RunSim() + + import multiprocessing + + with multiprocessing.Pool() as p: + results = p.map(sim.run_sim, range(5)) + + for idx, result in zip(range(5), results): + v, w, x = result + assert array_equal(v, np.ones(10) * idx * volt) + assert array_equal(w, np.arange(10)) + assert array_equal(x, np.arange(10) * idx) + + @pytest.mark.cpp_standalone @pytest.mark.standalone_only def test_header_file_inclusion(): @@ -724,6 +1027,12 @@ def brian_function(index): test_delete_code_data, test_delete_directory, test_multiple_standalone_runs, + test_change_parameter_without_recompile, + test_change_parameter_without_recompile_errors, + test_change_parameter_without_recompile_dict_syntax, + test_change_parameter_without_recompile_dependencies, + test_change_synapse_parameter_without_recompile_dict_syntax, + test_change_parameters_multiprocessing, test_header_file_inclusion, ]: t() diff --git a/docs_sphinx/developer/standalone.rst b/docs_sphinx/developer/standalone.rst index 54c4b55c2..60cc126d1 100644 --- a/docs_sphinx/developer/standalone.rst +++ b/docs_sphinx/developer/standalone.rst @@ -11,6 +11,7 @@ This – currently very incomplete – document describes some of the implementa Array cache ----------- + As described in :ref:`standalone variables <standalone_variables>`, in standalone mode Python code does not usually have access to state variables and synaptic indices, since the code necessary to initialize/create them has not been run yet. Concretely, accessing a state variable (or other variables like synaptic indices), will call @@ -38,3 +39,36 @@ previously stored values:: >>> print(device.array_cache[v_var]) # Array cache for v has been invalidated None >>> set_device('runtime') # Reset device to avoid problems in other doctests + +Command line arguments +---------------------- + +The mechanisms described in :ref:`standalone_multiple_full_runs` are implemented via command-line arguments +to the ``main`` binary. A call such as:: + + device.run(results_directory='results_1', run_args={group.tau : 10*ms}) + +will be executed by calling the compiled binary as follows: + +.. code-block:: verbatim + + ./main --results_dir /full/path/to/results_1 neurongroup.tau=0.01 + +where ``neurongroup`` is ``group.name`` (either a default name, e.g. ``neurongroup``, or the name +set during construction with the ``name`` argument). The generated code applies the initialization +for ``tau`` after the usual variable initializations, before the call of ``network.run`` +(assuming that this application point has not been moved by using +`~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.apply_run_args`). + +For initializations with a vector of values, the values are written to disk (in the same simple +binary format that is used elsewhere, e.g. for the results). The command line argument then +specifies the file name instead of a value: + +.. code-block:: verbatim + + ./main neurongroup.tau=static_arrays/init_neurongroup_tau_aca4cd6a3f7e526a61bb5a07468b377e.dat + +The hex string in the filename is an automatically generated MD5 hash of the array content. 
This +makes it possible to ensure that each array is only written to disk once, even for repeated and +parallel executions with the same values (file locking is used to make sure only one process +writes to each file). diff --git a/docs_sphinx/resources b/docs_sphinx/resources index 4e0bdc48a..46f534022 160000 --- a/docs_sphinx/resources +++ b/docs_sphinx/resources @@ -1 +1 @@ -Subproject commit 4e0bdc48a83c6f9c65f2fd8afd24f30121308c8b +Subproject commit 46f5340227ad8e8551bbf1c256321efa90848918 diff --git a/docs_sphinx/user/computation.rst b/docs_sphinx/user/computation.rst index ce9aaa20c..8cbbe1459 100644 --- a/docs_sphinx/user/computation.rst +++ b/docs_sphinx/user/computation.rst @@ -3,7 +3,7 @@ Computational methods and efficiency .. contents:: :local: - :depth: 1 + :depth: 2 Brian has several different methods for running the computations in a simulation. The default mode is :ref:`runtime`, which runs the simulation loop @@ -94,7 +94,7 @@ At the beginning of the script, i.e. after the import statements, add:: set_device('cpp_standalone') -The `CPPStandaloneDevice.build` function will be automatically called with default arguments right after the `run` +The `Device.build` function will be automatically called with default arguments right after the `run` call. If you need non-standard arguments then you can specify them as part of the `set_device` call:: set_device('cpp_standalone', directory='my_directory', debug=True) @@ -105,28 +105,195 @@ At the beginning of the script, i.e. after the import statements, add:: set_device('cpp_standalone', build_on_run=False) -After the last `run` call, call `device.build` explicitly:: - device.build(directory='output', compile=True, run=True, debug=False) +After the last `run` call, call `CPPStandaloneDevice.build` explicitly:: + device.build() -The `~CPPStandaloneDevice.build` function has several arguments to specify the output directory, whether or not to +The `~Device.build` function has several arguments to specify the output directory, whether or not to compile and run the project after creating it and whether or not to compile it with debugging support or not. -Multiple builds -~~~~~~~~~~~~~~~ -To run multiple full simulations (i.e. multiple ``device.build`` calls, not just -multiple `run` calls as discussed above), you have to reinitialize the device -again:: +.. _standalone_multiple_full_runs: +Multiple full simulation runs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To run multiple full, independent simulations (i.e. not just multiple `run` calls as discussed above), you can +use the device's `~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.run` function after an initial build. This will use the previously +generated and compiled code, and will therefore run immediately. Note that you cannot change the model or its +parameters in the usual way between the `~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.build` and +`~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.run` calls. +If you want to change some of its parameters, you will have to use the ``run_args`` argument as described below. - device.reinit() - device.activate() +Running multiple simulations with same parameters ++++++++++++++++++++++++++++++++++++++++++++++++++ +By default, a device's `~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.run` will run the simulation again, +using the same model parameters and initializations. This can be useful when the model is itself stochastic +(e.g. 
using the ``xi`` noise term in the equations, using a stochastic group such as `PoissonGroup` or +`PoissonInput`, etc.), when it uses random synaptic connections, or when it uses random variable initialization:: + + set_device('cpp_standalone') + group = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1') # a simple IF neuron without threshold + group.v = 'rand()' # v is randomly initialized between 0 and 1 + mon = StateMonitor(group, 'v', record=0) + run(100*ms) # calls device.build and device.run + results = [mon.v[0]] + # Do 9 more runs without recompiling, each time initializing v to a new value + for _ in range(9): + device.run() + results.append(mon.v[0]) + +For more consistent code, you might consider disabling the automatic ``device.build``/``device.run`` call, so +that the initial run of the simulation is not different from subsequent runs:: + + set_device('cpp_standalone', build_on_run=False) + # ... Set up model as before + run(100*ms) # will not call device.build/device.run + device.build(run=False) # Compile the code + results = [] + # Do 10 runs without recompiling, each time initializing v to a new value + for _ in range(10): + device.run() + results.append(mon.v[0]) + + +Running multiple simulations with different parameters ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +When launching new simulation runs as described above, you can also change parameters of the model. Note that this +only concerns parameters that are included in equations; you cannot change externally defined constants. You can +easily work around this limitation, however, by declaring such constants in the equations, using the ``(shared, constant)`` +flags. Here's a similar example to the one shown before, now exploring the effect of the time constant ``tau``, while +ensuring via a `seed` call that the random initializations are identical across runs:: + + set_device('cpp_standalone', build_on_run=False) + seed(111) # same random numbers for each run + group = NeuronGroup(10, '''dv/dt = -v / tau : 1 + tau : second (shared, constant)''') # 10 simple IF neurons without threshold + group.v = 'rand()' + mon = StateMonitor(group, 'v', record=0) + run(100*ms) + device.build(run=False) # Compile the code + results = [] + # Do 10 runs without recompiling, each time setting group.tau to a new value + for tau_value in (np.arange(10)+1)*5*ms: + device.run(run_args={group.tau: tau_value}) + results.append(mon.v[:]) + +You can use the same mechanism to provide an array of initial values for a group. 
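+Instead of a dictionary, ``run_args`` also accepts a list of plain ``name=value`` strings, which are passed
+unchanged to the compiled binary (see the developer documentation on command line arguments for details). A
+minimal sketch, assuming the group uses Brian's default name ``neurongroup``, and noting that values have to
+be given in Brian's base units (i.e. seconds for ``tau``)::
+
+    # Equivalent to device.run(run_args={group.tau: 5*ms}),
+    # assuming that group.name == 'neurongroup'
+    device.run(run_args=['neurongroup.tau=0.005'])
+
+For arrays of values, the dictionary syntax used in the following examples is more convenient, since it takes
+care of object names, units, and writing the values to disk automatically.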
E.g., to systematically try out +different initializations of ``v``, you could use:: + + set_device('cpp_standalone', build_on_run=False) + group = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1') # ten simple IF neurons without threshold + mon = StateMonitor(group, 'v', record=True) + run(100*ms) # will not call device.build/device.run + device.build(run=False) # Compile the code + results = [] + # Do 10 runs without recompiling, each time initializing v differently + for idx in range(10): + device.run(run_args={group.v: np.arange(10)*0.01 + 0.1*idx}) + results.append(mon.v[0]) + +You can also overwrite the values in a `TimedArray` using this mechanism, by using the `TimedArray` as a key in the +``run_args`` dictionary:: + + set_device('cpp_standalone', build_on_run=False) + stim = TimedArray(np.zeros(10), dt=10*ms) + group = NeuronGroup(10, 'dv/dt = (stim(t) - v) / (10*ms) : 1') # time-dependent stimulus + mon = StateMonitor(group, 'v', record=True) + run(100 * ms) + device.build(run=False) + results = [] + # Do 10 runs with a 10 ms stimulus pulse at a random time + for idx in range(10): + values = np.zeros(10) + values[np.random.randint(0, 10)] = 1 + device.run(run_args={stim: values}) + results.append(mon.v[0]) + +By default, the initialization provided via ``run_args`` overwrites any initializations done in the usual way. +This might not exactly do what you want if you use string-based variable initializations that refer to each other. +For example, if your equations contain two synaptic time constants ``tau_exc`` and ``tau_inh``, and you always +want the latter to be twice the value of the former, you can write:: + + group.tau_exc = 5*ms + group.tau_inh = 'tau_exc * 2' + +If you now use the ``run_args`` argument to set ``tau_exc`` to a different value, this will not be taken into account +for setting ``tau_inh``, since the value change for ``tau_exc`` happens *after* the initialization of ``tau_inh``. +Of course you can simply set the value for ``tau_inh`` manually using ``run_args`` as well, but a more general solution +is to move the point where the ``run_args`` are applied. You can do this by calling the device's +`~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.apply_run_args` function:: + + group.tau_exc = 5*ms + device.apply_run_args() + group.tau_inh = 'tau_exc * 2' + +With this change, setting ``tau_exc`` via ``run_args`` will affect the value of ``tau_inh``. + +Running multiple simulations in parallel +++++++++++++++++++++++++++++++++++++++++ + +The techniques mentioned above cannot be directly used to run simulations in parallel (e.g. with Python's +`multiprocessing` module), since all of them will try to write the results to the same place. You can +circumvent this problem by specifying the ``results_directory`` argument, and setting it to a different value +for each run. Note that using the standalone device with `multiprocessing` can be a bit tricky, since the +currently selected device is stored globally in the ``device`` module. Use the approach presented below to +make sure the device is selected correctly. 
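+As a minimal sketch of the ``results_directory`` argument itself (assuming a model that has already been
+built with ``build_on_run=False`` as in the previous examples, and sequential rather than parallel execution
+for simplicity), two runs writing their results to separate directories could look like this::
+
+    device.run(results_directory='run_A', run_args={group.v: 0.1})  # results in <project_dir>/run_A
+    device.run(results_directory='run_B', run_args={group.v: 0.2})  # results in <project_dir>/run_B
+
+Note that ``results_directory`` has to be a relative path, which is interpreted relative to the project
+directory.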
Here's a variant of the previously shown example running a +simulation with random initialization repeatedly, this time running everything in parallel using Python's +`multiprocessing` module:: + + class SimWrapper: + def __init__(self): + # Runs once to set up the simulation + group = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1', name='group') + group.v = 'rand()' # v is randomly initialized between 0 and 1 + mon = StateMonitor(group, 'v', record=0, name='monitor') + # Store everything in a network + self.network = Network([group, mon]) + self.network.run(100*ms) + device.build(run=False) + self.device = get_device() # store device object + + def do_run(self, result_dir): + # Runs in every process + # Workaround to set the device globally in this context + from brian2.devices import device_module + device_module.active_device = self.device + self.device.run(results_directory=result_dir) + # Return the results + return self.network['monitor'].v[0] + + if __name__ == '__main__': # Important for running on Windows and OS X + set_device('cpp_standalone', build_on_run=False) + sim = SimWrapper() + import multiprocessing + with multiprocessing.Pool() as p: + # Run 10 simulations in parallel + results = p.map(sim.do_run, [f'result_{idx}' for idx in range(10)]) + +You can also use parallel runs with the ``run_args`` argument. For example, to do 10 simulations +with different (deterministic) initial values for ``v``:: + + class SimWrapper: + # ... model definition without random initialization + + def do_run(self, v_init): + # Set result directory based on variable + result_dir = f'result_{v_init}' + self.device.run(run_args={self.network['group'].v: v_init}, + results_directory=result_dir) + # Return the results + return self.network['monitor'].v[0] + + if __name__ == '__main__': # Important for running on Windows and OS X + set_device('cpp_standalone', build_on_run=False) + sim = SimWrapper() + import multiprocessing + with multiprocessing.Pool() as p: + # Run 10 simulations in parallel + results = p.map(sim.do_run, np.linspace(0, 1, 10)) -Note that the device "forgets" about all previously set build options provided -to `set_device` (most importantly the ``build_on_run`` option, but also e.g. the -directory), you'll have to specify them as part of the `Device.activate` call. -Also, `Device.activate` will reset the `defaultclock`, you'll therefore have to -set its ``dt`` *after* the ``activate`` call if you want to use a non-default -value. Limitations ~~~~~~~~~~~ @@ -150,9 +317,10 @@ C++ source code and modifying it, or by inserting code directly into the main lo Variables ~~~~~~~~~ In standalone mode, code will only be executed when the simulation is run (after the `run` call by default, or after a call -to `.Device.build`, if `set_device` has been called with ``build_on_run`` set to ``False``). This means that it is not -possible to access state variables and synaptic connection indices in the Python script doing the set up of the model. For -example, the following code would work fine in runtime mode, but raise a ``NotImplementedError`` in standalone mode:: +to `~brian2.devices.cpp_standalone.device.CPPStandaloneDevice.build`, if `set_device` has been called with ``build_on_run`` set to ``False``). +This means that it is not possible to access state variables and synaptic connection indices in the Python script doing the +set up of the model. 
For example, the following code would work fine in runtime mode, but raise a ``NotImplementedError`` +in standalone mode:: neuron = NeuronGroup(10, 'v : volt') neuron.v = '-70*mV + rand()*10*mV' diff --git a/examples/frompapers/Stimberg_et_al_2019/example_3_bisection.py b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection.py new file mode 100644 index 000000000..aed0d98d8 --- /dev/null +++ b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +""" +Reproduces Figure 4B from Stimberg et al. (2019). + +Same code as in the https://github.com/brian-team/brian2_paper_examples repository, but using matplotlib instead of +Plotly for plotting. + +Marcel Stimberg, Romain Brette, Dan FM Goodman (2019) +Brian 2, an intuitive and efficient neural simulator eLife 8:e47314 + +https://doi.org/10.7554/eLife.47314 +""" +from brian2 import * + +defaultclock.dt = 0.01*ms # small time step for stiff equations + +# Our model of the neuron is based on the classical model from Hodgkin and Huxley (1952). Note that this is not +# actually a model of a neuron, but rather of a (space-clamped) axon. However, to avoid confusion with spatially +# extended models, we simply use the term "neuron" here. In this model, the membrane potential is shifted, i.e. the +# resting potential is at 0mV: +El = 10.613*mV +ENa = 115*mV +EK = -12*mV +gl = 0.3*msiemens/cm**2 +gK = 36*msiemens/cm**2 +gNa_max = 100*msiemens/cm**2 +gNa_min = 15*msiemens/cm**2 +C = 1*uF/cm**2 + +eqs = """ +dv/dt = (gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v)) / C : volt +gNa : siemens/meter**2 (constant) +dm/dt = alpham * (1-m) - betam * m : 1 +dn/dt = alphan * (1-n) - betan * n : 1 +dh/dt = alphah * (1-h) - betah * h : 1 +alpham = (0.1/mV) * 10*mV / exprel((-v+25*mV) / (10*mV))/ms : Hz +betam = 4 * exp(-v/(18*mV))/ms : Hz +alphah = 0.07 * exp(-v/(20*mV))/ms : Hz +betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz +alphan = (0.01/mV) * 10*mV / exprel((-v+10*mV) / (10*mV))/ms : Hz +betan = 0.125*exp(-v/(80*mV))/ms : Hz +""" + +# We simulate 100 neurons at the same time, each of them having a density of sodium channels between 15 and 100 mS/cm²: +neurons = NeuronGroup(100, eqs, method="rk4", threshold="v>50*mV", reset="") +neurons.gNa = "gNa_min + (gNa_max - gNa_min)*1.0*i/N" + +# We initialize the state variables to their resting state values; note that the values for $m$, $n$, $h$ depend on the +# values of $\alpha_m$, $\beta_m$, etc. which themselves depend on $v$. The order of the assignments ($v$ is +# initialized before $m$, $n$, and $h$) therefore matters, something that is naturally expressed by stating initial +# values as sequential assignments to the state variables. In a declarative approach, this would be potentially +# ambiguous. + +neurons.v = 0*mV +neurons.m = "1/(1 + betam/alpham)" +neurons.n = "1/(1 + betan/alphan)" +neurons.h = "1/(1 + betah/alphah)" + +# We record the spiking activity of the neurons and store the current network state so that we can later restore it and +# run another iteration of our experiment: + +S = SpikeMonitor(neurons) +store() + +# The algorithm we use here to find the voltage threshold is a simple bisection: we try to find the threshold voltage of +# a neuron by repeatedly testing values and increasing or decreasing these values depending on whether we observe a +# spike or not. By continuously halving the size of the correction, we quickly converge to a precise estimate. 
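+# (As a rough arithmetic check of the convergence rate: the correction step starts at 25 mV and is halved
+# after each of the ten iterations below, so the final estimates are accurate to about 25 mV / 2**10 ≈ 0.02 mV.)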
+# +# We start with the same initial estimate for all segments, 25mV above the resting potential, and the same value for +# the size of the "correction step": + +v0 = 25*mV*np.ones(len(neurons)) +step = 25*mV + +# For later visualization of how the estimates converged towards their final values, we also store the intermediate values of the estimates: +estimates = np.full((11, len(neurons)), np.nan)*mV +estimates[0, :] = v0 + +# We now run 10 iterations of our algorithm: + +for i in range(10): + print(".", end="") + # Reset to the initial state + restore() + # Set the membrane potential to our threshold estimate + neurons.v = v0 + # Note that we do not update the initial values of the gating variables to their steady state values with + # respect to v; they keep the values corresponding to the resting potential. + # Run the simulation for 20ms + run(20*ms) + # Decrease the estimates for neurons that spiked + v0[S.count > 0] -= step + # Increase the estimate for neurons that did not spike + v0[S.count == 0] += step + # Reduce step size and store current estimate + step /= 2.0 + estimates[i + 1, :] = v0 +print() + +# After the 10 iteration steps, we plot the results: +plt.rcParams.update({'axes.spines.top': False, + 'axes.spines.right': False}) +fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(8, 3), layout="constrained") + +colors = ["#1f77b4", "#ff7f03", "#2ca02c"] +examples = [10, 50, 90] +for example, color in zip(examples, colors): + ax1.plot(np.arange(11), estimates[:, example] / mV, + "o-", mec='white', lw=2, color=color, clip_on=False, ms=10, zorder=100, + label=f"gNa = {(neurons.gNa[example]/(mS/cm**2)):.1f}mS/cm$^2$") + +ax2.plot(neurons.gNa/(mS/cm**2), v0/mV, color="gray", lw=2) + +for idx, (example, color) in enumerate(zip(examples, colors)): + ax2.plot([neurons.gNa[example]/(mS/cm**2)], [estimates[-1, example]/mV], "o", color=color, mec="white", ms=10, + label=f"gNa = {(neurons.gNa[example]/(mS/cm**2)):.1f}mS/cm$^2$") + +ax1.set(title="iteration", ylim=(0, 45), ylabel="threshold estimate (mV)") +ax2.set(title="gNa (mS/cm²)") + +plt.show() diff --git a/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py new file mode 100644 index 000000000..2446744cb --- /dev/null +++ b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +""" +Reproduces Figure 4B from Stimberg et al. (2019). + +In contrast to the original example, this version uses the standalone mode of Brian 2, using the new ``run_args`` +feature introduced in version 2.6. + +Marcel Stimberg, Romain Brette, Dan FM Goodman (2019) +Brian 2, an intuitive and efficient neural simulator eLife 8:e47314 + +https://doi.org/10.7554/eLife.47314 +""" + +from brian2 import * + +defaultclock.dt = 0.01*ms # small time step for stiff equations +set_device('cpp_standalone', build_on_run=False) + +# Our model of the neuron is based on the classical model from Hodgkin and Huxley (1952). Note that this is not +# actually a model of a neuron, but rather of a (space-clamped) axon. However, to avoid confusion with spatially +# extended models, we simply use the term "neuron" here. In this model, the membrane potential is shifted, i.e. 
diff --git a/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py
new file mode 100644
index 000000000..2446744cb
--- /dev/null
+++ b/examples/frompapers/Stimberg_et_al_2019/example_3_bisection_standalone.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+"""
+Reproduces Figure 4B from Stimberg et al. (2019).
+
+In contrast to the original example, this version runs in the standalone mode of Brian 2, making use of the new
+``run_args`` feature introduced in version 2.6.
+
+Marcel Stimberg, Romain Brette, Dan FM Goodman (2019)
+Brian 2, an intuitive and efficient neural simulator eLife 8:e47314
+
+https://doi.org/10.7554/eLife.47314
+"""
+
+from brian2 import *
+
+defaultclock.dt = 0.01*ms # small time step for stiff equations
+set_device('cpp_standalone', build_on_run=False)
+
+# Our model of the neuron is based on the classical model from Hodgkin and Huxley (1952). Note that this is not
+# actually a model of a neuron, but rather of a (space-clamped) axon. However, to avoid confusion with spatially
+# extended models, we simply use the term "neuron" here. In this model, the membrane potential is shifted, i.e. the
+# resting potential is at 0mV:
+El = 10.613*mV
+ENa = 115*mV
+EK = -12*mV
+gl = 0.3*msiemens/cm**2
+gK = 36*msiemens/cm**2
+gNa_max = 100*msiemens/cm**2
+gNa_min = 15*msiemens/cm**2
+C = 1*uF/cm**2
+
+eqs = """
+dv/dt = (gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v)) / C : volt
+gNa : siemens/meter**2 (constant)
+dm/dt = alpham * (1-m) - betam * m : 1
+dn/dt = alphan * (1-n) - betan * n : 1
+dh/dt = alphah * (1-h) - betah * h : 1
+alpham = (0.1/mV) * 10*mV / exprel((-v+25*mV) / (10*mV))/ms : Hz
+betam = 4 * exp(-v/(18*mV))/ms : Hz
+alphah = 0.07 * exp(-v/(20*mV))/ms : Hz
+betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz
+alphan = (0.01/mV) * 10*mV / exprel((-v+10*mV) / (10*mV))/ms : Hz
+betan = 0.125*exp(-v/(80*mV))/ms : Hz
+"""
+
+# We simulate 100 neurons at the same time, each with a sodium-channel density between 15 and 100 mS/cm²:
+neurons = NeuronGroup(100, eqs, method="rk4", threshold="v>50*mV", reset="")
+neurons.gNa = "gNa_min + (gNa_max - gNa_min)*1.0*i/N"
+
+# We initialize the state variables to their resting-state values. Note that the values for $m$, $n$, $h$ depend on
+# the values of $\alpha_m$, $\beta_m$, etc., which themselves depend on $v$. The order of the assignments ($v$ is
+# initialized before $m$, $n$, and $h$) therefore matters, something that is naturally expressed by stating initial
+# values as sequential assignments to the state variables. In a declarative approach, this would be potentially
+# ambiguous.
+
+neurons.v = 0*mV
+# Note that the initial values of the gating variables are fixed here, i.e. they are relative to the resting potential
+# and not to the membrane potential that we later provide via ``run_args`` to check for the threshold. If we wanted
+# them to be based on that provided value, we'd have to call `device.apply_run_args()` here, so that the value is
+# applied before m, n, and h are initialized.
+neurons.m = "1/(1 + betam/alpham)"
+neurons.n = "1/(1 + betan/alphan)"
+neurons.h = "1/(1 + betah/alphah)"
+
+S = SpikeMonitor(neurons)
+
+# The algorithm we use here to find the voltage threshold is a simple bisection: we repeatedly test candidate values,
+# decreasing the estimate when the neuron spikes and increasing it when it does not. By continuously halving the size
+# of the correction step, we quickly converge to a precise estimate.
+
+#
+# We start with the same initial estimate for all neurons, 25mV above the resting potential, and the same value for
+# the size of the "correction step":
+
+v0 = 25*mV*np.ones(len(neurons))
+step = 25*mV
+
+# To later visualize how the estimates converge towards their final values, we also store the intermediate estimates:
+estimates = np.full((11, len(neurons)), np.nan)*mV
+estimates[0, :] = v0
+# Run the simulation for 20ms (with build_on_run=False, this only records the run; nothing is executed yet)
+run(20*ms)
+device.build(run=False, directory=None)  # compile the code, but don't run it yet
+
+for i in range(10):
+    print(".", end="")
+    # Set the membrane potential to our threshold estimate and re-run the compiled binary
+    device.run(run_args={neurons.v: v0})
+    # Decrease the estimates for neurons that spiked
+    v0[S.count > 0] -= step
+    # Increase the estimate for neurons that did not spike
+    v0[S.count == 0] += step
+    # Reduce step size and store current estimate
+    step /= 2.0
+    estimates[i + 1, :] = v0
+print()
+
+# After the 10 iterations, we plot the results:
+plt.rcParams.update({'axes.spines.top': False,
+                     'axes.spines.right': False})
+fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(8, 3), layout="constrained")
+
+colors = ["#1f77b4", "#ff7f03", "#2ca02c"]
+examples = [10, 50, 90]
+for example, color in zip(examples, colors):
+    ax1.plot(np.arange(11), estimates[:, example] / mV,
+             "o-", mec='white', lw=2, color=color, clip_on=False, ms=10, zorder=100,
+             label=f"gNa = {(neurons.gNa[example]/(mS/cm**2)):.1f}mS/cm$^2$")
+
+ax2.plot(neurons.gNa/(mS/cm**2), v0/mV, color="gray", lw=2)
+
+for idx, (example, color) in enumerate(zip(examples, colors)):
+    ax2.plot([neurons.gNa[example]/(mS/cm**2)], [estimates[-1, example]/mV], "o", color=color, mec="white", ms=10,
+             label=f"gNa = {(neurons.gNa[example]/(mS/cm**2)):.1f}mS/cm$^2$")
+
+ax1.set(title="iteration", ylim=(0, 45), ylabel="threshold estimate (mV)")
+ax2.set(title="gNa (mS/cm²)")
+
+plt.show()
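A note on the ``device.apply_run_args()`` call mentioned in the comments above: by default, values passed via
``run_args`` are applied after all assignments in the script, so those assignments cannot depend on them; calling
``device.apply_run_args()`` applies the values at that point of the script instead. A minimal sketch (assuming
Brian 2 >= 2.6; the group and values are illustrative, not taken from the example)::

    from brian2 import *

    set_device('cpp_standalone', build_on_run=False)
    G = NeuronGroup(1, '''dv/dt = -v/tau : 1
                          tau : second (constant)''')
    device.apply_run_args()  # the tau value passed via run_args takes effect here ...
    G.v = 'tau / (10*ms)'    # ... so this assignment can already depend on it
    run(1*ms)
    device.build(run=False, directory=None)
    device.run(run_args={G.tau: 5*ms})  # initializes v to 0.5 before the run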
diff --git a/examples/standalone/standalone_multiple_processes.py b/examples/standalone/standalone_multiple_processes.py
new file mode 100644
index 000000000..4cef43284
--- /dev/null
+++ b/examples/standalone/standalone_multiple_processes.py
@@ -0,0 +1,66 @@
+"""
+This example shows how to run several independent simulations in standalone mode, using multiple processes to run
+them in parallel.
+Given that this example only involves a single neuron, an alternative – and arguably more elegant – solution
+would be to run the simulations in a single `NeuronGroup`, where each neuron receives input with a different rate.
+
+The example is a standalone equivalent of the one presented in :doc:`/tutorials/3-intro-to-brian-simulations`.
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+import brian2 as b2
+from time import time
+
+b2.set_device('cpp_standalone', build_on_run=False)
+
+class SimWrapper:
+    def __init__(self):
+        self.net = b2.Network()
+        P = b2.PoissonGroup(num_inputs, rates=input_rate)
+        eqs = """
+        dv/dt = -v/tau : 1
+        tau : second (constant)
+        """
+        G = b2.NeuronGroup(1, eqs, threshold='v>1', reset='v=0', method='euler', name='neuron')
+        S = b2.Synapses(P, G, on_pre='v += weight')
+        S.connect()
+        M = b2.SpikeMonitor(G, name='spike_monitor')
+        self.net.add([P, G, S, M])
+
+        self.net.run(1000 * b2.ms)
+
+        self.device = b2.get_device()
+        self.device.build(run=False, directory=None)  # compile the code, but don't run it yet
+
+    def do_run(self, tau_i):
+        # Workaround to set the device globally in this context
+        from brian2.devices import device_module
+        device_module.active_device = self.device
+
+        # Use a separate results directory per run, so that parallel processes do not overwrite each other's results
+        result_dir = f'result_{tau_i}'
+        self.device.run(run_args={self.net['neuron'].tau: tau_i},
+                        results_directory=result_dir)
+        return self.net["spike_monitor"].num_spikes / b2.second
+
+
+if __name__ == "__main__":
+    start_time = time()
+    num_inputs = 100
+    input_rate = 10 * b2.Hz
+    weight = 0.1
+
+    npoints = 15
+    tau_range = np.linspace(1, 15, npoints) * b2.ms
+
+    sim = SimWrapper()
+
+    from multiprocessing import Pool
+    with Pool(npoints) as pool:
+        output_rates = pool.map(sim.do_run, tau_range)
+
+    print(f"Done in {time() - start_time}")
+
+    plt.plot(tau_range/b2.ms, output_rates)
+    plt.xlabel(r"$\tau$ (ms)")
+    plt.ylabel("Firing rate (sp/s)")
+    plt.show()
diff --git a/examples/standalone/standalone_multiplerun.py b/examples/standalone/standalone_multiplerun.py
index 899f39340..921c81826
--- a/examples/standalone/standalone_multiplerun.py
+++ b/examples/standalone/standalone_multiplerun.py
@@ -1,31 +1,29 @@
 """
-This example shows how to run several, independent simulations in standalone
-mode. Note that this is not the optimal approach if running the same model with
-minor differences (as in this example).
+This example shows how to run several independent simulations in standalone mode.
+Given that this example only involves a single neuron, an alternative – and arguably more elegant – solution
+would be to run the simulations in a single `NeuronGroup`, where each neuron receives input with a different rate.
 
-The example come from Tutorial part 3.
-For a discussion see this `post on the Brian forum `_.
+The example is a standalone equivalent of the one presented in :doc:`/tutorials/3-intro-to-brian-simulations`.
""" - import numpy as np -import pylab as plt +import matplotlib.pyplot as plt import brian2 as b2 from time import time -b2.set_device('cpp_standalone') - +b2.set_device('cpp_standalone', build_on_run=False) -def simulate(tau): - # These two lines are needed to start a new standalone simulation: - b2.device.reinit() - b2.device.activate() - - eqs = ''' - dv/dt = -v/tau : 1 - ''' +if __name__ == "__main__": + start_time = time() + num_inputs = 100 + input_rate = 10 * b2.Hz + weight = 0.1 net = b2.Network() P = b2.PoissonGroup(num_inputs, rates=input_rate) + eqs = """ + dv/dt = -v/tau : 1 + tau : second (constant) + """ G = b2.NeuronGroup(1, eqs, threshold='v>1', reset='v=0', method='euler') S = b2.Synapses(P, G, on_pre='v += weight') S.connect() @@ -34,21 +32,15 @@ def simulate(tau): net.run(1000 * b2.ms) - return M + b2.device.build(run=False) # compile the code, but don't run it yet - -if __name__ == "__main__": - start_time = time() - num_inputs = 100 - input_rate = 10 * b2.Hz - weight = 0.1 npoints = 15 tau_range = np.linspace(1, 15, npoints) * b2.ms output_rates = np.zeros(npoints) for ii in range(npoints): tau_i = tau_range[ii] - M = simulate(tau_i) + b2.device.run(run_args={G.tau: tau_i}) output_rates[ii] = M.num_spikes / b2.second print(f"Done in {time() - start_time}")