From 4c988717e328787193a06a9c9afd392e102203e3 Mon Sep 17 00:00:00 2001
From: GaboFGuerra
Date: Tue, 15 Aug 2023 17:55:50 +0200
Subject: [PATCH 1/2] Quick clean up and todos for deeper cleaning.

Signed-off-by: GaboFGuerra
---
 .../optimization/solvers/generic/builder.py   |   4 +
 .../solvers/generic/dataclasses.py            |   2 +
 .../solution_readout/models.py                |   2 +-
 .../solvers/generic/nebm/process.py           |   5 +-
 .../optimization/solvers/generic/processes.py |  40 -------
 .../optimization/solvers/generic/qp/models.py |   7 +-
 .../optimization/solvers/generic/qp/solver.py | 105 ------------------
 .../solvers/generic/solution_finder/models.py |   2 +
 .../optimization/solvers/generic/solver.py    |   9 +-
 .../solvers/generic/sub_process_models.py     |  26 +++--
 10 files changed, 40 insertions(+), 162 deletions(-)
 delete mode 100644 src/lava/lib/optimization/solvers/generic/processes.py
 delete mode 100644 src/lava/lib/optimization/solvers/generic/qp/solver.py

diff --git a/src/lava/lib/optimization/solvers/generic/builder.py b/src/lava/lib/optimization/solvers/generic/builder.py
index 5eecd384..2b1c98b5 100644
--- a/src/lava/lib/optimization/solvers/generic/builder.py
+++ b/src/lava/lib/optimization/solvers/generic/builder.py
@@ -153,6 +153,8 @@ def constructor(
             name: ty.Optional[str] = None,
             log_config: ty.Optional[LogConfig] = None,
         ) -> None:
+            # TODO: Try method extraction to simplify; some methods might be
+            # useful for both the proc and model constructors.
             super(type(self), self).__init__(
                 backend=backend,
                 hyperparameters=hyperparameters,
@@ -229,6 +231,8 @@ def _create_model_constructor(self, target_cost: int):
         """

         def constructor(self, proc):
+            # TODO: Try method extraction to simplify; some methods might be
+            # useful for both the proc and model constructors.
             discrete_var_shape = None
             if hasattr(proc, "discrete_variables"):
                 discrete_var_shape = proc.discrete_variables.shape
diff --git a/src/lava/lib/optimization/solvers/generic/dataclasses.py b/src/lava/lib/optimization/solvers/generic/dataclasses.py
index 67ccce24..62c30e88 100644
--- a/src/lava/lib/optimization/solvers/generic/dataclasses.py
+++ b/src/lava/lib/optimization/solvers/generic/dataclasses.py
@@ -14,6 +14,8 @@
 from lava.proc.dense.process import Dense


+# TODO: Convert all of these into subproc models, move them to the
+# hierarchical_processes.py file, and delete dataclasses.py.
 @dataclass
 class CostMinimizer:
     """Processes implementing an optimization problem's cost function."""
diff --git a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py b/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py
index 9f0e5a55..efe85ad7 100644
--- a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py
+++ b/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py
@@ -11,7 +11,7 @@
 from lava.magma.core.resources import CPU
 from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol

-
+# TODO: change directory structure, flatten it
 @implements(SolutionReadout, protocol=LoihiProtocol)
 @requires(CPU)
 class SolutionReadoutPyModel(PyLoihiProcessModel):
diff --git a/src/lava/lib/optimization/solvers/generic/nebm/process.py b/src/lava/lib/optimization/solvers/generic/nebm/process.py
index 2dd8c867..5eac2267 100644
--- a/src/lava/lib/optimization/solvers/generic/nebm/process.py
+++ b/src/lava/lib/optimization/solvers/generic/nebm/process.py
@@ -57,6 +57,7 @@ def __init__(
         # Initial state determined in DiscreteVariables
         self.state = Var(shape=shape,
                          init=init_state.astype(int))
+    # TODO: Is this property indented incorrectly?
     @property
     def shape(self) -> ty.Tuple[int, ...]:
         return self.proc_params["shape"]
@@ -124,7 +125,7 @@ def __init__(
         )

         self.temperature = Var(shape=shape, init=int(max_temperature))
-
+        # TODO: avoid line breaking by adding an intermediate variable.
         self.refract_counter = Var(
             shape=shape,
             init=(refract or 0)
@@ -141,6 +142,8 @@ def __init__(
             else np.zeros(shape=shape, dtype=int),
         )
+
+    # TODO: Here too, is this property indented incorrectly?
     @property
     def shape(self) -> ty.Tuple[int, ...]:
         return self.proc_params["shape"]
diff --git a/src/lava/lib/optimization/solvers/generic/processes.py b/src/lava/lib/optimization/solvers/generic/processes.py
deleted file mode 100644
index bb068214..00000000
--- a/src/lava/lib/optimization/solvers/generic/processes.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (C) 2021 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-# See: https://spdx.org/licenses/
-import typing as ty
-
-import numpy as np
-from lava.lib.optimization.problems.coefficients import CoefficientTensorsMixin
-from lava.magma.core.process.interfaces import AbstractProcessMember
-from lava.magma.core.process.ports.ports import InPort
-from lava.magma.core.process.variable import Var
-
-
-def _vars_from_coefficients(
-    coefficients: CoefficientTensorsMixin,
-) -> ty.Dict[int, AbstractProcessMember]:
-    vars = dict()
-    for rank, coeff in coefficients.items():
-        if rank == 1:
-            init = -coeff
-        if rank == 2:
-            linear_component = -coeff.diagonal()
-            quadratic_component = coeff * np.logical_not(np.eye(*coeff.shape))
-            if 1 in vars.keys():
-                vars[1].init = vars[1].init + linear_component
-            else:
-                vars[1] = Var(
-                    shape=linear_component.shape, init=linear_component
-                )
-            init = -quadratic_component
-        vars[rank] = Var(shape=coeff.shape, init=init)
-    return vars
-
-
-def _in_ports_from_coefficients(
-    coefficients: CoefficientTensorsMixin,
-) -> ty.List[AbstractProcessMember]:
-    in_ports = [
-        InPort(shape=coeff.shape) for coeff in coefficients.coefficients
-    ]
-    return in_ports
diff --git a/src/lava/lib/optimization/solvers/generic/qp/models.py b/src/lava/lib/optimization/solvers/generic/qp/models.py
index 30af44bd..3e608de3 100644
--- a/src/lava/lib/optimization/solvers/generic/qp/models.py
+++ b/src/lava/lib/optimization/solvers/generic/qp/models.py
@@ -197,7 +197,9 @@ def __init__(self, proc):
             shape=shape_constraint_matrix_T,
             weights=constraint_matrix_T,
         )
-
+        # TODO: Perform method extraction; this is doing more than one thing
+        # by definition. Maybe the inner conditional is unnecessary if the
+        # interface is common and the leaf proc deals with the input.
         if sparse:
             if model == "SigDel":
                 print("[INFO]: Using Sigma Delta Solution Neurons")
@@ -305,6 +307,7 @@ def __init__(self, proc_params: dict) -> None:
         self.connectivity_spike = 0

     def run_spk(self):
+        # TODO: All these conditionals should be done in an extracted method.
         self.decay_counter += 1
         a_in = self.a_in.recv()
         if self.decay_counter % 2 == 1:
@@ -373,6 +376,7 @@ def run_spk(self):
         self.growth_counter += 1
         a_in = self.a_in.recv()

+        # TODO: All these conditionals should be done in an extracted method.
         if self.growth_counter % 2 == 1:
             if self.lr_growth_type == "schedule":
                 if self.growth_counter == self.beta_growth_schedule:
@@ -436,6 +440,7 @@ def run_spk(self):
         delta_state = s_in - self.x_internal
         self.x_internal = s_in
         self.decay_counter += 1
+        # TODO: All these conditionals should be done in an extracted method.
         if self.theta_decay_type == "schedule":
             if self.decay_counter == self.theta_decay_schedule:
                 # TODO: guard against shift overflows in fixed-point
diff --git a/src/lava/lib/optimization/solvers/generic/qp/solver.py b/src/lava/lib/optimization/solvers/generic/qp/solver.py
deleted file mode 100644
index c43f8e9c..00000000
--- a/src/lava/lib/optimization/solvers/generic/qp/solver.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (C) 2021-2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-# See: https://spdx.org/licenses/
-
-import numpy as np
-from lava.magma.core.run_conditions import RunSteps
-from lava.magma.core.run_configs import Loihi1SimCfg
-from lava.lib.optimization.problems.problems import QP
-from lava.lib.optimization.solvers.generic.qp.models import (
-    ConstraintCheck,
-    GradientDynamics,
-)
-
-
-# Future: inheritance from OptimizationSolver class
-class QPSolver:
-    """Solve Full QP by connecting two Lava processes, GradDynamics and
-    ConstraintCheck
-
-    Parameters
-    ----------
-
-    alpha : 1-D np.array
-        The learning rate for gradient descent
-    beta : 1-D np.array
-        The learning rate for constraint correction
-    alpha_decay_schedule : int, default 10000
-        Number of iterations after which one right shift takes place for
-        alpha
-    beta_growth_schedule : int, default 10000
-        Number of iterations after which one left shift takes place for
-        beta
-
-    """
-
-    def __init__(
-        self,
-        alpha: np.ndarray,
-        beta: np.ndarray,
-        alpha_decay_schedule: int,
-        beta_growth_schedule: int,
-    ):
-        self.alpha = alpha
-        self.beta = beta
-        self.beta_g = beta_growth_schedule
-        self.alpha_d = alpha_decay_schedule
-
-    def solve(self, problem: QP, iterations: int = 400):
-        """solves the supplied QP problem
-
-        Parameters
-        ----------
-        problem : QP
-            The QP containing the matrices that set up the problem
-        iterations : int, optional
-            Number of iterations for which QP has to run, by default 400
-
-        Returns
-        --------
-        sol : 1-D np.array
-            Solution to the quadratic program
-        """
-        (
-            hessian,
-            linear_offset,
-        ) = (problem.get_hessian, problem.get_linear_offset)
-        if problem.get_constraint_hyperplanes is not None:
-            constraint_hyperplanes, constraint_biases = (
-                problem.get_constraint_hyperplanes,
-                problem.get_constraint_biases,
-            )
-
-        else:
-            constraint_hyperplanes, constraint_biases = np.zeros(
-                (hessian.shape[0], hessian.shape[1])
-            ), np.zeros((hessian.shape[0], 1))
-
-        init_sol = np.random.rand(hessian.shape[0], 1)
-        i_max = iterations
-        ConsCheck = ConstraintCheck(
-            constraint_matrix=constraint_hyperplanes,
-            constraint_bias=constraint_biases,
-        )
-        GradDyn = GradientDynamics(
-            hessian=hessian,
-            constraint_matrix_T=constraint_hyperplanes.T,
-            qp_neurons_init=init_sol,
-            grad_bias=linear_offset,
-            alpha=self.alpha,
-            beta=self.beta,
-            alpha_decay_schedule=self.alpha_d,
-            beta_growth_schedule=self.beta_g,
-        )
-
-        # core solver
-        GradDyn.s_out.connect(ConsCheck.s_in)
-        ConsCheck.s_out.connect(GradDyn.s_in)
-
-        GradDyn.run(
-            condition=RunSteps(num_steps=i_max),
-            run_cfg=Loihi1SimCfg(select_sub_proc_model=True),
-        )
-        sol = GradDyn.vars.qp_neuron_state.get()
-        GradDyn.stop()
-        return sol
diff --git a/src/lava/lib/optimization/solvers/generic/solution_finder/models.py b/src/lava/lib/optimization/solvers/generic/solution_finder/models.py
index e7858b08..79ed4337 100644
--- a/src/lava/lib/optimization/solvers/generic/solution_finder/models.py
+++ b/src/lava/lib/optimization/solvers/generic/solution_finder/models.py
@@ -55,6 +55,8 @@ def __init__(self, proc):
         problem = proc.proc_params.get("problem")
         backend = proc.proc_params.get("backend")

+        # TODO: Perform method extraction and simplify; the following is too
+        # large and might include some code duplication.
         # Subprocesses
         self.variables = VariablesImplementation()
         if discrete_var_shape is not None:
diff --git a/src/lava/lib/optimization/solvers/generic/solver.py b/src/lava/lib/optimization/solvers/generic/solver.py
index 79b55998..543ee4d1 100644
--- a/src/lava/lib/optimization/solvers/generic/solver.py
+++ b/src/lava/lib/optimization/solvers/generic/solver.py
@@ -60,6 +60,9 @@
     NEBMSimulatedAnnealingAbstractModel,
 )

+# TODO: Avoid the following try in this file; do it in a separate file from
+# which the proc -> model mapping can be imported. That way, irrelevant
+# classes will not harm readability.
 try:
     from lava.proc.dense.ncmodels import NcModelDense
     from lava.proc.sparse.ncmodel import NcModelSparse
@@ -105,11 +108,7 @@ class NcL2ModelPG:
 class NcL2ModelPI:
     pass

-
-from lava.lib.optimization.solvers.generic.read_gate.models import (
-    ReadGatePyModel,
-)
-
+# TODO: The following should be defined in a separate file too.
 BACKENDS = ty.Union[CPU, Loihi2NeuroCore, NeuroCore, str]
 HP_TYPE = ty.Union[ty.Dict, ty.List[ty.Dict]]
 CPUS = [CPU, "CPU"]
diff --git a/src/lava/lib/optimization/solvers/generic/sub_process_models.py b/src/lava/lib/optimization/solvers/generic/sub_process_models.py
index 16f1ff6a..2b733e59 100644
--- a/src/lava/lib/optimization/solvers/generic/sub_process_models.py
+++ b/src/lava/lib/optimization/solvers/generic/sub_process_models.py
@@ -2,13 +2,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # See: https://spdx.org/licenses/

-try:
-    from lava.magma.core.model.nc.net import NetL2
-except ImportError:
-
-    class NetL2:
-        pass
-
 import numpy as np

 from lava.lib.optimization.solvers.generic.cost_integrator.process import (
@@ -45,6 +38,7 @@ class NetL2:
 from lava.lib.optimization.utils.datatype_converter import convert_to_fp
 from scipy.sparse import csr_matrix

+# TODO: the following is also used in solver.py; import it, don't redefine it
 CPUS = [CPU, "CPU"]
 NEUROCORES = [Loihi2NeuroCore, NeuroCore, "Loihi2"]
 BACKEND_MSG = f""" was requested as backend. However,
@@ -70,6 +64,7 @@ def __init__(self, proc):
         backend = proc.backend
         neuron_model = proc.hyperparameters.get("neuron_model", "qp-lp_pipg")

+        # TODO: Invert the conditional for readability.
         if neuron_model == "qp-lp_pipg":
             # adding them here to show that they are need for the neurons models
             # since some values are calculated based on these weights
@@ -90,6 +85,10 @@ def __init__(self, proc):
             "alpha_decay_indices", [0]
         )
         alpha = proc.hyperparameters.get("alpha", 1)
+        # TODO: Why is the parameterization different per backend? Simplify.
+        # TODO: Maybe we are letting lower-level details leak through here;
+        # the backend handling might need to be done by the process
+        # initializer.
         if backend in CPUS:
             self.ProjGrad = ProjectedGradientNeuronsPIPGeq(
                 shape=init_state.shape,
@@ -137,7 +136,7 @@ def __init__(self, proc):
         # The input shape is a 2D vector (shape of the weight matrix).
         backend = proc.backend
         neuron_model = proc.hyperparameters.get("neuron_model", "qp-lp_pipg")
-
+        # TODO: Invert the conditional to simplify.
         if neuron_model == "qp-lp_pipg":
             # adding them here to show that they are need for the neurons models
             # since some values are calculated based on these weights
@@ -158,7 +157,7 @@ def __init__(self, proc):
             "beta_growth_indices", [0]
         )
         lr_change = proc.hyperparameters.get("lr_change_type", "indices")
-
+        # TODO: Try to simplify to avoid the backend conditional.
         if backend in CPUS:
             self.conn_A = Sparse(
                 weights=csr_matrix(A_pre),
@@ -248,6 +247,15 @@ def __init__(self, proc):
         available_sa_models = ['nebm-sa', 'nebm-sa-balanced',
                                'nebm-sa-refract-approx-unbalanced',
                                'nebm-sa-refract-approx', 'nebm-sa-refract']
+        # TODO: Perform method extraction to shorten the following code.
+        # Maybe each proc should get the params internally? It could accept
+        # the proc.hyperparameters dict alongside the other params, or we
+        # just unpack it; hyperparameters should satisfy the model interface,
+        # otherwise we can let it fail. If that makes for bad UX, we could
+        # also ignore unnecessary params. We could do neuron_proc = get_proc(
+        # neuron_model) and then neuron_proc(**proc.hyperparameters), which
+        # would eliminate all the following lines and reduce them to two. It
+        # also means defaults are only handled at the leaf proc level.
         if neuron_model == "nebm":
             temperature = proc.hyperparameters.get("temperature", 1)
             refract = proc.hyperparameters.get("refract", 0)

From 9bf70bc3a8815e4b15d8e0d507fd9c91c452a3fb Mon Sep 17 00:00:00 2001
From: GaboFGuerra
Date: Tue, 15 Aug 2023 18:00:27 +0200
Subject: [PATCH 2/2] Flatten out SolutionReadout folder.

Signed-off-by: GaboFGuerra
---
 .../solution_readout/models.py                | 174 +++++++++---------
 .../solution_readout/process.py               | 114 ++++++------
 2 files changed, 144 insertions(+), 144 deletions(-)
 rename src/lava/lib/optimization/solvers/generic/{monitoring_processes => }/solution_readout/models.py (97%)
 rename src/lava/lib/optimization/solvers/generic/{monitoring_processes => }/solution_readout/process.py (97%)

diff --git a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py b/src/lava/lib/optimization/solvers/generic/solution_readout/models.py
similarity index 97%
rename from src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py
rename to src/lava/lib/optimization/solvers/generic/solution_readout/models.py
index efe85ad7..36b8b12e 100644
--- a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py
+++ b/src/lava/lib/optimization/solvers/generic/solution_readout/models.py
@@ -1,87 +1,87 @@
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-# See: https://spdx.org/licenses/
-import numpy as np
-from lava.lib.optimization.solvers.generic.monitoring_processes\
-    .solution_readout.process import SolutionReadout
-from lava.magma.core.decorator import implements, requires
-from lava.magma.core.model.py.model import PyLoihiProcessModel
-from lava.magma.core.model.py.ports import PyInPort
-from lava.magma.core.model.py.type import LavaPyType
-from lava.magma.core.resources import CPU
-from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
-
-# TODO: change directory structure, flatten it
-@implements(SolutionReadout, protocol=LoihiProtocol)
-@requires(CPU)
-class SolutionReadoutPyModel(PyLoihiProcessModel):
-    """CPU model for the SolutionReadout process.
-    The process receives two types of messages, an updated cost and the
-    state of
-    the solver network representing the current candidate solution to an
-    OptimizationProblem. Additionally, a target cost can be defined by the
-    user, once this cost is reached by the solver network, this process
-    will request the runtime service to pause execution.
-    """
-
-    solution: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
-    solution_step: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
-    read_solution: PyInPort = LavaPyType(
-        PyInPort.VEC_DENSE, np.int32, precision=32
-    )
-    cost_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=32)
-    timestep_in: PyInPort = LavaPyType(
-        PyInPort.VEC_DENSE, np.int32, precision=32
-    )
-    target_cost: int = LavaPyType(int, np.int32, 32)
-    min_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
-    stop = False
-
-    def run_spk(self):
-        if self.stop:
-            return
-        raw_cost, min_cost_id = self.cost_in.recv()
-        if raw_cost != 0:
-            timestep, raw_solution = self._receive_data()
-            cost = self.decode_cost(raw_cost)
-            self.solution_step = abs(timestep)
-            self.solution[:] = self.decode_solution(raw_solution)
-            self.min_cost[:] = np.asarray([cost[0], min_cost_id])
-            if cost[0] < 0:
-                self._printout_new_solution(cost, min_cost_id, timestep)
-                self._printout_if_converged()
-            self._stop_if_requested(timestep, min_cost_id)
-
-    def _receive_data(self):
-        timestep = self.timestep_in.recv()[0]
-        raw_solution = self.read_solution.recv()
-        return timestep, raw_solution
-
-    @staticmethod
-    def decode_cost(raw_cost) -> np.ndarray:
-        return np.array([raw_cost]).astype(np.int32)
-
-    @staticmethod
-    def decode_solution(raw_solution) -> np.ndarray:
-        raw_solution &= 0x1F  # AND with 0x1F (=0b11111) retains 5 LSBs
-        # The binary solution was attained 2 steps ago. Shift down by 4.
-        return raw_solution.astype(np.int8) >> 4
-
-    def _printout_new_solution(self, cost, min_cost_id, timestep):
-        self.log.info(
-            f"Host: better solution found by network {min_cost_id} at "
-            f"step {abs(timestep) - 2} "
-            f"with cost {cost[0]}: {self.solution}"
-        )
-
-    def _printout_if_converged(self):
-        if (
-            self.min_cost[0] is not None
-            and self.min_cost[0] <= self.target_cost
-        ):
-            self.log.info(
-                f"Host: network reached target cost {self.target_cost}.")
-
-    def _stop_if_requested(self, timestep, min_cost_id):
-        if (timestep > 0 or timestep == -1) and min_cost_id != -1:
-            self.stop = True
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+import numpy as np
+from lava.lib.optimization.solvers.generic.monitoring_processes\
+    .solution_readout.process import SolutionReadout
+from lava.magma.core.decorator import implements, requires
+from lava.magma.core.model.py.model import PyLoihiProcessModel
+from lava.magma.core.model.py.ports import PyInPort
+from lava.magma.core.model.py.type import LavaPyType
+from lava.magma.core.resources import CPU
+from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
+
+# TODO: change directory structure, flatten it
+@implements(SolutionReadout, protocol=LoihiProtocol)
+@requires(CPU)
+class SolutionReadoutPyModel(PyLoihiProcessModel):
+    """CPU model for the SolutionReadout process.
+    The process receives two types of messages, an updated cost and the
+    state of
+    the solver network representing the current candidate solution to an
+    OptimizationProblem. Additionally, a target cost can be defined by the
+    user, once this cost is reached by the solver network, this process
+    will request the runtime service to pause execution.
+    """
+
+    solution: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
+    solution_step: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
+    read_solution: PyInPort = LavaPyType(
+        PyInPort.VEC_DENSE, np.int32, precision=32
+    )
+    cost_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=32)
+    timestep_in: PyInPort = LavaPyType(
+        PyInPort.VEC_DENSE, np.int32, precision=32
+    )
+    target_cost: int = LavaPyType(int, np.int32, 32)
+    min_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32)
+    stop = False
+
+    def run_spk(self):
+        if self.stop:
+            return
+        raw_cost, min_cost_id = self.cost_in.recv()
+        if raw_cost != 0:
+            timestep, raw_solution = self._receive_data()
+            cost = self.decode_cost(raw_cost)
+            self.solution_step = abs(timestep)
+            self.solution[:] = self.decode_solution(raw_solution)
+            self.min_cost[:] = np.asarray([cost[0], min_cost_id])
+            if cost[0] < 0:
+                self._printout_new_solution(cost, min_cost_id, timestep)
+                self._printout_if_converged()
+            self._stop_if_requested(timestep, min_cost_id)
+
+    def _receive_data(self):
+        timestep = self.timestep_in.recv()[0]
+        raw_solution = self.read_solution.recv()
+        return timestep, raw_solution
+
+    @staticmethod
+    def decode_cost(raw_cost) -> np.ndarray:
+        return np.array([raw_cost]).astype(np.int32)
+
+    @staticmethod
+    def decode_solution(raw_solution) -> np.ndarray:
+        raw_solution &= 0x1F  # AND with 0x1F (=0b11111) retains 5 LSBs
+        # The binary solution was attained 2 steps ago. Shift down by 4.
+        return raw_solution.astype(np.int8) >> 4
+
+    def _printout_new_solution(self, cost, min_cost_id, timestep):
+        self.log.info(
+            f"Host: better solution found by network {min_cost_id} at "
+            f"step {abs(timestep) - 2} "
+            f"with cost {cost[0]}: {self.solution}"
+        )
+
+    def _printout_if_converged(self):
+        if (
+            self.min_cost[0] is not None
+            and self.min_cost[0] <= self.target_cost
+        ):
+            self.log.info(
+                f"Host: network reached target cost {self.target_cost}.")
+
+    def _stop_if_requested(self, timestep, min_cost_id):
+        if (timestep > 0 or timestep == -1) and min_cost_id != -1:
+            self.stop = True
diff --git a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/process.py b/src/lava/lib/optimization/solvers/generic/solution_readout/process.py
similarity index 97%
rename from src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/process.py
rename to src/lava/lib/optimization/solvers/generic/solution_readout/process.py
index 5ea38ea4..3a3ce358 100644
--- a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/process.py
+++ b/src/lava/lib/optimization/solvers/generic/solution_readout/process.py
@@ -1,57 +1,57 @@
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-# See: https://spdx.org/licenses/
-import numpy as np
-import typing as ty
-
-from lava.magma.core.process.ports.ports import InPort
-from lava.magma.core.process.process import AbstractProcess, LogConfig
-from lava.magma.core.process.variable import Var
-
-
-class SolutionReadout(AbstractProcess):
-    """Process to readout solution from SNN and make it available on host.
-
-    Parameters
-    ----------
-    shape: The shape of the set of nodes, or process, which state will be read.
-    target_cost: cost value at which, once attained by the network,
-    this process will stop execution.
-    name: Name of the Process. Default is 'Process_ID', where ID is an
-    integer value that is determined automatically.
-    log_config: Configuration options for logging.
-
-    Attributes
-    ----------
-    read_solution: InPort
-        A message received on this ports signifies the process
-        should call read on its RefPort.
-    ref_port: RefPort
-        A reference port to a variable in another process which state
-        will be remotely accessed upon read request. Here, it reads the
-        current variables assignment by a solver to an optimization problem.
-    target_cost: Var
-        Cost value at which, once attained by the network.
-
-    """
-
-    def __init__(
-        self,
-        shape: ty.Tuple[int, ...],
-        target_cost=None,
-        name: ty.Optional[str] = None,
-        log_config: ty.Optional[LogConfig] = None,
-    ) -> None:
-        super().__init__(
-            shape=shape,
-            target_cost=target_cost,
-            name=name,
-            log_config=log_config,
-        )
-        self.solution = Var(shape=shape, init=-1)
-        self.solution_step = Var(shape=(1,), init=-1)
-        self.min_cost = Var(shape=(2,), init=-1)
-        self.target_cost = Var(shape=(1,), init=target_cost)
-        self.read_solution = InPort(shape=shape)
-        self.cost_in = InPort(shape=(2,))
-        self.timestep_in = InPort(shape=(1,))
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause
+# See: https://spdx.org/licenses/
+import numpy as np
+import typing as ty
+
+from lava.magma.core.process.ports.ports import InPort
+from lava.magma.core.process.process import AbstractProcess, LogConfig
+from lava.magma.core.process.variable import Var
+
+
+class SolutionReadout(AbstractProcess):
+    """Process to readout solution from SNN and make it available on host.
+
+    Parameters
+    ----------
+    shape: The shape of the set of nodes, or process, which state will be read.
+    target_cost: cost value at which, once attained by the network,
+    this process will stop execution.
+    name: Name of the Process. Default is 'Process_ID', where ID is an
+    integer value that is determined automatically.
+    log_config: Configuration options for logging.
+
+    Attributes
+    ----------
+    read_solution: InPort
+        A message received on this ports signifies the process
+        should call read on its RefPort.
+    ref_port: RefPort
+        A reference port to a variable in another process which state
+        will be remotely accessed upon read request. Here, it reads the
+        current variables assignment by a solver to an optimization problem.
+    target_cost: Var
+        Cost value at which, once attained by the network.
+
+    """
+
+    def __init__(
+        self,
+        shape: ty.Tuple[int, ...],
+        target_cost=None,
+        name: ty.Optional[str] = None,
+        log_config: ty.Optional[LogConfig] = None,
+    ) -> None:
+        super().__init__(
+            shape=shape,
+            target_cost=target_cost,
+            name=name,
+            log_config=log_config,
+        )
+        self.solution = Var(shape=shape, init=-1)
+        self.solution_step = Var(shape=(1,), init=-1)
+        self.min_cost = Var(shape=(2,), init=-1)
+        self.target_cost = Var(shape=(1,), init=target_cost)
+        self.read_solution = InPort(shape=shape)
+        self.cost_in = InPort(shape=(2,))
+        self.timestep_in = InPort(shape=(1,))
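
Note on the repeated "extracted method" TODOs in qp/models.py: one possible
shape for the extraction, shown here for the growth-schedule branch only.
This is a rough sketch, not the existing implementation; the helper name,
the mixin framing, the left-shift update step, and the beta_growth_indices
attribute are all assumptions based on the hunks above.

    class LearningRateGrowthMixin:
        """Collects run_spk's learning-rate-growth conditionals."""

        def _grow_beta_if_due(self) -> None:
            # Growth is only considered on every other timestep.
            if self.growth_counter % 2 != 1:
                return
            if self.lr_growth_type == "schedule":
                if self.growth_counter == self.beta_growth_schedule:
                    self.beta <<= 1  # assumed fixed-point growth step
            elif self.lr_growth_type == "indices":
                if self.growth_counter in self.beta_growth_indices:
                    self.beta <<= 1

    # run_spk would then reduce to:
    #     self.growth_counter += 1
    #     a_in = self.a_in.recv()
    #     self._grow_beta_if_due()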
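
Note on the solver.py TODO about the guarded NcModel imports: the try/except
could live in one module from which solver.py imports the results. A minimal
sketch, assuming a new (hypothetical) file name nc_model_map.py and reusing
the stub-class fallback that solver.py already employs:

    # nc_model_map.py (hypothetical module)
    try:
        from lava.proc.dense.ncmodels import NcModelDense
        from lava.proc.sparse.ncmodel import NcModelSparse
    except ImportError:
        # Stubs keep the names importable on machines without Loihi support.
        class NcModelDense:
            pass

        class NcModelSparse:
            pass

solver.py would then need a single import, and sub_process_models.py (which
currently redefines CPUS and NEUROCORES, as its own TODO notes) could import
those constants from the same shared module.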
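
Note on the get_proc(...) idea floated in the sub_process_models.py TODO: a
minimal sketch of the dispatch, assuming a module-level registry and that
each leaf process accepts and validates its own hyperparameters. The two
entries shown are assumptions about the mapping; a complete table would also
cover the strings listed in available_sa_models.

    from lava.lib.optimization.solvers.generic.nebm.process import (
        NEBM,
        NEBMSimulatedAnnealing,
    )

    # Registry keyed by the neuron_model strings used in the current code.
    NEURON_PROCS = {
        "nebm": NEBM,
        "nebm-sa": NEBMSimulatedAnnealing,
    }


    def get_proc(neuron_model: str):
        """Look up the process class for a neuron_model string."""
        try:
            return NEURON_PROCS[neuron_model]
        except KeyError:
            raise ValueError(f"Unknown neuron_model: {neuron_model!r}")

The per-model branches would then collapse to something like

    neuron_proc = get_proc(neuron_model)
    neurons = neuron_proc(shape=shape, **proc.hyperparameters)

with defaults handled only at the leaf process level, as the TODO suggests.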