Merge pull request #88 from chrhansk/hotfix-nonlinearity
Disable faulty reporting of nonlinearity
chrhansk authored May 17, 2024
2 parents 1fa70f8 + 6a18d70 commit 89cd53d
Showing 5 changed files with 91 additions and 32 deletions.
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -14,7 +14,7 @@
 project = "pygradflow"
 copyright = "2023, Christoph Hansknecht"
 author = "Christoph Hansknecht"
-release = "0.4.18"
+release = "0.4.19"

 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
4 changes: 3 additions & 1 deletion pygradflow/display.py
@@ -171,7 +171,9 @@ def problem_display(problem: Problem, params: Params):
         cols.append(AttrColumn("Rcond", 5, RCondFormatter(), StateAttr("rcond")))

     cols.append(AttrColumn("Obj nonlin", 16, "{:16.8e}", StateAttr("obj_nonlin")))
-    cols.append(AttrColumn("Cons nonlin", 16, "{:16.8e}", StateAttr("cons_nonlin")))
+
+    if problem.num_cons > 0:
+        cols.append(AttrColumn("Cons nonlin", 16, "{:16.8e}", StateAttr("cons_nonlin")))

     cols.append(AttrColumn("Type", 8, StepFormatter(), StateAttr("step_accept")))
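The guard above is the heart of the hotfix: for an unconstrained problem (problem.num_cons == 0) there is no constraint nonlinearity to report, so the "Cons nonlin" column is now only registered when constraints exist. A minimal, self-contained sketch of this conditional-column pattern (Column and build_columns are toy stand-ins, not pygradflow's actual AttrColumn machinery):

    # Toy sketch: register a display column only when it is meaningful.
    class Column:
        def __init__(self, header: str):
            self.header = header

    def build_columns(num_cons: int) -> list:
        cols = [Column("Obj nonlin")]
        if num_cons > 0:  # constraint columns only for constrained problems
            cols.append(Column("Cons nonlin"))
        cols.append(Column("Type"))
        return cols

    assert [c.header for c in build_columns(0)] == ["Obj nonlin", "Type"]
    assert [c.header for c in build_columns(3)] == ["Obj nonlin", "Cons nonlin", "Type"]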
91 changes: 70 additions & 21 deletions pygradflow/eval.py
@@ -1,5 +1,6 @@
 import abc
 import math
+from enum import Enum, auto

 import numpy as np
 import scipy as sp
@@ -35,71 +36,112 @@ def warn():
 warn_hessian_values = warn_once("Hessian not numerically symmetric")


+class Component(Enum):
+    Obj = auto()
+    ObjGrad = auto()
+    Cons = auto()
+    ConsJac = auto()
+    LagHess = auto()
+
+    def name(self):
+        return {
+            Component.Obj: "Objective",
+            Component.ObjGrad: "Objective Gradient",
+            Component.Cons: "Constraints",
+            Component.ConsJac: "Constraint Jacobian",
+            Component.LagHess: "Lagrangian Hessian",
+        }[self]
+
+
 class Evaluator(abc.ABC):
-    @abc.abstractmethod
+    def __init__(self, problem, params):
+        self.problem = problem
+        self.dtype = params.dtype
+
+        self.reset_num_evals()
+
+    def reset_num_evals(self):
+        self.num_evals = {comp: 0 for comp in Component}
+
     def obj(self, x: np.ndarray) -> float:
+        self.num_evals[Component.Obj] += 1
+        return self._eval_obj(x)
+
+    def obj_grad(self, x: np.ndarray) -> np.ndarray:
+        self.num_evals[Component.ObjGrad] += 1
+        return self._eval_obj_grad(x)
+
+    def cons(self, x: np.ndarray) -> np.ndarray:
+        self.num_evals[Component.Cons] += 1
+        return self._eval_cons(x)
+
+    def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
+        self.num_evals[Component.ConsJac] += 1
+        return self._eval_cons_jac(x)
+
+    def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
+        self.num_evals[Component.LagHess] += 1
+        return self._eval_lag_hess(x, lag)
+
+    @abc.abstractmethod
+    def _eval_obj(self, x: np.ndarray) -> float:
         raise NotImplementedError()

     @abc.abstractmethod
-    def obj_grad(self, x: np.ndarray) -> np.ndarray:
+    def _eval_obj_grad(self, x: np.ndarray) -> np.ndarray:
         raise NotImplementedError()

     @abc.abstractmethod
-    def cons(self, x: np.ndarray) -> np.ndarray:
+    def _eval_cons(self, x: np.ndarray) -> np.ndarray:
         raise NotImplementedError()

     @abc.abstractmethod
-    def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
         raise NotImplementedError()

     @abc.abstractmethod
-    def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
         raise NotImplementedError()


 class SimpleEvaluator(Evaluator):
-    def __init__(self, problem, params):
-        self.problem = problem
-        self.dtype = params.dtype
-
-    def obj(self, x: np.ndarray) -> float:
+    def _eval_obj(self, x: np.ndarray) -> float:
         return self.problem.obj(x)

-    def obj_grad(self, x: np.ndarray) -> np.ndarray:
+    def _eval_obj_grad(self, x: np.ndarray) -> np.ndarray:
         return astype(self.problem.obj_grad(x), self.dtype)

-    def cons(self, x: np.ndarray) -> np.ndarray:
+    def _eval_cons(self, x: np.ndarray) -> np.ndarray:
         if self.problem.num_cons == 0:
             return np.array([], dtype=self.dtype)

         return astype(self.problem.cons(x), self.dtype)

-    def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
         if self.problem.num_cons == 0:
             return sp.sparse.csr_matrix((0, self.problem.num_vars), dtype=self.dtype)

         return astype(self.problem.cons_jac(x), self.dtype)

-    def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
         return astype(self.problem.lag_hess(x, lag), self.dtype)


 class ValidatingEvaluator(Evaluator):
     def __init__(self, problem, params):
-        self.problem = problem
-        self.dtype = params.dtype
+        super().__init__(problem, params)
         self.num_vars = problem.num_vars
         self.num_cons = problem.num_cons

-    def obj(self, x: np.ndarray) -> float:
+    def _eval_obj(self, x: np.ndarray) -> float:
         obj = self.problem.obj(x)

         if not math.isfinite(obj):
             raise EvalError("Infinite objective", x)

         return obj

-    def obj_grad(self, x: np.ndarray) -> np.ndarray:
+    def _eval_obj_grad(self, x: np.ndarray) -> np.ndarray:
         grad = self.problem.obj_grad(x)

         if grad.shape != (self.num_vars,):
@@ -110,7 +152,7 @@ def obj_grad(self, x: np.ndarray) -> np.ndarray:

         return astype(grad, self.dtype)

-    def cons(self, x: np.ndarray) -> np.ndarray:
+    def _eval_cons(self, x: np.ndarray) -> np.ndarray:
         if self.num_cons == 0:
             return np.array([], dtype=self.dtype)

@@ -124,7 +166,7 @@ def cons(self, x: np.ndarray) -> np.ndarray:

         return astype(cons, self.dtype)

-    def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
         if self.num_cons == 0:
             return sp.sparse.csr_matrix((0, self.num_vars), dtype=self.dtype)

@@ -138,7 +180,7 @@ def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:

         return astype(cons_jac, self.dtype)

-    def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
+    def _eval_lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
         lag_hess = self.problem.lag_hess(x, lag)

         if lag_hess.shape != (self.num_vars, self.num_vars):
@@ -167,3 +209,10 @@ def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
             warn_hessian_values()

         return astype(lag_hess, self.dtype)
+
+
+def create_evaluator(problem, params):
+    if params.validate_input:
+        return ValidatingEvaluator(problem, params)
+    else:
+        return SimpleEvaluator(problem, params)
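The eval.py refactoring is a textbook template method: the public obj/obj_grad/cons/cons_jac/lag_hess entry points on Evaluator update num_evals exactly once and delegate to protected _eval_* hooks, so SimpleEvaluator and ValidatingEvaluator can no longer skip or double-count the bookkeeping. A minimal, self-contained sketch of the pattern, using a toy one-component evaluator rather than the pygradflow API:

    import abc
    from enum import Enum, auto

    class Component(Enum):
        Obj = auto()

    class CountingEvaluator(abc.ABC):
        def __init__(self):
            # One counter per component, as in reset_num_evals above.
            self.num_evals = {comp: 0 for comp in Component}

        def obj(self, x: float) -> float:
            self.num_evals[Component.Obj] += 1  # counted once, in the base class
            return self._eval_obj(x)

        @abc.abstractmethod
        def _eval_obj(self, x: float) -> float:
            raise NotImplementedError()

    class SquareEvaluator(CountingEvaluator):
        def _eval_obj(self, x: float) -> float:
            return x * x

    ev = SquareEvaluator()
    ev.obj(2.0)
    ev.obj(3.0)
    assert ev.num_evals[Component.Obj] == 2

Note that SimpleEvaluator loses its own __init__ in the diff and inherits the counting one from Evaluator, which is what makes num_evals available for the solver's final report.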
24 changes: 16 additions & 8 deletions pygradflow/solver.py
@@ -6,7 +6,7 @@

 from pygradflow.callbacks import Callbacks, CallbackType
 from pygradflow.display import Format, problem_display
-from pygradflow.eval import Evaluator, SimpleEvaluator, ValidatingEvaluator
+from pygradflow.eval import create_evaluator
 from pygradflow.iterate import Iterate
 from pygradflow.log import logger
 from pygradflow.newton import newton_method
@@ -297,6 +297,15 @@ def print_result(
         logger.info("%20s: %45e", "Constraint violation", iterate.cons_violation)
         logger.info("%20s: %45e", "Dual violation", iterate.stat_res)

+        eval = self.evaluator
+
+        eval_name = Format.bold("{:>20s}".format("Evaluations"))
+        logger.info("%20s", eval_name)
+
+        for component, num_evals in eval.num_evals.items():
+            name = component.name()
+            logger.info("%20s: %45d", name, num_evals)
+
     def _create_initial_iterate(
         self, x0: Optional[np.ndarray], y0: Optional[np.ndarray]
     ):
@@ -452,10 +461,7 @@ def solve(
         params = self.params
         problem = self.problem

-        if params.validate_input:
-            self.evaluator: Evaluator = ValidatingEvaluator(self.problem, params)
-        else:
-            self.evaluator = SimpleEvaluator(self.problem, params)
+        self.evaluator = create_evaluator(problem, params)

         self.penalty = penalty_strategy(self.problem, params)
         self.rho = -1.0
@@ -539,9 +545,11 @@ def compute_last_active_set():
            state["curr_active_set"] = lambda: step_result.active_set

            state["obj_nonlin"] = lambda: iterate.obj_nonlin(next_iterate)
-            state["cons_nonlin"] = lambda: np.linalg.norm(
-                iterate.cons_nonlin(next_iterate), ord=np.inf
-            )
+
+            if problem.num_cons > 0:
+                state["cons_nonlin"] = lambda: np.linalg.norm(
+                    iterate.cons_nonlin(next_iterate), ord=np.inf
+                )

            state["aug_lag"] = lambda: iterate.aug_lag(self.rho)
            state["obj"] = lambda: iterate.obj()
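Downstream, solve now obtains its evaluator from the create_evaluator factory, and print_result walks num_evals to log one line per component. A small, self-contained sketch of that summary loop with hypothetical counts (the "%20s: %45d" layout matches print_result above; label() plays the role of Component.name()):

    from enum import Enum, auto

    class Component(Enum):
        Obj = auto()
        Cons = auto()

        def label(self) -> str:
            return {Component.Obj: "Objective", Component.Cons: "Constraints"}[self]

    def print_eval_summary(num_evals: dict) -> None:
        print("{:>20s}".format("Evaluations"))
        for component, count in num_evals.items():
            print("%20s: %45d" % (component.label(), count))

    print_eval_summary({Component.Obj: 12, Component.Cons: 7})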
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pygradflow"
-version = "0.4.18"
+version = "0.4.19"
 description = "PyGradFlow is a simple implementation of the sequential homotopy method to be used to solve general nonlinear programs."
 authors = ["Christoph Hansknecht <[email protected]>"]
 readme = "README.md"
