Skip to content

Commit

Permalink
Merge pull request #15 from chrhansk/feature-displaying
Browse files Browse the repository at this point in the history
Improve displaying
  • Loading branch information
chrhansk authored Nov 10, 2023
2 parents c546266 + b61cf5f commit 187a744
Show file tree
Hide file tree
Showing 7 changed files with 253 additions and 123 deletions.
117 changes: 117 additions & 0 deletions pygradflow/display.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
from termcolor import colored

from pygradflow.problem import Problem


class Format:
    """Helpers producing ANSI-styled strings via :func:`termcolor.colored`."""

    @staticmethod
    def bold(s: str) -> str:
        """Return *s* rendered with the bold terminal attribute."""
        return colored(s, attrs=["bold"])

    @staticmethod
    def redgreen(s: str, cond: bool, bold: bool) -> str:
        """Color *s* green when *cond* is truthy, red otherwise.

        When *bold* is set, the bold attribute is added as well.
        """
        attrs = ["bold"] if bold else None
        return colored(s, "green" if cond else "red", attrs=attrs)


class BoldFormatter:
    """Formats a value with a ``str.format`` spec, then renders it bold."""

    def __init__(self, format: str):
        self.format = format

    def __call__(self, state):
        formatted = self.format.format(state)
        return Format.bold(formatted)


class StringFormatter:
    """Callable applying a ``str.format`` template to a single value."""

    def __init__(self, format: str):
        # Template with exactly one positional placeholder, e.g. "{:16.8e}".
        self.format = format

    def __call__(self, state):
        # The state is the sole positional argument of the template.
        result = self.format.format(state)
        return result


class StepFormatter:
    """Render a step-acceptance flag as a bold colored accepted/rejected tag."""

    def __call__(self, state):
        # Truthy state means the step was accepted (shown green),
        # falsy means rejected (shown red).
        if state:
            return Format.redgreen("accepted", True, bold=True)
        return Format.redgreen("rejected", False, bold=True)


class Column:
    """One column of the iteration display: a centered header plus
    per-state cell content produced by an extractor and a formatter."""

    def __init__(self, name: str, width: int, format, attr):
        self.name = name
        self.width = width

        # A plain format spec is wrapped so that self.format is always callable.
        if isinstance(format, str):
            self.format = StringFormatter(format)
        else:
            self.format = format

        self.attr = attr

    @property
    def header(self):
        # Center the column name within the column width.
        return "{:^{}s}".format(self.name, self.width)

    def content(self, state):
        # Pull the raw value out of the state, then render it.
        value = self.attr(state)
        return self.format(value)


class Display:
    """Joins a sequence of columns into one header line and per-state rows."""

    def __init__(self, cols):
        self.cols = cols

    @property
    def header(self):
        parts = (col.header for col in self.cols)
        return " ".join(parts)

    def row(self, state):
        parts = (col.content(state) for col in self.cols)
        return " ".join(parts)


class StateAttr:
    """Look up a named entry in the state mapping and call it.

    State entries are zero-argument callables producing current values.
    """

    def __init__(self, name: str):
        self.name = name

    def __call__(self, state):
        return state[self.name]()


class IterateAttr:
    """Read a named attribute off the iterate stored under state['iterate']."""

    def __init__(self, name: str):
        self.name = name

    def __call__(self, state):
        return getattr(state["iterate"], self.name)


def problem_display(problem: Problem):
    """Build the iteration :class:`Display` for *problem*.

    The bound-infeasibility column is included only when the problem
    has finite variable bounds.
    """
    cols = [
        Column("Iter", 6, BoldFormatter("{:6d}"), StateAttr("iter")),
        Column("Aug Lag", 16, "{:16.8e}", StateAttr("aug_lag")),
    ]

    if problem.var_bounded:
        cols.append(Column("Bound inf", 16, "{:16.8e}", IterateAttr("bound_violation")))

    cols += [
        Column("Cons inf", 16, "{:16.8e}", IterateAttr("cons_violation")),
        Column("Dual inf", 16, "{:16.8e}", IterateAttr("stat_res")),
        Column("Primal step", 16, "{:16.8e}", StateAttr("primal_step_norm")),
        Column("Dual step", 16, "{:16.8e}", StateAttr("dual_step_norm")),
        Column("Lambda", 16, "{:16.8e}", StateAttr("lamb")),
        Column("Type", 8, StepFormatter(), StateAttr("step_accept")),
    ]

    return Display(cols)
36 changes: 26 additions & 10 deletions pygradflow/iterate.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import functools

import numpy as np
import scipy as sp

from pygradflow.util import norm_mult
from pygradflow.lazy import lazyprop
from pygradflow.active_set import ActiveSet
from pygradflow.eval import Evaluator, SimpleEvaluator
from pygradflow.params import Params
Expand Down Expand Up @@ -40,19 +41,19 @@ def copy(self) -> "Iterate":
np.copy(self.y),
self.eval)

@lazyprop
@functools.cached_property
def obj(self) -> float:
return self.eval.obj(self.x)

@lazyprop
@functools.cached_property
def obj_grad(self) -> np.ndarray:
return _read_only(self.eval.obj_grad(self.x))

@lazyprop
@functools.cached_property
def cons(self) -> np.ndarray:
return _read_only(self.eval.cons(self.x))

@lazyprop
@functools.cached_property
def cons_jac(self) -> sp.sparse.spmatrix:
return self.eval.cons_jac(self.x)

Expand Down Expand Up @@ -96,11 +97,26 @@ def aug_lag_deriv_xx(self, rho: float) -> sp.sparse.spmatrix:
def dist(self, other: "Iterate") -> float:
return norm_mult(self.x - other.x, self.y - other.y)

@lazyprop
def locally_infeasible(self, tol: float) -> bool:
    """
    Check if the iterate is locally infeasible. It is
    judged to be locally infeasible if the constraint
    violation is greater than the tolerance and
    optimality conditions for the minimization
    of the constraint violation are (approximately) satisfied.

    :param tol: Tolerance used both for the constraint violation
        and for the stationarity of the violation minimization.
    :return: ``True`` if the iterate appears locally infeasible.
    """
    # Feasible up to tol: cannot be locally infeasible.
    if self.cons_violation <= tol:
        return False

    # Gradient of (1/2)||c(x)||^2 is J(x)^T c(x); if it (approximately)
    # vanishes, x is a stationary point of the constraint violation.
    infeas_opt_res = self.cons_jac.T.dot(self.cons)

    return np.linalg.norm(infeas_opt_res) <= tol

@functools.cached_property
def active_set(self) -> ActiveSet:
return ActiveSet(self)

@lazyprop
@functools.cached_property
def bound_duals(self) -> np.ndarray:
r = -(self.obj_grad + self.cons_jac.T.dot(self.y))
d = np.zeros_like(self.x)
Expand All @@ -113,7 +129,7 @@ def bound_duals(self) -> np.ndarray:

return d

@lazyprop
@functools.cached_property
def bound_violation(self) -> float:
lb = self.problem.var_lb
ub = self.problem.var_ub
Expand All @@ -124,14 +140,14 @@ def bound_violation(self) -> float:

return max(lower, upper)

@lazyprop
@functools.cached_property
def cons_violation(self) -> float:
c = self.cons
if c.size == 0:
return 0.0
return np.linalg.norm(c, np.inf)

@lazyprop
@functools.cached_property
def stat_res(self) -> float:
r = self.obj_grad + self.cons_jac.T.dot(self.y) + self.bound_duals
return np.linalg.norm(r, np.inf)
Expand Down
28 changes: 0 additions & 28 deletions pygradflow/lazy.py

This file was deleted.

15 changes: 14 additions & 1 deletion pygradflow/penalty.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import numpy as np
from pygradflow.iterate import Iterate
from pygradflow.params import Params
from pygradflow.params import Params, PenaltyUpdate
from pygradflow.problem import Problem


Expand Down Expand Up @@ -86,3 +86,16 @@ def update(self, prev_iterate: Iterate, next_iterate: Iterate) -> float:
self.rho = next_rho

return self.rho


def penalty_strategy(problem: Problem, params: Params) -> PenaltyStrategy:
    """Instantiate the penalty strategy selected by ``params.penalty_update``.

    :raises ValueError: If the penalty update enum value is unknown.
    """
    strategies = {
        PenaltyUpdate.Constant: ConstantPenalty,
        PenaltyUpdate.DualNorm: DualNormUpdate,
        PenaltyUpdate.DualEquilibration: DualEquilibration,
    }

    strategy = strategies.get(params.penalty_update)

    if strategy is None:
        raise ValueError("Invalid penalty update strategy")

    return strategy(problem, params)
6 changes: 6 additions & 0 deletions pygradflow/problem.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import abc
import functools

import numpy as np
import scipy as sp

Expand All @@ -18,6 +20,10 @@ def __init__(
self.var_ub = np.copy(var_ub)
self.num_cons = num_cons

@functools.cached_property
def var_bounded(self):
    """Whether any variable has a finite lower or upper bound.

    Cached, since the bound arrays are not expected to change.
    """
    return np.isfinite(self.var_lb).any() or np.isfinite(self.var_ub).any()

@property
def num_vars(self) -> int:
(num_vars,) = self.var_lb.shape
Expand Down
Loading

0 comments on commit 187a744

Please sign in to comment.