Merge pull request #21 from chrhansk/feature-isort-black

Add isort and black

chrhansk authored Nov 27, 2023
2 parents fd3fc1c + 83b1859 commit f9b89ae
Showing 28 changed files with 206 additions and 216 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/package.yml
@@ -33,4 +33,4 @@ jobs:
       - name: Install the project dependencies
         run: poetry install
       - name: Test with pytest
-        run: poetry run pytest -v
+        run: poetry run pytest -v --isort --black
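The new flags are not built into pytest itself; they are presumably provided by the pytest-isort and pytest-black plugins, which the project would need among its dev dependencies. A minimal sketch of reproducing the CI check locally, assuming both plugins are installed:

# check_style.py -- hypothetical helper, not part of this PR.
# Assumes the pytest-isort and pytest-black plugins are installed;
# they are what add the --isort and --black options used above.
import pytest

if __name__ == "__main__":
    # Non-zero exit code if any file fails the isort or black checks.
    raise SystemExit(pytest.main(["-v", "--isort", "--black"]))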
2 changes: 1 addition & 1 deletion pygradflow/controller.py
@@ -1,5 +1,5 @@
-from dataclasses import dataclass
 import math
+from dataclasses import dataclass
 
 from pygradflow.params import Params
18 changes: 10 additions & 8 deletions pygradflow/deriv_check.py
@@ -12,9 +12,9 @@ def __init__(self, expected_value, actual_value, col_index, atol) -> None:
         self.actual_value = actual_value
         self.atol = atol
 
-        self.invalid_deriv = np.isclose(self.expected_value,
-                                        self.actual_value,
-                                        atol=self.atol)
+        self.invalid_deriv = np.isclose(
+            self.expected_value, self.actual_value, atol=self.atol
+        )
 
         self.invalid_deriv = np.logical_not(self.invalid_deriv)
         self.invalid_indices = np.where(self.invalid_deriv)[0]
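The reformatted block is the core of the derivative check: entries where the expected and finite-difference derivatives disagree beyond the tolerance are flagged and their indices collected. A self-contained sketch of the same logic:

# Self-contained sketch of the flagging logic above.
import numpy as np

expected = np.array([1.0, 2.0, 3.0])  # analytic derivative
actual = np.array([1.0, 2.5, 3.0])    # finite-difference estimate

invalid = np.logical_not(np.isclose(expected, actual, atol=1e-8))
print(np.where(invalid)[0])  # [1] -- only the middle entry disagrees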
@@ -34,11 +34,13 @@ def __init__(self, expected_value, actual_value, col_index, atol) -> None:
     def __str__(self):
         num_invalid_indices = self.invalid_indices.size
 
-        message = (f"Expected derivative: {self.expected_value} "
-                   f"and actual (findiff) derivative: {self.actual_value} "
-                   f"differ at the {num_invalid_indices} "
-                   f"indices: {self.invalid_indices} "
-                   f"(max diff: {self.max_deriv_diff}, tolerance: {self.atol})")
+        message = (
+            f"Expected derivative: {self.expected_value} "
+            f"and actual (findiff) derivative: {self.actual_value} "
+            f"differ at the {num_invalid_indices} "
+            f"indices: {self.invalid_indices} "
+            f"(max diff: {self.max_deriv_diff}, tolerance: {self.atol})"
+        )
 
         return message
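Black's re-wrapping of the message relies on Python's implicit concatenation of adjacent string literals inside parentheses, so both versions build the same string. A tiny illustration:

# Adjacent (f-)string literals inside parentheses are concatenated.
n = 3
message = (
    f"differ at the {n} "
    f"indices"
)
assert message == "differ at the 3 indices"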

6 changes: 1 addition & 5 deletions pygradflow/display.py
@@ -43,11 +43,7 @@ def __call__(self, state):
 
 
 class Column:
-    def __init__(self,
-                 name: str,
-                 width: int,
-                 format,
-                 attr):
+    def __init__(self, name: str, width: int, format, attr):
         self.name = name
         self.width = width
10 changes: 6 additions & 4 deletions pygradflow/eval.py
@@ -1,5 +1,6 @@
 import abc
 import math
+
 import numpy as np
 import scipy as sp

@@ -35,7 +36,6 @@ def warn():
 
 
 class Evaluator(abc.ABC):
-
     @abc.abstractmethod
     def obj(self, x: np.ndarray) -> float:
         raise NotImplementedError()
@@ -82,6 +82,8 @@ class ValidatingEvaluator(Evaluator):
     def __init__(self, problem, params):
         self.problem = problem
         self.dtype = params.dtype
+        self.num_vars = problem.num_vars
+        self.num_cons = problem.num_cons
 
     def obj(self, x: np.ndarray) -> float:
         obj = self.problem.obj(x)
@@ -109,7 +111,7 @@ def cons(self, x: np.ndarray) -> np.ndarray:
             raise EvalError("Invalid shape of constraints", x)
 
         if not np.isfinite(cons).all():
-            raise EvalError("Non-finite constraints")
+            raise EvalError("Non-finite constraints", x)
 
         return astype(cons, self.dtype)
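The added `x` arguments make every EvalError carry the point at which evaluation failed. A minimal sketch of the validation pattern, with the EvalError signature assumed from the calls above rather than taken from pygradflow itself:

# Sketch only: EvalError's (message, point) signature is an assumption.
import numpy as np

class EvalError(Exception):
    def __init__(self, message, x):
        super().__init__(message)
        self.x = x  # point at which evaluation failed

def validated_cons(cons, num_cons, x):
    if cons.shape != (num_cons,):
        raise EvalError("Invalid shape of constraints", x)
    if not np.isfinite(cons).all():
        raise EvalError("Non-finite constraints", x)
    return cons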

@@ -120,7 +122,7 @@ def cons_jac(self, x: np.ndarray) -> sp.sparse.spmatrix:
             raise EvalError("Invalid shape of Jacobian", x)
 
         if not np.isfinite(cons_jac.data).all():
-            raise EvalError("Non-finite Jacobian")
+            raise EvalError("Non-finite Jacobian", x)
 
         return astype(cons_jac, self.dtype)

@@ -131,7 +133,7 @@ def lag_hess(self, x: np.ndarray, lag: np.ndarray) -> sp.sparse.spmatrix:
             raise EvalError("Invalid shape of Hessian", x)
 
         if not np.isfinite(lag_hess.data).all():
-            raise EvalError("Non-finite Hessian")
+            raise EvalError("Non-finite Hessian", x)
 
         coo_hess = lag_hess.tocoo()
         coo_hess_T = coo_hess.T
5 changes: 3 additions & 2 deletions pygradflow/implicit_func.py
@@ -1,6 +1,8 @@
 import abc
+
 import numpy as np
 import scipy as sp
+
 from pygradflow.iterate import Iterate
 from pygradflow.problem import Problem

@@ -136,8 +138,7 @@ def deriv(self, jac, hess, active_set):
         F_21 = -dt * jac
         F_22 = sp.sparse.eye(m, dtype=params.dtype)
 
-        deriv = sp.sparse.bmat([[F_11, F_12], [F_21, F_22]],
-                               format="csc")
+        deriv = sp.sparse.bmat([[F_11, F_12], [F_21, F_22]], format="csc")
 
         assert deriv.dtype == params.dtype
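For reference, scipy.sparse.bmat assembles the four blocks into a single sparse matrix; a self-contained example with random stand-in blocks of the same shapes:

# Illustration of the block assembly above, with stand-in blocks.
import scipy.sparse as sp

n, m, dt = 3, 2, 0.1
F_11 = sp.eye(n)                           # (n, n) block
F_12 = sp.random(n, m, density=0.5)        # (n, m) block
F_21 = -dt * sp.random(m, n, density=0.5)  # (m, n) block
F_22 = sp.eye(m)                           # (m, m) block

deriv = sp.bmat([[F_11, F_12], [F_21, F_22]], format="csc")
print(deriv.shape)  # (5, 5)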

28 changes: 13 additions & 15 deletions pygradflow/iterate.py
@@ -3,11 +3,11 @@
 import numpy as np
 import scipy as sp
 
-from pygradflow.util import norm_mult
 from pygradflow.active_set import ActiveSet
 from pygradflow.eval import Evaluator, SimpleEvaluator
 from pygradflow.params import Params
 from pygradflow.problem import Problem
+from pygradflow.util import norm_mult
 
 
 def _read_only(a):
@@ -16,12 +16,14 @@ def _read_only(a):
 
 
 class Iterate:
-    def __init__(self,
-                 problem: Problem,
-                 params: Params,
-                 x: np.ndarray,
-                 y: np.ndarray,
-                 eval: Evaluator = None):
+    def __init__(
+        self,
+        problem: Problem,
+        params: Params,
+        x: np.ndarray,
+        y: np.ndarray,
+        eval: Evaluator = None,
+    ):
         assert x.shape == (problem.num_vars,)
         assert y.shape == (problem.num_cons,)
         self.x = _read_only(np.copy(x))
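The body of `_read_only` is collapsed in this diff; based on its use on copies of `x` and `y` above, a plausible sketch is simply locking the array against writes:

# Plausible sketch only -- the actual body of _read_only is not shown.
import numpy as np

def _read_only(a: np.ndarray) -> np.ndarray:
    a.setflags(write=False)  # any later in-place write raises ValueError
    return a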
@@ -35,11 +37,9 @@ def __init__
         self.problem = problem
 
     def copy(self) -> "Iterate":
-        return Iterate(self.problem,
-                       self.params,
-                       np.copy(self.x),
-                       np.copy(self.y),
-                       self.eval)
+        return Iterate(
+            self.problem, self.params, np.copy(self.x), np.copy(self.y), self.eval
+        )
 
     @functools.cached_property
     def obj(self) -> float:
@@ -97,9 +97,7 @@ def aug_lag_deriv_xx(self, rho: float) -> sp.sparse.spmatrix:
     def dist(self, other: "Iterate") -> float:
         return norm_mult(self.x - other.x, self.y - other.y)
 
-    def locally_infeasible(self,
-                           feas_tol: float,
-                           local_infeas_tol: float) -> bool:
+    def locally_infeasible(self, feas_tol: float, local_infeas_tol: float) -> bool:
         """
         Check if the iterate is locally infeasible. It is
         judged to be locally infeasible if the constraint
8 changes: 4 additions & 4 deletions pygradflow/newton.py
@@ -1,15 +1,15 @@
 import abc
 
 import numpy as np
 import scipy as sp
 
 from pygradflow.implicit_func import ImplicitFunc
 from pygradflow.iterate import Iterate
-
 from pygradflow.log import logger as lgg
-from pygradflow.params import Params, NewtonType
+from pygradflow.params import NewtonType, Params
 from pygradflow.problem import Problem
 from pygradflow.step import step_solver
-from pygradflow.step.step_solver import StepSolver, StepResult
-
+from pygradflow.step.step_solver import StepResult, StepSolver
 
 logger = lgg.getChild("newton")

6 changes: 3 additions & 3 deletions pygradflow/params.py
@@ -1,5 +1,5 @@
-from enum import Enum, Flag, auto
 from dataclasses import dataclass
+from enum import Enum, Flag, auto
 
 import numpy as np

@@ -42,8 +42,8 @@ class Precision(Enum):
 
 class DerivCheck(Flag):
     NoCheck = 0
-    CheckFirst = (1 << 0)
-    CheckSecond = (1 << 1)
+    CheckFirst = 1 << 0
+    CheckSecond = 1 << 1
     CheckAll = CheckFirst | CheckSecond
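Dropping the parentheses around the shift expressions is purely cosmetic; the Flag semantics are unchanged. A quick self-contained check:

# Standard enum.Flag behavior, nothing project-specific.
from enum import Flag

class DerivCheck(Flag):
    NoCheck = 0
    CheckFirst = 1 << 0
    CheckSecond = 1 << 1
    CheckAll = CheckFirst | CheckSecond

assert DerivCheck.CheckAll & DerivCheck.CheckFirst      # truthy: bit is set
assert not (DerivCheck.NoCheck & DerivCheck.CheckAll)   # falsy: empty flag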


1 change: 1 addition & 0 deletions pygradflow/penalty.py
@@ -1,6 +1,7 @@
 import abc
 
 import numpy as np
+
 from pygradflow.iterate import Iterate
 from pygradflow.params import Params, PenaltyUpdate
 from pygradflow.problem import Problem
78 changes: 40 additions & 38 deletions pygradflow/solver.py
@@ -3,20 +3,14 @@
 
 import numpy as np
 
+from pygradflow.display import Format, problem_display
 from pygradflow.iterate import Iterate
 from pygradflow.log import logger
-from pygradflow.params import Params
-from pygradflow.problem import Problem
 from pygradflow.newton import newton_method
+from pygradflow.params import Params
 from pygradflow.penalty import penalty_strategy
-
-from pygradflow.step.step_control import (
-    StepResult,
-    step_controller,
-    StepController,
-)
-
-from pygradflow.display import problem_display, Format
+from pygradflow.problem import Problem
+from pygradflow.step.step_control import StepController, StepResult, step_controller
 
 
 class SolverStatus(Enum):
@@ -57,16 +51,17 @@ def success(self):
 
 class Solver:
     def __init__(self, problem: Problem, params: Params = Params()) -> None:
-
         self.problem = problem
         self.params = params
 
         if params.validate_input:
-            from .eval import SimpleEvaluator
-            self.evaluator = SimpleEvaluator(problem, params)
-        else:
             from .eval import ValidatingEvaluator
+
             self.evaluator = ValidatingEvaluator(problem, params)
+        else:
+            from .eval import SimpleEvaluator
+
+            self.evaluator = SimpleEvaluator(problem, params)
 
         self.penalty = penalty_strategy(problem, params)
         self.rho = -1.0
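As rendered, this hunk appears to go beyond formatting: the branches seem swapped, so `validate_input` previously selected SimpleEvaluator and now selects ValidatingEvaluator. A hypothetical usage sketch of the fixed behavior (assuming Params, a dataclass per params.py, accepts this field as a keyword):

# Hypothetical usage, not part of this PR.
params = Params(validate_input=True)  # enable input checking
solver = Solver(problem, params)      # problem: any pygradflow Problem
# The evaluator now validates shapes and finiteness on every evaluation.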
@@ -114,14 +109,17 @@ def _deriv_check(self, x: np.ndarray, y: np.ndarray) -> None:
             lambda x: eval.obj_grad(x) + eval.cons_jac(x).T.dot(y),
             x,
             eval.lag_hess(x, y),
-            params)
+            params,
+        )
 
-    def print_result(self,
-                     status: SolverStatus,
-                     iterate: Iterate,
-                     iterations: int,
-                     accepted_steps: int,
-                     dist_factor: float) -> None:
+    def print_result(
+        self,
+        status: SolverStatus,
+        iterate: Iterate,
+        iterations: int,
+        accepted_steps: int,
+        dist_factor: float,
+    ) -> None:
         rho = self.rho
 
         desc = "{:>30s}".format(status.description)
@@ -177,7 +175,7 @@ def solve(self, x_0: np.ndarray, y_0: np.ndarray) -> SolverResult:
 
         logger.info(display.header)
 
-        path_dist = 0.
+        path_dist = 0.0
         initial_iterate = iterate
         accepted_steps = 0

Expand All @@ -191,14 +189,14 @@ def solve(self, x_0: np.ndarray, y_0: np.ndarray) -> SolverResult:
status = SolverStatus.Converged
break

if iterate.locally_infeasible(params.opt_tol,
params.local_infeas_tol):
if iterate.locally_infeasible(params.opt_tol, params.local_infeas_tol):
logger.debug("Local infeasibility detected")
status = SolverStatus.LocallyInfeasible
break

if (iterate.obj <= params.obj_lower_limit) and \
(iterate.is_feasible(params.opt_tol)):
if (iterate.obj <= params.obj_lower_limit) and (
iterate.is_feasible(params.opt_tol)
):
logger.debug("Unboundedness detected")
status = SolverStatus.Unbounded
break
Expand All @@ -213,7 +211,9 @@ def solve(self, x_0: np.ndarray, y_0: np.ndarray) -> SolverResult:
lamb = step_result.lamb

if lamb >= params.lamb_max:
raise Exception(f"Inverse step size {lamb} exceeded maximum {params.lamb_max} (incorrect derivatives?)")
raise Exception(
f"Inverse step size {lamb} exceeded maximum {params.lamb_max} (incorrect derivatives?)"
)

primal_step_norm = np.linalg.norm(next_iterate.x - iterate.x)
dual_step_norm = np.linalg.norm(next_iterate.y - iterate.y)
@@ -247,16 +247,16 @@ def solve(self, x_0: np.ndarray, y_0: np.ndarray) -> SolverResult:
                 next_rho = self.penalty.update(iterate, next_iterate)
 
                 if next_rho != self.rho:
-                    logger.debug("Updating penalty parameter from %e to %e",
-                                 self.rho,
-                                 next_rho)
+                    logger.debug(
+                        "Updating penalty parameter from %e to %e", self.rho, next_rho
+                    )
                     self.rho = next_rho
 
             delta = iterate.dist(next_iterate)
 
             iterate = next_iterate
 
-            path_dist += (primal_step_norm + dual_step_norm)
+            path_dist += primal_step_norm + dual_step_norm
             accepted_steps += 1
 
             if (lamb <= params.lamb_term) and (delta <= params.opt_tol):
@@ -272,15 +272,17 @@
 
         assert path_dist >= direct_dist
 
-        dist_factor = path_dist / direct_dist if direct_dist != 0. else 1.
+        dist_factor = path_dist / direct_dist if direct_dist != 0.0 else 1.0
 
         assert status is not None
 
-        self.print_result(status=status,
-                          iterate=iterate,
-                          iterations=iteration,
-                          accepted_steps=accepted_steps,
-                          dist_factor=dist_factor)
+        self.print_result(
+            status=status,
+            iterate=iterate,
+            iterations=iteration,
+            accepted_steps=accepted_steps,
+            dist_factor=dist_factor,
+        )
 
         x = iterate.x
         y = iterate.y
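The `dist_factor` computed above is the ratio of the path length accumulated over all accepted steps to the straight-line distance between the first and last iterates; values near 1 mean the solver moved almost directly. A worked example of the ratio in plain numpy (the solver itself measures distances over the stacked primal-dual pair):

# Worked example of the dist_factor ratio (self-contained).
import numpy as np

points = [np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([1.0, 1.0])]
path_dist = sum(np.linalg.norm(b - a) for a, b in zip(points, points[1:]))  # 2.0
direct_dist = np.linalg.norm(points[-1] - points[0])                        # sqrt(2)
dist_factor = path_dist / direct_dist if direct_dist != 0.0 else 1.0
print(dist_factor)  # ~1.414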