Operator Norms #446 (Open)
Wants to merge 4 commits into base: development
cpp/examples/proximal_admm/inpainting.cc (1 change: 0 additions & 1 deletion)
@@ -95,7 +95,6 @@ int main(int argc, char const **argv) {
.l1_proximal_real_constraint(true)
.residual_convergence(epsilon * 1.001)
.lagrange_update_scale(0.9)
- .sq_op_norm(1e0)
.Psi(psi)
.Phi(sampling);

cpp/examples/proximal_admm/reweighted.cc (1 change: 0 additions & 1 deletion)
@@ -97,7 +97,6 @@ int main(int argc, char const **argv) {
.l1_proximal_real_constraint(true)
.residual_convergence(epsilon * 1.001)
.lagrange_update_scale(0.9)
- .sq_op_norm(1e0)
.Psi(psi)
.Phi(sampling);

cpp/sopt/forward_backward.h (11 changes: 4 additions & 7 deletions)
@@ -73,7 +73,6 @@ class ForwardBackward {
: itermax_(std::numeric_limits<t_uint>::max()),
regulariser_strength_(1e-8),
step_size_(1),
- sq_op_norm_(1),
is_converged_(),
fista_(true),
f_gradient_(f_gradient),
@@ -104,8 +103,6 @@
SOPT_MACRO(regulariser_strength, Real);
//! β parameter
SOPT_MACRO(step_size, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! flag to for FISTA Forward-Backward algorithm. True by default but should be false when using a learned g_proximal.
SOPT_MACRO(fista, bool);
//! \brief A function verifying convergence
@@ -215,7 +212,7 @@
//! - x = Φ^T y / ν
//! - residuals = Φ x - y
std::tuple<t_Vector, t_Vector> initial_guess() const {
-   return ForwardBackward<SCALAR>::initial_guess(target(), Phi(), sq_op_norm());
+   return ForwardBackward<SCALAR>::initial_guess(target(), Phi());
}

//! \brief Computes initial guess for x and the residual using the targets
@@ -225,9 +222,9 @@
//!
//! This function simplifies creating overloads for operator() in FB wrappers.
static std::tuple<t_Vector, t_Vector> initial_guess(t_Vector const &target,
- t_LinearTransform const &phi, Real sq_op_norm) {
+ t_LinearTransform const &phi) {
std::tuple<t_Vector, t_Vector> guess;
- std::get<0>(guess) = static_cast<t_Vector>(phi.adjoint() * target) / sq_op_norm;
+ std::get<0>(guess) = static_cast<t_Vector>(phi.adjoint() * target) / phi.sq_norm();
std::get<1>(guess) = phi * std::get<0>(guess) - target;
return guess;
}
@@ -278,7 +275,7 @@ void ForwardBackward<SCALAR>::iteration_step(t_Vector &image, t_Vector &residual
t_Vector &gradient_current, const t_real FISTA_step) {
t_Vector prev_image = image;
f_gradient(gradient_current, auxilliary_image, residual, Phi()); // assigns gradient_current (non normalised)
- t_Vector auxilliary_with_step = auxilliary_image - step_size() / sq_op_norm() * gradient_current; // step to new image using gradient
+ t_Vector auxilliary_with_step = auxilliary_image - step_size() / Phi().sq_norm() * gradient_current; // step to new image using gradient
const Real weight = regulariser_strength() * step_size();
g_proximal(image, weight, auxilliary_with_step); // apply proximal operator to new image
auxilliary_image = image + FISTA_step * (image - prev_image); // update auxilliary vector with FISTA acceleration step
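Reviewer note (not part of the diff): with sq_op_norm removed, ForwardBackward reads the squared operator norm from the measurement operator itself. In the notation of the hunks above (β = step_size, γ = regulariser_strength, u the auxiliary image, r the residual), one pass of iteration_step is

    x_new = prox_{γβ g}( u − (β / Phi().sq_norm()) ∇f(u, r, Φ) )
    u_new = x_new + FISTA_step (x_new − x_old)

and initial_guess still returns x = Φ^T y / ν with residual r = Φ x − y, except that ν is now Phi().sq_norm() rather than the removed sq_op_norm() parameter.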
cpp/sopt/imaging_forward_backward.h (12 changes: 4 additions & 8 deletions)
@@ -79,7 +79,6 @@ class ImagingForwardBackward {
regulariser_strength_(1e-8),
step_size_(1),
sigma_(1),
- sq_op_norm_(1),
fista_(true),
is_converged_()
{
@@ -100,7 +99,6 @@
regulariser_strength_(1e-8),
step_size_(1),
sigma_(1),
- sq_op_norm_(1),
fista_(true),
is_converged_()
{
@@ -152,8 +150,6 @@
SOPT_MACRO(step_size, Real);
//! γ parameter
SOPT_MACRO(sigma, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! flag to for FISTA Forward-Backward algorithm. True by default but should be false when using a learned g_function.
SOPT_MACRO(fista, bool);
//! A function verifying convergence
@@ -226,8 +222,9 @@

//! \brief Calls Forward Backward
//! \param[out] out: Output vector x
- Diagnostic operator()(t_Vector &out) {
-   return operator()(out, ForwardBackward<SCALAR>::initial_guess(target(), Phi(), sq_op_norm()));
+ Diagnostic operator()(t_Vector &out) const {
+   return operator()(out, ForwardBackward<SCALAR>::initial_guess(target(), Phi()));
+
}
//! \brief Calls Forward Backward
//! \param[out] out: Output vector x
@@ -260,7 +257,7 @@
DiagnosticAndResult operator()() {
DiagnosticAndResult result;
static_cast<Diagnostic &>(result) = operator()(
-     result.x, ForwardBackward<SCALAR>::initial_guess(target(), Phi(), sq_op_norm()));
+     result.x, ForwardBackward<SCALAR>::initial_guess(target(), Phi()));
return result;
}
//! Makes it simple to chain different calls to FB
@@ -364,7 +361,6 @@ typename ImagingForwardBackward<SCALAR>::Diagnostic ImagingForwardBackward<SCALA
.itermax(itermax())
.step_size(gradient_step_size)
.regulariser_strength(regulariser_strength())
- .sq_op_norm(sq_op_norm())
.fista(fista())
.Phi(Phi())
.is_converged(convergence)
cpp/sopt/imaging_padmm.h (8 changes: 2 additions & 6 deletions)
@@ -62,7 +62,6 @@ class ImagingProximalADMM {
objective_convergence_(nullptr),
itermax_(std::numeric_limits<t_uint>::max()),
regulariser_strength_(1e-8),
- sq_op_norm_(1),
lagrange_update_scale_(0.9),
is_converged_(),
Phi_(linear_transform_identity<Scalar>()),
@@ -110,8 +109,6 @@
SOPT_MACRO(itermax, t_uint);
//! γ parameter
SOPT_MACRO(regulariser_strength, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! Lagrange update scale β
SOPT_MACRO(lagrange_update_scale, Real);
//! A function verifying convergence
@@ -132,7 +129,7 @@
//! \brief Calls Proximal ADMM
//! \param[out] out: Output vector x
Diagnostic operator()(t_Vector &out) const {
-   return operator()(out, PADMM::initial_guess(target(), Phi(), sq_op_norm()));
+   return operator()(out, PADMM::initial_guess(target(), Phi()));
}
//! \brief Calls Proximal ADMM
//! \param[out] out: Output vector x
Expand Down Expand Up @@ -165,7 +162,7 @@ class ImagingProximalADMM {
DiagnosticAndResult operator()() const {
DiagnosticAndResult result;
static_cast<Diagnostic &>(result) = operator()(result.x,
-     PADMM::initial_guess(target(), Phi(), sq_op_norm()));
+     PADMM::initial_guess(target(), Phi()));
return result;
}
//! Makes it simple to chain different calls to PADMM
Expand Down Expand Up @@ -307,7 +304,6 @@ typename ImagingProximalADMM<SCALAR>::Diagnostic ImagingProximalADMM<SCALAR>::op
auto const padmm = PADMM(f_proximal, g_proximal, target())
.itermax(itermax())
.regulariser_strength(regulariser_strength())
- .sq_op_norm(sq_op_norm())
.lagrange_update_scale(lagrange_update_scale())
.Phi(Phi())
.is_converged(convergence);
cpp/sopt/imaging_primal_dual.h (8 changes: 2 additions & 6 deletions)
@@ -72,7 +72,6 @@ class ImagingPrimalDual {
precondition_iters_(0),
xi_(1),
rho_(1),
- sq_op_norm_(1),
is_converged_(),
Phi_(linear_transform_identity<Scalar>()),
Psi_(linear_transform_identity<Scalar>()),
@@ -132,8 +131,6 @@
SOPT_MACRO(xi, Real);
//! rho parameter
SOPT_MACRO(rho, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! precondtion step size parameter
SOPT_MACRO(precondition_stepsize, Real);
//! precondition weights parameter
@@ -170,7 +167,7 @@
//! \brief Calls Primal Dual
//! \param[out] out: Output vector x
Diagnostic operator()(t_Vector &out) const {
-   return operator()(out, PD::initial_guess(target(), Phi(), sq_op_norm()));
+   return operator()(out, PD::initial_guess(target(), Phi()));
}
//! \brief Calls Primal Dual
//! \param[out] out: Output vector x
@@ -203,7 +200,7 @@
DiagnosticAndResult operator()() const {
DiagnosticAndResult result;
static_cast<Diagnostic &>(result) = operator()(result.x,
-     PD::initial_guess(target(), Phi(), sq_op_norm()));
+     PD::initial_guess(target(), Phi()));
return result;
}
//! Makes it simple to chain different calls to PD
@@ -346,7 +343,6 @@ typename ImagingPrimalDual<SCALAR>::Diagnostic ImagingPrimalDual<SCALAR>::operat
.update_scale(update_scale())
.xi(xi())
.rho(rho())
- .sq_op_norm(sq_op_norm())
.regulariser_strength(regulariser_strength())
.Phi(Phi())
.Psi(Psi())
cpp/sopt/l2_forward_backward.h (8 changes: 2 additions & 6 deletions)
@@ -73,7 +73,6 @@ class L2ForwardBackward {
regulariser_strength_(1e-8),
step_size_(1),
sigma_(1),
- sq_op_norm_(1),
is_converged_(),
Phi_(linear_transform_identity<Scalar>()),
target_(target) {}
@@ -123,8 +122,6 @@
SOPT_MACRO(step_size, Real);
//! γ parameter
SOPT_MACRO(sigma, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! A function verifying convergence
SOPT_MACRO(is_converged, t_IsConverged);
//! Measurement operator
@@ -149,7 +146,7 @@
//! \brief Calls Forward Backward
//! \param[out] out: Output vector x
Diagnostic operator()(t_Vector &out) const {
-   return operator()(out, ForwardBackward<SCALAR>::initial_guess(target(), Phi(), sq_op_norm()));
+   return operator()(out, ForwardBackward<SCALAR>::initial_guess(target(), Phi()));
}
//! \brief Calls Forward Backward
//! \param[out] out: Output vector x
Expand Down Expand Up @@ -182,7 +179,7 @@ class L2ForwardBackward {
DiagnosticAndResult operator()() const {
DiagnosticAndResult result;
static_cast<Diagnostic &>(result) = operator()(
-     result.x, ForwardBackward<SCALAR>::initial_guess(target(), Phi(), sq_op_norm()));
+     result.x, ForwardBackward<SCALAR>::initial_guess(target(), Phi()));
return result;
}
//! Makes it simple to chain different calls to FB
Expand Down Expand Up @@ -280,7 +277,6 @@ typename L2ForwardBackward<SCALAR>::Diagnostic L2ForwardBackward<SCALAR>::operat
.itermax(itermax())
.step_size(step_size())
.regulariser_strength(regulariser_strength())
- .sq_op_norm(sq_op_norm())
.Phi(Phi())
.is_converged(convergence);
static_cast<typename ForwardBackward<SCALAR>::Diagnostic &>(result) =
cpp/sopt/l2_primal_dual.h (8 changes: 2 additions & 6 deletions)
@@ -71,7 +71,6 @@ class ImagingPrimalDual {
precondition_iters_(0),
xi_(1),
rho_(1),
- sq_op_norm_(1),
is_converged_(),
Phi_(linear_transform_identity<Scalar>()),
Psi_(linear_transform_identity<Scalar>()),
@@ -131,8 +130,6 @@
SOPT_MACRO(xi, Real);
//! rho parameter
SOPT_MACRO(rho, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! precondtion step size parameter
SOPT_MACRO(precondition_stepsize, Real);
//! precondition weights parameter
@@ -169,7 +166,7 @@
//! \brief Calls Primal Dual
//! \param[out] out: Output vector x
Diagnostic operator()(t_Vector &out) const {
-   return operator()(out, PD::initial_guess(target(), Phi(), sq_op_norm()));
+   return operator()(out, PD::initial_guess(target(), Phi()));
}
//! \brief Calls Primal Dual
//! \param[out] out: Output vector x
Expand Down Expand Up @@ -202,7 +199,7 @@ class ImagingPrimalDual {
DiagnosticAndResult operator()() const {
DiagnosticAndResult result;
static_cast<Diagnostic &>(result) = operator()(result.x,
-     PD::initial_guess(target(), Phi(), sq_op_norm()));
+     PD::initial_guess(target(), Phi()));
return result;
}
//! Makes it simple to chain different calls to PD
@@ -346,7 +343,6 @@ typename ImagingPrimalDual<SCALAR>::Diagnostic ImagingPrimalDual<SCALAR>::operat
.update_scale(update_scale())
.xi(xi())
.rho(rho())
- .sq_op_norm(sq_op_norm())
.gamma(gamma())
.Phi(Phi())
.Psi(Psi())
cpp/sopt/linear_transform.h (27 changes: 25 additions & 2 deletions)
@@ -63,16 +63,20 @@ class LinearTransform : public details::WrapFunction<VECTOR> {
details::WrapFunction<VECTOR> const &indirect)
: details::WrapFunction<VECTOR>(direct), indirect_(indirect) {}
LinearTransform(LinearTransform const &c)
-   : details::WrapFunction<VECTOR>(c), indirect_(c.indirect_) {}
+   : details::WrapFunction<VECTOR>(c), indirect_(c.indirect_), norm_(c.norm_), sq_norm_(c.sq_norm_) {}
LinearTransform(LinearTransform &&c)
-   : details::WrapFunction<VECTOR>(std::move(c)), indirect_(std::move(c.indirect_)) {}
+   : details::WrapFunction<VECTOR>(std::move(c)), indirect_(std::move(c.indirect_)), norm_(c.norm_), sq_norm_(c.sq_norm_) {}
void operator=(LinearTransform const &c) {
details::WrapFunction<VECTOR>::operator=(c);
indirect_ = c.indirect_;
+ norm_ = c.norm_;
+ sq_norm_ = c.sq_norm_;
}
void operator=(LinearTransform &&c) {
details::WrapFunction<VECTOR>::operator=(std::move(c));
indirect_ = std::move(c.indirect_);
+ norm_ = c.norm_;
+ sq_norm_ = c.sq_norm_;
}

//! Indirect transform
@@ -84,9 +88,28 @@
using details::WrapFunction<VECTOR>::sizes;
using details::WrapFunction<VECTOR>::rows;

+ void set_norm(t_real n)
+ {
+   norm_ = n;
+   sq_norm_ = n*n;
+ }
+
+ sopt::t_real norm() const
+ {
+   return norm_;
+ }
+
+ sopt::t_real sq_norm() const
+ {
+   return sq_norm_;
+ }
+
private:
//! Function applying conjugate transpose operator
details::WrapFunction<VECTOR> indirect_;

+ sopt::t_real norm_ = 1.0;
+ sopt::t_real sq_norm_ = 1.0;
};

//! Helper function to creates a function operator
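A minimal usage sketch of the new accessors (reviewer illustration, not part of the diff; the identity-transform helper is the one used by the imaging headers above, and the include path is an assumption):

#include <sopt/linear_transform.h>  // assumed location of LinearTransform and linear_transform_identity

int main() {
  // Record the operator norm once on the transform; the algorithms now read
  // it from Phi() instead of taking a separate sq_op_norm parameter.
  auto phi = sopt::linear_transform_identity<double>();
  phi.set_norm(1.0);                      // stores ‖Φ‖ and caches ‖Φ‖² for sq_norm()
  sopt::t_real const nu = phi.sq_norm();  // the value previously passed as sq_op_norm
  return (nu == phi.norm() * phi.norm()) ? 0 : 1;
}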
cpp/sopt/padmm.h (13 changes: 5 additions & 8 deletions)
@@ -61,7 +61,6 @@ class ProximalADMM {
Eigen::MatrixBase<DERIVED> const &target)
: itermax_(std::numeric_limits<t_uint>::max()),
regulariser_strength_(1e-8),
- sq_op_norm_(1),
lagrange_update_scale_(0.9),
is_converged_(),
Phi_(linear_transform_identity<Scalar>()),
@@ -88,8 +87,6 @@
SOPT_MACRO(itermax, t_uint);
//! γ parameter
SOPT_MACRO(regulariser_strength, Real);
- //! ν parameter
- SOPT_MACRO(sq_op_norm, Real);
//! Lagrange update scale β
SOPT_MACRO(lagrange_update_scale, Real);
//! \brief A function verifying convergence
@@ -184,7 +181,7 @@
//! - x = Φ^T y / ν
//! - residuals = Φ x - y
std::tuple<t_Vector, t_Vector> initial_guess() const {
-   return ProximalADMM<SCALAR>::initial_guess(target(), Phi(), sq_op_norm());
+   return ProximalADMM<SCALAR>::initial_guess(target(), Phi());
}

//! \brief Computes initial guess for x and the residual using the targets
@@ -194,9 +191,9 @@
//!
//! This function simplifies creating overloads for operator() in PADMM wrappers.
static std::tuple<t_Vector, t_Vector> initial_guess(t_Vector const &target,
- t_LinearTransform const &phi, Real sq_op_norm) {
+ t_LinearTransform const &phi) {
std::tuple<t_Vector, t_Vector> guess;
- std::get<0>(guess) = static_cast<t_Vector>(phi.adjoint() * target) / sq_op_norm;
+ std::get<0>(guess) = static_cast<t_Vector>(phi.adjoint() * target) / phi.sq_norm();
std::get<1>(guess) = phi * std::get<0>(guess) - target;
return guess;
}
@@ -230,8 +227,8 @@ template <typename SCALAR>
void ProximalADMM<SCALAR>::iteration_step(t_Vector &out, t_Vector &residual, t_Vector &lambda,
t_Vector &z) const {
g_proximal(z, regulariser_strength(), -lambda - residual);
-   f_proximal(out, regulariser_strength() / sq_op_norm(),
-              out - static_cast<t_Vector>(Phi().adjoint() * (residual + lambda + z)) / sq_op_norm());
+   f_proximal(out, regulariser_strength() / Phi().sq_norm(),
+              out - static_cast<t_Vector>(Phi().adjoint() * (residual + lambda + z)) / Phi().sq_norm());
residual = static_cast<t_Vector>(Phi() * out - target());
lambda += lagrange_update_scale() * (residual + z);
}
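Reviewer note (not part of the diff): the ProximalADMM quantities keep their documented form; only the source of ν moves onto the operator. The shared initial guess remains

    x = Φ^T y / ν,   r = Φ x − y,   with ν = Phi().sq_norm()

and in iteration_step the data-fidelity proximal keeps its weight γ / ν, i.e. regulariser_strength() / Phi().sq_norm(), exactly as the updated f_proximal call above shows.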