-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdriver5.c
186 lines (156 loc) · 4.51 KB
/
driver5.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/* Although there is a rigorous theory justifying a Wolfe line search,
the performance of the Approximate Wolfe line search is often superior.
Nonetheless, the user can turn off the Approximate Wolfe line search
by setting AWolfe to FALSE and AWolfeFac to 0. (Since AWolfe is
FALSE by default, we only need to adjust AWolfeFac). When the
code detects that the Wolfe line search fails, then it will
automatically attempt the approximate Wolfe line search. To
see that the Wolfe line search failed, we also need to set the
PrintLevel to at least 1. The synopsis of the output is the following:
....
iter: 26 f = -6.530787e+02 gnorm = 3.634731e-07 AWolfe = 0
QuadOK: 0 initial a: 4.310872e-01 f0: -6.530787e+02 dphi: -4.952636e-13
Wolfe line search
WOLFE LINE SEARCH FAILS
Approximate Wolfe line search
RESTART CG
iter: 27 f = -6.530787e+02 gnorm = 1.823353e-07 AWolfe = 1
QuadOK: 0 initial a: 4.842801e-01 f0: -6.530787e+02 dphi: -2.079947e-13
Approximate Wolfe line search
....
Termination status: 0
Convergence tolerance for gradient satisfied
maximum norm for gradient: 9.781479e-09
function value: -6.530787e+02
cg iterations: 32
function evaluations: 155
gradient evaluations: 148
===================================
The large number of function and gradient evaluations arose when the
Wolfe line search failed. If the error tolerance is increased to 1.e-6,
then the Wolfe line search is successful, giving the following results:
Termination status: 0
Convergence tolerance for gradient satisfied
maximum norm for gradient: 8.773852e-07
function value: -6.530787e+02
cg iterations: 24
function evaluations: 46
gradient evaluations: 30
On the other hand, if we turn on the Approximate Wolfe line search by
resetting AWolfeFac to its default value, we can solve the
original problem with error tolerance 1.e-8 in far fewer iterations:
Termination status: 0
Convergence tolerance for gradient satisfied
maximum norm for gradient: 6.244802e-09
function value: -6.530787e+02
cg iterations: 32
function evaluations: 54
gradient evaluations: 46 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cg_user.h"
/* objective value f(x) for the n-dimensional test problem; User is the
   opaque pass-through pointer cg_descent forwards (unused here) */
double myvalue
(
double *x,
INT n,
void *User
) ;
/* gradient of the objective: g[i] = df/dx_i evaluated at x */
void mygrad
(
double *g,
double *x,
INT n,
void *User
) ;
/* combined evaluation: returns f(x) and fills g with the gradient
   (one exp per component instead of two when done separately) */
double myvalgrad
(
double *g,
double *x,
INT n,
void *User
) ;
/* Driver: solve the test problem three ways to compare the Wolfe and
   Approximate Wolfe line searches (see the header comment for the
   expected output).  Returns 0 on success, 1 if work space cannot
   be allocated. */
int main (void)
{
    double *x ;
    INT i, n ;
    cg_parameter Parm ;

    /* allocate work space (no cast of malloc; sizeof *x tracks the type) */
    n = 100 ;
    x = malloc (n*sizeof (*x)) ;
    if ( x == NULL )
    {
        fprintf (stderr, "main: unable to allocate %ld doubles\n", (long) n) ;
        return (1) ;
    }

    /* starting guess */
    for (i = 0; i < n; i++) x [i] = 1. ;

    /* set default parameter values */
    cg_default (&Parm) ;

    /* turn off the approximate Wolfe line search (AWolfe is already
       FALSE by default, so only AWolfeFac must be cleared) */
    Parm.AWolfeFac = 0. ;
    Parm.PrintLevel = 1 ;   /* needed to see the line search failure */

    /* solve the problem with error tolerance 1.e-8 */
    cg_descent(x, n, NULL, &Parm, 1.e-8, myvalue, mygrad, myvalgrad, NULL, NULL, NULL) ;

    /* starting guess */
    for (i = 0; i < n; i++) x [i] = 1. ;

    /* solve the problem with error tolerance 1.e-6 */
    cg_descent(x, n, NULL, &Parm, 1.e-6, myvalue, mygrad, myvalgrad, NULL, NULL, NULL) ;

    /* starting guess */
    for (i = 0; i < n; i++) x [i] = 1. ;

    /* restore default parameter value for AWolfeFac */
    cg_default (&Parm) ;

    /* solve the problem with error tolerance 1.e-8 */
    cg_descent(x, n, NULL, &Parm, 1.e-8, myvalue, mygrad, myvalgrad, NULL, NULL, NULL) ;

    free (x) ; /* free work space */
    return (0) ;
}
/* Objective function: f(x) = sum_{i=1}^{n} exp(x_{i}) - sqrt(i)*x_{i}.
   User is the pass-through pointer supplied to cg_descent (unused). */
double myvalue
(
    double *x,
    INT n,
    void *User
)
{
    double sum ;
    INT k ;
    sum = 0. ;
    for (k = 0; k < n; k++)
    {
        double root ;
        root = sqrt ((double) (k + 1)) ;
        sum += exp (x [k]) - root*x [k] ;
    }
    return (sum) ;
}
/* Gradient of the objective: g[i] = exp(x_{i}) - sqrt(i+1) for each
   component (1-based sqrt index).  User is unused. */
void mygrad
(
    double *g,
    double *x,
    INT n,
    void *User
)
{
    INT k ;
    for (k = 0; k < n; k++)
    {
        double root ;
        root = sqrt ((double) (k + 1)) ;
        g [k] = exp (x [k]) - root ;
    }
}
/* Combined value/gradient evaluation.  Returns f(x) and stores the
   gradient in g; computing both at once shares the single exp per
   component (the expensive part of the objective).  User is unused. */
double myvalgrad
(
    double *g,
    double *x,
    INT n,
    void *User
)
{
    double total ;
    INT k ;
    total = 0. ;
    for (k = 0; k < n; k++)
    {
        double expx, root ;
        root = sqrt ((double) (k + 1)) ;
        expx = exp (x [k]) ;
        total += expx - root*x [k] ;
        g [k] = expx - root ;
    }
    return (total) ;
}