add a new example

张壹 2019-10-22 10:25:55 +08:00
parent bcafb30f35
commit cb3d271071
3 changed files with 119 additions and 12 deletions


@@ -39,4 +39,11 @@ add_executable(lbfgs_sample2 sample/sample2.cpp)
#
set_target_properties(lbfgs_sample2 PROPERTIES INSTALL_RPATH "/usr/local/lib")
#
target_link_libraries(lbfgs_sample2 PUBLIC lbfgs)
#
add_executable(lbfgs_sample3 sample/sample3.cpp)
#
set_target_properties(lbfgs_sample3 PROPERTIES INSTALL_RPATH "/usr/local/lib")
#
target_link_libraries(lbfgs_sample3 PUBLIC lbfgs)


@@ -777,7 +777,7 @@ const char* lbfgs_strerror(int err)
}
}
// Backtracking line search: starting from an initial step length, grow or shrink it as needed
static int line_search_backtracking(
int n,
lbfgsfloatval_t *x,
@@ -803,7 +803,7 @@ static int line_search_backtracking(
}
/* Compute the initial gradient in the search direction. */
vecdot(&dginit, g, s, n); // dot product: g is the gradient, s is the descent direction
/* Make sure that s points to a descent direction. */
if (0 < dginit) {
@@ -812,20 +812,23 @@ static int line_search_backtracking(
/* The initial value of the objective function. */
finit = *f;
dgtest = param->ftol * dginit; // ftol is the sufficient-decrease (function) tolerance
for (;;) {
veccpy(x, xp, n);
vecadd(x, s, *stp, n); // vecadd: x += (*stp) * s
/* Evaluate the function and gradient values. */
// note the use of cd here: it forwards the evaluation through a user-supplied function pointer
*f = cd->proc_evaluate(cd->instance, x, g, cd->n, *stp);
++count;
// sufficient decrease condition
if (*f > finit + *stp * dgtest) {
width = dec; // the sufficient decrease condition failed, so shrink the step
} else {
// The sufficient decrease condition holds; if the line search is backtracking with the Armijo condition we can exit. Otherwise update the step and keep searching.
/* The sufficient decrease condition (Armijo condition). */
if (param->linesearch == LBFGS_LINESEARCH_BACKTRACKING_ARMIJO) {
/* Exit with the Armijo condition. */
@@ -833,10 +836,11 @@ static int line_search_backtracking(
}
/* Check the Wolfe condition. */
vecdot(&dg, g, s, n); // check the regular Wolfe condition, which needs the gradient at the new point
if (dg < param->wolfe * dginit) {
width = inc; // dginit is normally negative, so this bound is a lower limit in the Wolfe test; the regular Wolfe condition failed, so enlarge the step
} else {
// The regular Wolfe condition holds; if the line search is backtracking with the Wolfe condition we can exit. Otherwise go on to test the strong Wolfe condition.
if(param->linesearch == LBFGS_LINESEARCH_BACKTRACKING_WOLFE) {
/* Exit with the regular Wolfe condition. */
return count;
@@ -844,7 +848,7 @@ static int line_search_backtracking(
/* Check the strong Wolfe condition. */
if(dg > -param->wolfe * dginit) {
width = dec; // the absolute-value (strong Wolfe) curvature condition failed, so shrink the step
} else {
/* Exit with the strong Wolfe condition. */
return count;
@@ -852,16 +856,20 @@ static int line_search_backtracking(
}
}
// in the cases below, the returned step is not guaranteed to satisfy the search conditions
if (*stp < param->min_step) {
/* The step is the minimum value. */
// exit: the step fell below the minimum step length
return LBFGSERR_MINIMUMSTEP;
}
if (*stp > param->max_step) {
/* The step is the maximum value. */
// exit: the step exceeded the maximum step length
return LBFGSERR_MAXIMUMSTEP;
}
if (param->max_linesearch <= count) {
/* Maximum number of iteration. */
// exit: the number of line search iterations exceeded the limit
return LBFGSERR_MAXIMUMLINESEARCH;
}
@@ -869,8 +877,7 @@ static int line_search_backtracking(
}
}
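For reference, the three acceptance tests that the comments above walk through can be written compactly. In the code, dginit is the directional derivative at the starting point, dg is the same quantity at the trial point, param->ftol plays the role of c1, and param->wolfe the role of c2 (a notational summary of the tests above, not code from the library):

\begin{aligned}
\text{Armijo (sufficient decrease):}\quad & f(x + \alpha s) \le f(x) + c_1\,\alpha\,\nabla f(x)^{\top} s \\
\text{regular Wolfe (curvature):}\quad & \nabla f(x + \alpha s)^{\top} s \ge c_2\,\nabla f(x)^{\top} s \\
\text{strong Wolfe:}\quad & \bigl|\nabla f(x + \alpha s)^{\top} s\bigr| \le c_2\,\bigl|\nabla f(x)^{\top} s\bigr|
\end{aligned}

Since s is a descent direction, dginit < 0, which is why dg < param->wolfe * dginit rejects the step from below and dg > -param->wolfe * dginit rejects it from above.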
// still a backtracking search, but extended with the L1-norm (OWL-QN) direction handling
static int line_search_backtracking_owlqn(
int n,
lbfgsfloatval_t *x,
@@ -880,7 +887,7 @@ static int line_search_backtracking_owlqn(
lbfgsfloatval_t *stp,
const lbfgsfloatval_t* xp,
const lbfgsfloatval_t* gp,
lbfgsfloatval_t *wp, // this array is used only inside this function
callback_data_t *cd,
const lbfgs_parameter_t *param
)
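For context on the OWL-QN variant (background on the method, not code shown in this diff): OWL-QN minimizes f(x) + C·|x|_1, and its line search keeps every trial point in the same orthant as the current iterate; the wp array records that orthant. Roughly, a coordinate that would cross zero is clamped to zero,

\pi_i(x;\,\xi) = \begin{cases} x_i, & \operatorname{sign}(x_i) = \operatorname{sign}(\xi_i) \\ 0, & \text{otherwise} \end{cases}

so the objective stays smooth inside the orthant and the backtracking logic above can be reused.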

src/sample/sample3.cpp (new file, 93 lines)

@@ -0,0 +1,93 @@
#include "iostream"
#include "cmath"
#include "../lib/lbfgs.h"
using std::clog;
using std::endl;
class TEST_FUNC
{
public:
TEST_FUNC();
~TEST_FUNC();
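// static wrappers: lbfgs() takes plain function pointers, so these forward to the member functions through the instance pointer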
static lbfgsfloatval_t _Func(void *instance, const lbfgsfloatval_t *x, lbfgsfloatval_t *g,
const int n, const lbfgsfloatval_t step)
{
return reinterpret_cast<TEST_FUNC*>(instance)->Func(x, g, n, step);
}
lbfgsfloatval_t Func(const lbfgsfloatval_t *x, lbfgsfloatval_t *g,
const int n, const lbfgsfloatval_t step);
static int _Progress(void *instance, const lbfgsfloatval_t *x, const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm, const lbfgsfloatval_t step,
int n, int k, int ls)
{
return reinterpret_cast<TEST_FUNC*>(instance)->Progress(x, g, fx, xnorm, gnorm, step, n, k, ls);
}
int Progress(const lbfgsfloatval_t *x, const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm, const lbfgsfloatval_t step,
int n, int k, int ls);
int Routine();
private:
lbfgsfloatval_t *m_x;
};
TEST_FUNC::TEST_FUNC()
{
m_x = lbfgs_malloc(3);          // allocate the three unknowns
m_x[0] = m_x[1] = m_x[2] = 1.0; // starting guess (1, 1, 1)
}
TEST_FUNC::~TEST_FUNC()
{
if (m_x != NULL) lbfgs_free(m_x);
}
// Test problem: find (x1, x2, x3) satisfying the nonlinear system below by minimizing the l2 norm of its residuals.
// 3 = 3*x1 + x2 + 2*x3*x3
// 1 = -3*x1 + 5*x2*x2 + 2*x1*x3
// -12 = 25*x1*x2 + 20*x3
lbfgsfloatval_t TEST_FUNC::Func(const lbfgsfloatval_t *x, lbfgsfloatval_t *g,
const int n, const lbfgsfloatval_t step)
{
double f0,f1,f2,temp;
f0 = 3*x[0] + x[1] + 2*x[2]*x[2] - 3;
f1 = -3*x[0] + 5*x[1]*x[1] + 2*x[0]*x[2] - 1;
f2 = 25*x[0]*x[1] + 20*x[2] + 12;
temp = sqrt(f0*f0+f1*f1+f2*f2);
g[0] = 0.5*(6*f0+2*f1*(2*x[2]-3)+50*f2*x[1])/temp;
g[1] = 0.5*(2*f0+20*f1*x[1]+50*f2*x[0])/temp;
g[2] = 0.5*(8*f0*x[2]+4*f1*x[0]+40*f2)/temp;
return temp;
}
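The gradient code above is the chain rule applied to the residual norm. Writing r = (f0, f1, f2) for the three residuals (a restatement of the code, for checking):

f(x) = \lVert r(x) \rVert_2 = \sqrt{f_0^2 + f_1^2 + f_2^2},
\qquad
\nabla f(x) = \frac{f_0\,\nabla f_0 + f_1\,\nabla f_1 + f_2\,\nabla f_2}{f(x)}

For example, the x[0] component is (3 f_0 + (2 x[2] - 3) f_1 + 25 x[1] f_2) / f(x), which matches g[0] once the 0.5 cancels the factors of 2.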
int TEST_FUNC::Progress(const lbfgsfloatval_t *x, const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm, const lbfgsfloatval_t step,
int n, int k, int ls)
{
clog << "iteration times: " << k << endl;
clog << x[0] << " " << x[1] << " " << x[2] << endl;
return 0;
}
int TEST_FUNC::Routine()
{
lbfgsfloatval_t fx;
int ret = lbfgs(3, m_x, &fx, _Func, _Progress, this, NULL);
clog << "L-BFGS optimization terminated with status: " << endl << lbfgs_strerror(ret) << endl;
clog << m_x[0] << " " << m_x[1] << " " << m_x[2] << endl;
return 0;
}
int main(int argc, char const *argv[])
{
TEST_FUNC test;
test.Routine();
return 0;
}
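Routine() passes NULL as the last argument of lbfgs(), which selects the library's default parameters. To tie the sample back to the line search changes above, here is a minimal sketch of the same call with an explicit parameter block (the field and enum names come from lbfgs.h; the tolerance values are illustrative, not recommendations):

int TEST_FUNC::Routine()
{
    lbfgs_parameter_t param;
    lbfgs_parameter_init(&param);   // start from the library defaults
    param.linesearch = LBFGS_LINESEARCH_BACKTRACKING_WOLFE; // stop on the regular Wolfe condition
    param.ftol = 1.0e-4;            // c1: sufficient-decrease tolerance
    param.wolfe = 0.9;              // c2: curvature tolerance (ftol < wolfe < 1)
    param.max_linesearch = 40;      // cap on trial steps per line search
    lbfgsfloatval_t fx;
    int ret = lbfgs(3, m_x, &fx, _Func, _Progress, this, &param);
    clog << "L-BFGS optimization terminated with status: " << endl << lbfgs_strerror(ret) << endl;
    return 0;
}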