1. Algorithm Derivation
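As a brief sketch of the relations the code below implements (following the standard Madsen-Nielsen formulation of Levenberg-Marquardt): given residuals f(x) with Jacobian J,

F(x) = \frac{1}{2} f(x)^T f(x), \quad g = J^T f, \quad A = J^T J

each step solves the damped normal equations

(A + \mu I)\, h_{lm} = -g

and compares the actual reduction of F against the reduction predicted by the linear model L:

\rho = \frac{F(x) - F(x + h_{lm})}{L(0) - L(h_{lm})}, \quad L(0) - L(h_{lm}) = \frac{1}{2} h_{lm}^T (\mu h_{lm} - g)

If \rho > 0 the step is accepted and the damping is relaxed, \mu \leftarrow \mu \cdot \max(1/3,\, 1 - (2\rho - 1)^3), \nu \leftarrow 2; otherwise the step is rejected and the damping is increased, \mu \leftarrow \mu \nu, \nu \leftarrow 2\nu.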
2. Code Practice
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <algorithm>
using namespace std;
using namespace Eigen;
const double DERIV_STEP = 1e-5;
const int MAX_ITER = 100;
// residual of the objIndex-th observation for the model
// y = x1 * sin(x2 * t) + x3 * cos(x4 * t)
double func(const VectorXd& input, const VectorXd& output, const VectorXd& params, int objIndex)
{
    double x1 = params(0);
    double x2 = params(1);
    double x3 = params(2);
    double x4 = params(3);
    double t = input(objIndex);
    double f = output(objIndex);
    return x1 * sin(x2 * t) + x3 * cos(x4 * t) - f;
}
// return the residual vector assembled from func() elements
VectorXd objF(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
    VectorXd obj(input.rows());
    for (int i = 0; i < input.rows(); i++)
        obj(i) = func(input, output, params, i);
    return obj;
}
// F = (f^T * f) / 2, i.e. half the sum of squared residuals
double Func(const VectorXd& obj)
{
    return obj.squaredNorm() / 2;
}
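// central-difference numerical derivative of the objIndex-th residual
// with respect to parameter paraIndex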
double Deriv(const VectorXd& input, const VectorXd& output, int objIndex, const VectorXd& params,
             int paraIndex)
{
    VectorXd para1 = params;
    VectorXd para2 = params;
    para1(paraIndex) -= DERIV_STEP;
    para2(paraIndex) += DERIV_STEP;
    double obj1 = func(input, output, para1, objIndex);
    double obj2 = func(input, output, para2, objIndex);
    return (obj2 - obj1) / (2 * DERIV_STEP);
}
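// assemble the m x n Jacobian of the residual vector by numerical
// differentiation: Jac(i, j) = d f_i / d x_j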
MatrixXd Jacobian(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
    int rowNum = input.rows();
    int colNum = params.rows();
    MatrixXd Jac(rowNum, colNum);
    for (int i = 0; i < rowNum; i++)
    {
        for (int j = 0; j < colNum; j++)
        {
            Jac(i, j) = Deriv(input, output, i, params, j);
        }
    }
    return Jac;
}
// largest entry on the diagonal of the (approximate) Hessian
double maxMatrixDiagonal(const MatrixXd& Hessian)
{
    double maxValue = 0;
    for (int i = 0; i < Hessian.rows(); i++)
    {
        if (Hessian(i, i) > maxValue)
            maxValue = Hessian(i, i);
    }
    return maxValue;
}
// L(h) = F(x) + h^T * J^T * f + h^T * J^T * J * h / 2
// L(0) - L(h) = h^T * (u * h - g) / 2
double linearDeltaL(const VectorXd& step, const VectorXd& gradient, const double u)
{
    double L = step.dot(u * step - gradient) / 2;
    return L;
}
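// Levenberg-Marquardt main loop: solve (J^T*J + u*I) * step = g, take
// params - step, and adapt the damping u from the gain ratio rho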
void levenMar(const VectorXd& input, const VectorXd& output, VectorXd& params)
{
    int errNum = input.rows();   // number of residuals
    int paraNum = params.rows(); // number of parameters
    // initial residuals, Jacobian, approximate Hessian and gradient
    VectorXd obj = objF(input, output, params);
    MatrixXd Jac = Jacobian(input, output, params);
    MatrixXd A = Jac.transpose() * Jac;        // approximate Hessian, 4x4 here
    VectorXd gradient = Jac.transpose() * obj; // gradient g = J^T * f
    // initial parameters tau, v, epsilon1, epsilon2
    double tau = 1e-3;
    long long v = 2;
    double eps1 = 1e-12, eps2 = 1e-12;
    double u = tau * maxMatrixDiagonal(A); // damping: tau times the largest diagonal entry of A
    bool found = gradient.norm() <= eps1;  // already at a stationary point?
    if (found) return;
    int iterCnt = 0;
    while (iterCnt < MAX_ITER)
    {
        VectorXd obj = objF(input, output, params);
        MatrixXd Jac = Jacobian(input, output, params);
        MatrixXd A = Jac.transpose() * Jac;        // approximate Hessian
        VectorXd gradient = Jac.transpose() * obj; // gradient
        if (gradient.norm() <= eps1)
        {
            cout << "stop: g(x) = 0 for a local minimizer." << endl;
            break;
        }
        cout << "A: " << endl << A << endl;
        // solve (A + u*I) * step = g; the LM step is h_lm = -step
        VectorXd step = (A + u * MatrixXd::Identity(paraNum, paraNum)).inverse() * gradient;
        cout << "step: " << endl << step << endl;
        if (step.norm() <= eps2 * (params.norm() + eps2))
        {
            cout << "stop: change in x is small" << endl;
            break;
        }
        VectorXd paramsNew = params - step; // h_lm = -step
        // compare the actual reduction F(x) - F(x_new) with the reduction
        // predicted by the linear model
        VectorXd obj_new = objF(input, output, paramsNew);
        double deltaF = Func(obj) - Func(obj_new);
        double deltaL = linearDeltaL(-1 * step, gradient, u);
        double rho = deltaF / deltaL;
        cout << "rho is: " << rho << endl;
        if (rho > 0)
        {
            // step accepted: keep the new parameters and relax the damping
            params = paramsNew;
            u *= max(1.0 / 3.0, 1 - pow(2 * rho - 1, 3));
            v = 2;
        }
        else
        {
            // step rejected: increase the damping and try again
            u = u * v;
            v = v * 2;
        }
        cout << "u = " << u << " v = " << v << endl;
        iterCnt++;
        cout << "Iteration " << iterCnt << ", current parameters:" << endl << params << endl << endl;
    }
}
int main(int argc, char* argv[])
{
    // model: y = A * sin(B*x) + C * cos(D*x)
    // there are 4 parameters: A, B, C, D
    int num_params = 4;
    // generate random observations from these parameters
    int total_data = 100;
    VectorXd input(total_data);
    VectorXd output(total_data);
    double A = 5, B = 1, C = 10, D = 2;
    for (int i = 0; i < total_data; i++)
    {
        // random x in [-10, 10] plus positive noise in [0, 2)
        double x = 20.0 * ((rand() % 1000) / 1000.0) - 10.0;
        double deltaY = 2.0 * (rand() % 1000) / 1000.0;
        double y = A * sin(B * x) + C * cos(D * x) + deltaY;
        input(i) = x;
        output(i) = y;
    }
    // initial guess for the parameters
    VectorXd params_levenMar(num_params);
    params_levenMar << 3.6, 1.3, 7.2, 1.7;
    levenMar(input, output, params_levenMar);
    cout << "Levenberg-Marquardt parameters: " << endl << params_levenMar << endl;
    return 0;
}
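The program depends only on Eigen. Assuming the Eigen headers live under /usr/include/eigen3 (the path depends on your installation), it should build with something like `g++ -std=c++11 -I/usr/include/eigen3 lm.cpp -o lm`. Starting from the initial guess (3.6, 1.3, 7.2, 1.7), the fit should converge close to the generating parameters A = 5, B = 1, C = 10, D = 2; since the added noise is positive with mean 1 and the model has no constant offset term, a small bias in the result is expected.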