#include "xyz2shc.h"
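
// Conjugate-gradient solver for the weighted least-squares normal equations
//   (A^T W A) m = PartB_
// where A is the kernel matrix c_kernel_ (obs_num_ x shc_num_), W the data
// weights wdTwd_, and m the spherical harmonic coefficients shc_value_;
// PartB_ is the right-hand side assembled elsewhere (presumably A^T W d for
// the observations d). Iterations stop when the normalized chi-square misfit
// reaches 1.0 or after iter_times_ iterations.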
void XYZ2SHC::Optimize_CG(){
    int i, j;
    double data_object_func, PartB_m, gradient_m, gradient_m1, dTAd, ak, beta_k1;
    _1dArray gradientk, dk, Adk, Adk_part, predict_field;

    // reserve the work vectors
    gradientk.resize(shc_num_);
    dk.resize(shc_num_);
    Adk.resize(shc_num_);
    Adk_part.resize(obs_num_);
    predict_field.resize(obs_num_);
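
    // Adk_part = A * m : forward response of the current coefficients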
    #pragma omp parallel for private(i,j) schedule(guided)
    for (i = 0; i < obs_num_; i++){
        Adk_part[i] = 0.0;
        for (j = 0; j < shc_num_; j++){
            Adk_part[i] += c_kernel_[i][j]*shc_value_[j];
        }
    }
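
    // Adk = (A^T W A) m : apply the data weights, then the transposed kernel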
    #pragma omp parallel for private(i,j) schedule(guided)
    for (j = 0; j < shc_num_; j++){
        Adk[j] = 0.0;
        for (i = 0; i < obs_num_; i++){
            Adk[j] += c_kernel_[i][j]*wdTwd_[i]*Adk_part[i];
        }
    }
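
    // initial gradient g_0 = (A^T W A) m - PartB_ and steepest-descent
    // starting direction d_0 = -g_0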
    #pragma omp parallel for private(j) schedule(guided)
    for (j = 0; j < shc_num_; j++){
        gradientk[j] = Adk[j] - PartB_[j];
        dk[j] = -1.0*gradientk[j];
    }
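
    // squared norms of the right-hand side (not used below) and of the
    // initial gradient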
    PartB_m = 0.0;
    gradient_m = 0.0;
    for (j = 0; j < shc_num_; j++){
        PartB_m += PartB_[j]*PartB_[j];
        gradient_m += gradientk[j]*gradientk[j];
    }
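
    // conjugate-gradient iterations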
    for (int time = 0; time < iter_times_; time++){
        // compute the data-misfit objective function (normalized chi-square)
        #pragma omp parallel for private(i,j) schedule(guided)
        for (i = 0; i < obs_num_; i++){
            predict_field[i] = 0.0;
            for (j = 0; j < shc_num_; j++){
                predict_field[i] += c_kernel_[i][j] * shc_value_[j];
            }
        }

        data_object_func = 0.0;
        for (i = 0; i < obs_num_; i++){
            data_object_func += pow((obs_point_[i].val - predict_field[i])/obs_point_[i].dev, 2);
        }
        data_object_func /= obs_num_;

        if (data_object_func <= 1.0){
            cout << "Convergence reached!\tconvergence value = " << data_object_func << endl;
            cout << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" << endl;
            break;
        }
        else{
            cout << "Iteration times: " << time << "\tconvergence value = " << data_object_func << endl;
            MOVEUP(1);
            cout << CLEARLINE;
        }
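
        // Adk_part = A * d_k : forward response of the current search direction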
        #pragma omp parallel for private(i,j) schedule(guided)
        for (i = 0; i < obs_num_; i++){
            Adk_part[i] = 0.0;
            for (j = 0; j < shc_num_; j++){
                Adk_part[i] += c_kernel_[i][j]*dk[j];
            }
        }
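
        // Adk = (A^T W A) d_k : weighted transposed-kernel product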
        #pragma omp parallel for private(i,j) schedule(guided)
        for (j = 0; j < shc_num_; j++){
            Adk[j] = 0.0;
            for (i = 0; i < obs_num_; i++){
                Adk[j] += c_kernel_[i][j]*wdTwd_[i]*Adk_part[i];
            }
        }
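
        // curvature term d_k^T (A^T W A) d_k for the step length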
        dTAd = 0.0;
        for (j = 0; j < shc_num_; j++){
            dTAd += dk[j]*Adk[j];
        }
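
        // exact line-search step: alpha_k = (g_k^T g_k) / (d_k^T A^T W A d_k)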
        ak = gradient_m/dTAd;
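
        // update the coefficients, the gradient, and its squared norm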
        gradient_m1 = 0.0;
        for (j = 0; j < shc_num_; j++){
            shc_value_[j] += ak*dk[j];
            gradientk[j] += ak*Adk[j];
            gradient_m1 += gradientk[j]*gradientk[j];
        }
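
        // Fletcher-Reeves coefficient beta_{k+1} = g_{k+1}^T g_{k+1} / g_k^T g_k,
        // then the new search direction d_{k+1} = beta_{k+1} d_k - g_{k+1}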
        beta_k1 = gradient_m1/gradient_m;
        gradient_m = gradient_m1;
        for (j = 0; j < shc_num_; j++){
            dk[j] = beta_k1*dk[j] - gradientk[j];
        }
    }

    return;
}