Lens distortion correction division model 1p
 All Classes Files Functions Variables
lens_distortion_procedures.h
1 /*
2  Copyright (c) 2010-2013, AMI RESEARCH GROUP <lalvarez@dis.ulpgc.es>
3  License : CC Creative Commons "Attribution-NonCommercial-ShareAlike"
4  see http://creativecommons.org/licenses/by-nc-sa/3.0/es/deed.en
5  */
6 
7 
8 #ifndef LENS_DISTORTION_PROCEDURE_H
9 #define LENS_DISTORTION_PROCEDURE_H
10 
11 #include "../ami_primitives/point2d.h"
12 #include "../ami_lens_distortion/lens_distortion_model.h"
13 #include "../ami_primitives/line_points.h"
14 #include "../ami_image/image.h"
15 #include <vector>
16 using namespace std;
17 
/// Estimates the parameter of a one-parameter division (quotient) lens
/// distortion model from a collection of line primitives, keeping the
/// distortion center fixed.
/// @param distortion_center  fixed centre of the distortion model.
/// @param lines              detected line point collections used as primitives.
/// NOTE(review): the tail of this parameter list and the closing of the
/// declaration (source lines 20-21) were lost in extraction — confirm the
/// full signature against the repository before relying on it.
18 double model_estimation_1p_quotient(point2d<double> distortion_center,
19  vector< line_points > &lines ,
21 
/// Builds the look-up vector l1r used to invert the quotient distortion model:
/// one entry per integer radius up to max_distance_corner (see its use in
/// undistort_quotient_image_inverse below).
/// @param l1r                  output inverse-radius vector.
/// @param max_distance_corner  maximum radius (distance centre -> farthest corner).
/// @param a                    polynomial coefficient array of degree Na.
/// @param Na                   degree of the coefficient array.
/// @return negative on failure (callers below fall back to the input image).
22 int build_l1r_quotient_vector(std::vector<double> &l1r,
23  double max_distance_corner, double *a,int Na);
24 
/// Evaluates how well the distorted points of a line primitive fit a straight
/// line under the quotient distortion model d.
/// @param d  lens distortion model (quotient/division formulation).
/// @param l  collection of aligned points with its line equation.
/// @return   fitting measure (see implementation for the exact metric).
25 double distortion_points_to_line_equation_quotient(lens_distortion_model &d,
26  line_points &l);
27 
/**
 * Undistorts an image according to a quotient (division) lens distortion
 * model by inverse mapping: for every output pixel the corresponding
 * position in the distorted input image is computed through a tabulated
 * inverse of the radial model (vector l1r), and the colour is obtained by
 * bilinear interpolation of up to four neighbouring input pixels.
 *
 * @tparam U  channel type of the image.
 * @param input_image                 distorted input image.
 * @param image_amplification_factor  selects how the corrected image is
 *        scaled: 1. -> whole corrected image fits inside the frame,
 *        2. -> fitted to keep width, any other value -> fitted to keep height.
 * @return the corrected image; on failure (fewer than 2 model parameters, or
 *         build_l1r_quotient_vector() < 0) a copy of the input image.
 *
 * NOTE(review): several declaration lines were lost in extraction (source
 * lines 40, 56, 143, 219). The body uses `d` (the lens_distortion_model,
 * presumably a parameter on line 40), `ldm_centre` (presumably
 * d.get_distortion_center(), line 56), and the points `t` and `p`
 * (presumably declared on lines 143 and 219). Confirm against the repo.
 */
37 template <class U>
38 ami::image<U> undistort_quotient_image_inverse(
39 ami::image<U> input_image,
41 const double &image_amplification_factor
42 )
43 {
44  int width0=input_image.width();
45  int height0=input_image.height();
46  int size =width0*height0;
47  int width=width0,height=height0;
48 
49  // DISPLACEMENT OF IMAGE CENTER
50  //point2d<double> displacement=point2d<double>((double) (width-width0)/2.,(double) (height-height0)/2.);
51 
52  // WE CREATE OUTPUT IMAGE
53  ami::image<U> output_image(width,height,0,0,0);
54 
55  //CALCULATE MAXIMUM DISTANCE FROM CENTRE TO A CORNER
 // The four image corners (0,0), (0,h), (w,h), (w,0) are tested in turn and
 // the largest distance to the distortion centre is kept.
57  point2d<double> corner(0,0);
58  // cout << "Width " << width0 << " Height " << height0 << endl;
59  // cout << "LDM centre " << ldm_centre.x << " " << ldm_centre.y << endl;
60  double max_distance_corner= (ldm_centre-corner).norm();
61  corner.y=height0;
62  double distance_corner=(ldm_centre-corner).norm();
63  if(distance_corner>max_distance_corner) max_distance_corner=distance_corner;
64  corner.x=width0;
65  distance_corner=(ldm_centre-corner).norm();
66  if(distance_corner>max_distance_corner) max_distance_corner=distance_corner;
67  corner.y=0;
68  distance_corner=(ldm_centre-corner).norm();
69  if(distance_corner>max_distance_corner) max_distance_corner=distance_corner;
70 
71  // cout << "Max distance corner " << max_distance_corner << endl;
72 
73  //BUILD INTERMEDIATE VECTOR
 // The model parameters d1 (only even powers of r are used) are expanded
 // into a full polynomial coefficient array a[0..Na] with zeros on the odd
 // powers; trailing zero coefficients are then stripped from Na.
74  vector<double> l1r;
75  double *a;
76  int Na;
77  vector<double> d1=d.get_d();
78  Na=2*(d1.size()-1);
79  if(d1.size()<2) {output_image=input_image; return(output_image);}
 // NOTE(review): malloc result is not checked for NULL before a[0] is
 // written; freed once at the end of the function (single exit after here).
80  a=(double*)malloc(sizeof(double)*(Na+1));
81  a[0]=d1[0];
82  for(int i=1;i<(int)d1.size();i++){a[2*i-1]=0.; a[2*i]=d1[i]; }
83  int cont2=Na;
84  while(a[cont2]==0){ cont2--; if (cont2 == 0)break;}
85  Na=cont2;
86 
87  //WE UPDATE THE max_distance_corner ACCORDING TO LENS DISTORTION MAX DISPLACEMENT
 // Apply the quotient model r -> r/(d1[0]+d1[1]*r^2[+d1[2]*r^4]) to the
 // maximum corner radius so the inverse table covers the corrected range.
88  if(d1.size()==2){
89  max_distance_corner=max_distance_corner/(d1[0]+d1[1]*max_distance_corner*max_distance_corner);
90  }
91  else{
92  max_distance_corner=max_distance_corner/(d1[0]+d1[1]*max_distance_corner*max_distance_corner+
93  d1[2]*max_distance_corner*max_distance_corner*max_distance_corner*max_distance_corner);
94  }
95  // WE BUILD THE LENS DISTORTION INVERSE VECTOR
 // One-parameter case: delegate to build_l1r_quotient_vector(); on failure
 // fall back to returning the unmodified input image.
96  if(d1.size()==2){
97  if(build_l1r_quotient_vector(l1r,max_distance_corner,a,Na)<0){
98  output_image=input_image;
99  return(output_image);
100  }
101  }
 // Multi-parameter case: for each integer corrected radius m, solve
 // f(r)=r/sum(d1[k]*r^(2k))=m for r by Newton-Raphson with a numerical
 // (finite-difference, step h) derivative; the previous root r0 seeds the
 // next m. l1r[m] stores the ratio r/m applied later to pixel offsets.
102  else{
103  l1r.resize((int)(max_distance_corner+1.5));
104  l1r[0]=1;
105  double r0=1;
106  for(int m=1;m<(int)l1r.size();m++){
107  //WE COMPUTE THE INVERSE USING NEWTON-RAPHSON
108  double h=1e-5;
109  for(int i=0;i<1000;i++){
110  //EVALUATION OF LENS DISTORTION MODEL AND DERIVATIVE
111  double r2=r0*r0;
112  double sum=d1[0];
113  for(int k=1;k<(int)d1.size();k++){
114  sum+=d1[k]*r2;
115  r2*=r0*r0;
116  }
117  double f_r0=r0/sum;
118  //DERIVATIVE
119  r2=(r0+h)*(r0+h);
120  sum=d1[0];
121  for(int k=1;k<(int)d1.size();k++){
122  sum+=d1[k]*r2;
123  r2*=(r0+h)*(r0+h);
124  }
125  double f_r0_h=(r0+h)/sum;
126  double derivative=(f_r0_h-f_r0)/h;
127  // WE COMPUTE THE NEW ROOT
128  double r1=r0-(f_r0-m)/derivative;
 // Relative convergence test: stop once the update is below 1e-5*|r0|.
129  if(fabs(r0-r1)<fabs(r0)*1e-5){
130  r0=r1;
131  break;
132  }
133  r0=r1;
134  //printf("iter=%d,m=%d, r0=%lf\n",i,m,r0);
135  }
136  l1r[m]=r0/m;
137  //system("pause");
138  }
139  }
140 
141  // WE FIT IMAGE SCALING
 // NOTE(review): exact floating-point equality is used to dispatch on
 // image_amplification_factor — callers are expected to pass the literals
 // 1. or 2.; any other value selects the keep-height branch.
142  double scale;
144  if(image_amplification_factor==1.){ //ALL IMAGE IS INCLUDED IN CORRECTED ONE
 // Evaluate the model at the four corners and keep the SMALLEST scale so
 // the whole corrected image fits (the >scale variant is kept commented out).
145  ami::point2d<double> temp2=d.evaluation_quotient( ami::point2d<double>((double) width,(double) height));
146  scale=(temp2-ldm_centre ).norm()/ldm_centre.norm();
147 
148  temp2=d.evaluation_quotient( ami::point2d<double>((double) width,(double) 0.));
149  double scale2=(temp2-ldm_centre ).norm()/ldm_centre.norm();
150  //if(scale2>scale) scale=scale2;
151  if(scale2<scale) scale=scale2;
152 
153  temp2=d.evaluation_quotient( ami::point2d<double>((double) 0.,(double) height));
154  scale2=(temp2-ldm_centre ).norm()/ldm_centre.norm();
155  //if(scale2>scale) scale=scale2;
156  if(scale2<scale) scale=scale2;
157 
158  temp2=d.evaluation_quotient( ami::point2d<double>((double) 0.,(double) 0.));
159  scale2=(temp2-ldm_centre ).norm()/ldm_centre.norm();
160  //if(scale2>scale) scale=scale2;
161  if(scale2<scale) scale=scale2;
162 
163 
164  printf("scale=%lf\n",scale);
165 // t.x=(scale*width-width)*0.5;
166 // t.y=(scale*height-height)*0.5;
 // Translation keeps the distortion centre fixed under the scaling.
167  t.x=(scale*ldm_centre.x-ldm_centre.x);
168  t.y=(scale*ldm_centre.y-ldm_centre.y);
169  }
170  else if(image_amplification_factor==2.){ //IMAGE IS FITTED TO KEEP WIDTH
 // Scale from the horizontal extremes (x=width and x=0 at the centre row).
171  ami::point2d<double> temp2=d.evaluation_quotient( ami::point2d<double>((double) width,ldm_centre.y));
172  scale=(temp2-ldm_centre ).norm()/(ldm_centre.x);
173 
174  temp2=d.evaluation_quotient( ami::point2d<double>((double) 0.,ldm_centre.y));
175  double scale2=(temp2-ldm_centre ).norm()/(ldm_centre.x);
176  //if(scale2>scale) scale=scale2;
177  if(scale2<scale) scale=scale2;
178 
179  printf("scale=%lf\n",scale);
180 // t.x=(scale*width-width)*0.5;
181 // t.y=(scale*height-height)*0.5;
182  t.x=(scale*ldm_centre.x-ldm_centre.x);
183  t.y=(scale*ldm_centre.y-ldm_centre.y);
184  }
185  else{ //IMAGE IS FITTED TO KEEP HEIGHT
 // Scale from the vertical extremes (y=height and y=0 at the centre column).
186  ami::point2d<double> temp2=d.evaluation_quotient( ami::point2d<double>(ldm_centre.x,(double) height));
187  scale=(temp2-ldm_centre ).norm()/(ldm_centre.y);
188 
189  temp2=d.evaluation_quotient( ami::point2d<double>(ldm_centre.x,(double) 0.));
190  double scale2=(temp2-ldm_centre ).norm()/(ldm_centre.y);
191  //if(scale2>scale) scale=scale2;
192  if(scale2<scale) scale=scale2;
193 
194  printf("scale=%lf\n",scale);
195 // t.x=(scale*width-width)*0.5;
196 // t.y=(scale*height-height)*0.5;
197  t.x=(scale*ldm_centre.x-ldm_centre.x);
198  t.y=(scale*ldm_centre.y-ldm_centre.y);
199  }
200 
201 
 // Inverse mapping loop: one OpenMP task per colour channel (nc in 0..2,
 // n2 is the channel offset into the interleaved-by-plane pixel buffer).
202  int nc,n2,i,j;
203  #pragma omp parallel for \
204  shared(width,height,width0,height0,output_image,input_image,size)\
205  private(nc,i,j,n2)
206  for (nc=0;nc<3;nc++)
207  {
208  n2=nc*size;
209  //#pragma omp for nowait
210  for(i=0;i<height;i++){
211  for(j=0;j<width;j++){
 // temp: output pixel position mapped back through scale/translation.
212  ami::point2d<double> temp((double) j*scale-t.x,(double) i*scale-t.y);
213  double distance_centre= (ldm_centre-temp).norm();
214 
215  //INTERPOLATION
 // Linear interpolation of the inverse-radius table at distance_centre.
 // NOTE(review): when ind==l1r.size()-1 the l1r[ind+1] read below is one
 // past the end of the vector — confirm whether the table is built with a
 // guard entry; otherwise this is an out-of-bounds access.
216  int ind=(int)distance_centre;
217  if(ind>=(int)l1r.size()) continue;
218  double dl1r=l1r[ind]+(distance_centre-ind)*(l1r[ind+1]-l1r[ind]);
220 
 // p: the corresponding position in the distorted input image.
221  p.x=ldm_centre.x+(temp.x-ldm_centre.x)*dl1r;
222  p.y=ldm_centre.y+(temp.y-ldm_centre.y)*dl1r;
223 
224  //p=d.inverse_evaluation_quotient(temp);
225 
226  int m = (int)p.y;
227  int n = (int)p.x;
228  if(0<=m && m<height0 && 0<=n && n<width0)
229  {
230  //COLOUR INTERPOLATION
 // Bilinear blend of up to 4 neighbours; neighbours outside the image are
 // skipped and the weights renormalised through w_accum.
231  double di=p.y-m;
232  double dj=p.x-n;
233  unsigned int k=i*width+j;
234  unsigned int k0=m*width0+n;
235  double accum=0;
236  double w_accum=0;
237  double w=((1.-di)*(1.-dj));
238 
239  accum+=(double)w*input_image[k0+n2];
240  w_accum+=w;
241 
242 
243  if( (di*(1.-dj))>0. && (m+1)<height0)
244  {
245  k0=(m+1)*width0+n;
246  w=(di*(1.-dj));
247  accum+=(double)w*input_image[k0+n2];
248  w_accum+=w;
249  }
250  if( ((1-di)*(dj))>0. && (n+1)<width0)
251  {
252  k0=(m)*width0+n+1;
253  w=(1.-di)*(dj);
254  accum+=(double)w*input_image[k0+n2];
255  w_accum+=w;
256  }
257  if( ((di)*(dj))>0. && (n+1)<width0 && (m+1)<height0)
258  {
259  k0=(m+1)*width0+n+1;
260  w=(di)*(dj);
261  accum+=(double)w*input_image[k0+n2];
262  w_accum+=w;
263  }
264  if(w_accum>0.) output_image[k+n2]=(U) (accum/w_accum);
265  }
266  }
267  }
268  }
269  free(a);
270  return(output_image);
271 }
272 
273 #endif
Class to store a collection of aligned points together with the associated line equation and basic methods.
Definition: line_points.h:40
image< T > const resize(const int width, const int height) const
Definition: image.h:871
point2d< double > & get_distortion_center()
This function returns the center of the lens distortion model.
Definition: lens_distortion_model.h:82
Class to store distortion model and basic methods.
Definition: lens_distortion_model.h:38
T x
Definition: point2d.h:37
T y
Definition: point2d.h:38
std::vector< double > & get_d()
This function returns the vector with the lens distortion model parameters.
Definition: lens_distortion_model.h:52
Class to store multiChannel images and basic methods.
Definition: image.h:65