Lens distortion correction division model 1p
filters.h
/*
  Copyright (c) 2010-2013, AMI RESEARCH GROUP <lalvarez@dis.ulpgc.es>
  License : CC Creative Commons "Attribution-NonCommercial-ShareAlike"
  see http://creativecommons.org/licenses/by-nc-sa/3.0/es/deed.en
*/


#ifndef AMI_DLL_H
#define AMI_DLL_H
#endif

#ifndef filters_H
#define filters_H

#include <math.h>
#include <vector>
#include "../ami_image/image.h"
#include "../ami_utilities/utilities.h"
#include "../ami_primitives/subpixel_image_contours.h"
using namespace std;

template <class T,class U>
void gauss_conv(
  ami::image<T> &img,
  ami::image<U> &img_conv,
  double sigma_x,
  double sigma_y,
  double precision)
{
  if(img.get_roi().size()==6){
    img.get_roi_image(img_conv);
  }
  else{
    if(img.width()!=img_conv.width() || img.height()!=img_conv.height() ||
       img.nChannels()!=img_conv.nChannels()){
      img_conv=ami::image<U>(img.width(),img.height(),img.nChannels());
    }
    int k,k_end=img.width()*img.height()*img.nChannels();
    #ifdef _OPENMP
    #pragma omp parallel for shared(k_end) private(k)
    #endif
    for(k=0;k<k_end;k++) img_conv[k]=(U) img[k];
  }

  unsigned int width=img_conv.width();
  unsigned int height=img_conv.height();
  unsigned int nChannels=img_conv.nChannels();
  unsigned int image_size=width*height;
  unsigned int image_size_total=image_size*nChannels;

  if(precision<0) precision=0;

  unsigned int Nc_x=(unsigned int) (precision*sigma_x>1?precision*sigma_x:1);
  unsigned int Nc_y=(unsigned int) (precision*sigma_y>1?precision*sigma_y:1);
  unsigned int Nc_a=(Nc_x>Nc_y)?Nc_x:Nc_y;
  Nc_x=Nc_y=Nc_a;
  double t_y=sigma_x*sigma_x/(2*Nc_x);
  double t_x=sigma_y*sigma_y/(2*Nc_y);
  double l_x=0.,v_x=0.,l_y=0.,v_y=0.,l_x_1=1.,l_y_1=1.;

  // WE CHECK IF BOTH STANDARD DEVIATIONS ARE 0
  if(t_x==0 && t_y==0) return;

  // RECURSIVE FILTER PARAMETERS FOR THE X AND Y DIRECTIONS
  if(t_x>0){
    l_x=(float)(1.+2.*t_x-sqrt((double) 4*t_x+1))/(2*t_x);
    v_x=l_x/t_x;
    l_x_1=(1-l_x);
  }
  if(t_y>0){
    l_y=(float)(1.+2.*t_y-sqrt((double) 4*t_y+1))/(2*t_y);
    v_y=l_y/t_y;
    l_y_1=(1-l_y);
  }

  // MAIN LOOP
  int cont;
  int c,y,x,x_end,m,y_end;
  for(cont=0;cont<(int)Nc_a;cont++){
    if(t_y>0){
      #ifdef _OPENMP
      #pragma omp parallel \
        shared(width,image_size_total,image_size,l_y,v_y,l_y_1) \
        private(c,x,y,m,x_end)
      #endif
      for(c=0;c<(int)image_size_total;c+=image_size){
        #ifdef _OPENMP
        #pragma omp for nowait
        #endif
        for(y=0;y<(int)image_size;y+=width){
          m=c+y;
          vector<U> paso(width);
          paso[0]=img_conv[m]/l_y_1;
          x_end=m+width;
          unsigned int n=1;
          for(x=m+1;x<x_end;x++,n++){
            paso[n]=img_conv[x]+l_y*paso[n-1];
          }
          img_conv[x_end-1]=paso[width-1]/l_y_1;
          n=width-2;
          for(x=x_end-2;x>m;x--,n--){
            img_conv[x]=paso[n]+l_y*img_conv[x+1];
          }
          img_conv[x]=paso[n]+l_y*img_conv[x+1];
          for(x=m;x<x_end;x++){
            img_conv[x]*=v_y;
          }
        }
      }
    }
    if(t_x>0){
      #ifdef _OPENMP
      #pragma omp parallel \
        shared(width,height,image_size_total,image_size,l_x,v_x,l_x_1) \
        private(c,x,y,m,y_end)
      #endif
      for(c=0;c<(int)image_size_total;c+=image_size){
        #ifdef _OPENMP
        #pragma omp for nowait
        #endif
        for(x=0;x<(int)width;x++){
          m=c+x;
          vector<U> paso(height);
          paso[0]=img_conv[m]/l_x_1;
          y_end=m+image_size;
          unsigned int n=1;
          for(y=m+width;y<y_end;y+=width,n++){
            paso[n]=img_conv[y]+l_x*paso[n-1];
          }
          img_conv[y_end-width]=paso[height-1]/l_x_1;
          n=height-2;
          for(y=y_end-2*width;y>m;y-=width,n--){
            img_conv[y]=paso[n]+l_x*img_conv[y+width];
          }
          img_conv[y]=paso[n]+l_x*img_conv[y+width];
          for(y=m;y<y_end;y+=width){
            img_conv[y]*=v_x;
          }
        }
      }
    }
  }
}
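
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): one way
// gauss_conv() above might be called to smooth an image into a float image.
// The function name, the sigma values and the precision value are arbitrary
// example choices.
template <class T>
inline void example_gauss_conv_usage(ami::image<T> &input)
{
  // Destination image; gauss_conv() reallocates it if the geometry differs.
  ami::image<float> smoothed(input.width(),input.height(),input.nChannels());
  // Isotropic smoothing with sigma = 2 in both directions; the last argument
  // (precision) controls the number of passes of the recursive filter.
  gauss_conv(input,smoothed,2.,2.,2.);
}
// ---------------------------------------------------------------------------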


template <class T,class U>
void grad(
  const ami::image<T> &img,
  ami::image<U> &grad_x,
  ami::image<U> &grad_y,
  const bool NeigborhoodType)
{
  if(img.get_roi().size()==6){
    ami::image<T> img2;
    img.get_roi_image(img2);
    grad(img2,grad_x,grad_y,NeigborhoodType);
    return;
  }

  unsigned int width=img.width();
  unsigned int height=img.height();
  unsigned int nChannels=img.nChannels();
  unsigned int size_image=width*height;
  unsigned int size_image_width=size_image-width;
  unsigned int total_image_size=size_image*nChannels;
  unsigned int width_1=width-1;

  if((int)width!=grad_x.width() || (int)height!=grad_x.height() ||
     (int)nChannels!=grad_x.nChannels()){
    grad_x=ami::image<U>(width,height,nChannels);
  }

  if((int)width!=grad_y.width() || (int)height!=grad_y.height() ||
     (int)nChannels!=grad_y.nChannels()){
    grad_y=ami::image<U>(width,height,nChannels);
  }

  if(NeigborhoodType==0){

    //ami::utilities u;
    vector< vector<unsigned int> > b=boundary_neighborhood_5n(width,height);
    int m,c,y,k,k_end,b_size=b.size();

    #ifdef _OPENMP
    #pragma omp parallel \
      shared(width,width_1,total_image_size,size_image,size_image_width,b,b_size)\
      private(c,y,m,k,k_end)
    #endif
    for(c=0;c<(int)total_image_size;c+=size_image){
      #ifdef _OPENMP
      #pragma omp for nowait
      #endif
      for(y=width;y<(int)size_image_width;y+=width){
        m=c+y;
        k_end=m+width_1;
        for(k=m+1; k<k_end; k++){
          grad_y[k]=(U) img[k+width]-(U) img[k-width];
          grad_x[k]=(U) img[k-1]-(U) img[k+1];
        }
      }
      #ifdef _OPENMP
      #pragma omp for nowait
      #endif
      for(int k=0;k<b_size;k++){ // IMAGE BOUNDARY GRADIENT ESTIMATION
        grad_y[b[k][0]+c]=(U) img[b[k][2]+c]-(U) img[b[k][1]+c];
        grad_x[b[k][0]+c]=(U) img[b[k][4]+c]-(U) img[b[k][3]+c];
      }
    }
  }
  else{
    vector< vector<unsigned int> > b=boundary_neighborhood_9n(width,height);
    double coef1,coef2,c1,d1;
    coef1=sqrt((double) 2.);
    coef2=0.25*(2.-coef1);
    coef1=0.5*(coef1-1);
    int m,c,y,k,l,k_end,b_size=b.size();
    #ifdef _OPENMP
    #pragma omp parallel \
      shared(width,width_1,total_image_size,size_image,size_image_width,b,b_size,\
      coef1,coef2) private(c,y,m,k,k_end,c1,d1)
    #endif
    for(c=0;c<(int)total_image_size;c+=size_image){
      #ifdef _OPENMP
      #pragma omp for nowait
      #endif
      for(y=width;y<(int)size_image_width;y+=width){
        m=c+y;
        k_end=m+width_1;
        for(k=m+1; k<k_end; k++){
          c1=img[k+width+1]-img[k-width-1];
          d1=img[k-width+1]-img[k+width-1];
          grad_y[k]=(U)(coef1*((U) img[k+width]-(U) img[k-width])+coef2*(c1-d1));
          grad_x[k]=(U)(-(coef1*((U) img[k+1]-(U) img[k-1])+coef2*(c1+d1)));
        }
      }
      #ifdef _OPENMP
      #pragma omp for nowait
      #endif
      for(l=0;l<b_size;l++){ // IMAGE BOUNDARY GRADIENT ESTIMATION
        c1=img[c+b[l][6]]-img[c+b[l][7]];
        d1=img[c+b[l][5]]-img[c+b[l][8]];
        grad_y[c+b[l][0]]=(U)(coef1*((U) img[c+b[l][2]]-(U) img[c+b[l][1]])+
                          coef2*(c1-d1));
        grad_x[c+b[l][0]]=(U)(-(coef1*((U) img[c+b[l][3]]-(U) img[c+b[l][4]])+
                          coef2*(c1+d1)));
      }
    }
  }
}
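
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): computing the image
// gradient with grad() above and deriving the gradient norm of the first
// channel, as canny() below does internally. The function name is only an
// example.
inline void example_grad_usage(ami::image<float> &blurred)
{
  ami::image<float> grad_x(blurred.width(),blurred.height(),blurred.nChannels());
  ami::image<float> grad_y(blurred.width(),blurred.height(),blurred.nChannels());
  grad(blurred,grad_x,grad_y,true); // true selects the 9-neighbour stencil
  for(int k=0;k<blurred.width()*blurred.height();k++){
    float norm=sqrt(grad_x[k]*grad_x[k]+grad_y[k]*grad_y[k]);
    (void) norm; // e.g. feed this into thresholding / non-maximum suppression
  }
}
// ---------------------------------------------------------------------------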

// FUNCTION TO COMPUTE THE MEDIAN OF A VECTOR IN FLOAT PRECISION.
// Returns the k-th smallest element (0-based) of the n-element array x;
// the input array is left untouched, the selection works on an internal copy.
float ami_median_float(int k, int n, float *x)
{
  int i,ir,j,l,mid;
  float a,*y,paso;
  ami_malloc1d(y,float,n);
  for(int mm=0;mm<n;mm++) y[mm]=x[mm];

  l=0;
  ir=n-1;
  for (;;) {
    if (ir <= l+1) {
      if (ir == l+1 && y[ir] < y[l]) {
        paso=y[l]; y[l]=y[ir]; y[ir]=paso;
      }
      a=y[k];
      free(y);
      return a;
    } else {
      mid=(l+ir) >> 1;
      paso=y[mid]; y[mid]=y[l+1]; y[l+1]=paso;
      if (y[l] > y[ir]) {
        paso=y[l]; y[l]=y[ir]; y[ir]=paso;
      }
      if (y[l+1] > y[ir]) {
        paso=y[l+1]; y[l+1]=y[ir]; y[ir]=paso;
      }
      if (y[l] > y[l+1]) {
        paso=y[l]; y[l]=y[l+1]; y[l+1]=paso;
      }
      i=l+1;
      j=ir;
      a=y[l+1];
      for (;;) {
        do i++; while (y[i] < a);
        do j--; while (y[j] > a);
        if (j < i) break;
        paso=y[i]; y[i]=y[j]; y[j]=paso;
      }
      y[l+1]=y[j];
      y[j]=a;
      if (j >= k) ir=j-1;
      if (j <= k) l=i;
    }
  }
}
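
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): ami_median_float()
// returns the k-th smallest value of the array, so percentiles are obtained
// by scaling k with the desired fraction, which is how canny() derives its
// hysteresis thresholds. The function name is only an example.
inline float example_percentile(float *values, int n, float fraction)
{
  int k=(int)(fraction*n); // e.g. fraction = 0.5f gives the median
  if(k<0) k=0;
  if(k>n-1) k=n-1;
  return ami_median_float(k,n,values);
}
// ---------------------------------------------------------------------------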

// THIS PROCEDURE FILLS THE MAP RECURSIVELY (USED BY CANNY)
static void fill_imap(ami::image<int> &imap,
                      float *gradient_norm,
                      double low_threshold,
                      int k)
{
  if(imap[k]==1) return;
  int width = imap.width();
  imap[k] = 3;
  // NEIGHBOURS = W, E, N, S, NW, NE, SW, SE
  int nv[8] = {k-1, k+1, k-width, k+width,
               k-width-1, k-width+1, k+width-1, k+width+1};
  // EXPLORE THE NEIGHBOURS OF THE PIXEL
  for(int iv=0; iv<8; iv++)
  {
    int nk = nv[iv];
    if(imap[nk]==3) continue;
    if((imap[nk]==0) && (gradient_norm[nk]>low_threshold))
      fill_imap(imap, gradient_norm, low_threshold, nk);
    else
      imap[nk] = 1;
  }
}
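
// ---------------------------------------------------------------------------
// Illustrative sketch (an alternative, not code from the original header): an
// iterative, stack-based version of fill_imap(). It is intended to mark the
// same pixels while avoiding the recursion depth that fill_imap() can reach
// on long connected edge chains; the function name is only an example.
inline void fill_imap_iterative(ami::image<int> &imap,
                                float *gradient_norm,
                                double low_threshold,
                                int k)
{
  int width=imap.width();
  std::vector<int> pending(1,k);
  while(!pending.empty()){
    int p=pending.back(); pending.pop_back();
    if(imap[p]==1 || imap[p]==3) continue;
    imap[p]=3;
    // NEIGHBOURS = W, E, N, S, NW, NE, SW, SE
    int nv[8]={p-1, p+1, p-width, p+width,
               p-width-1, p-width+1, p+width-1, p+width+1};
    for(int iv=0; iv<8; iv++){
      int nk=nv[iv];
      if(imap[nk]==3) continue;
      if(imap[nk]==0 && gradient_norm[nk]>low_threshold)
        pending.push_back(nk);
      else
        imap[nk]=1;
    }
  }
}
// ---------------------------------------------------------------------------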

template<class T>
void canny(ami::image<T> input,
           ami::image<T> &output,
           float *seno,
           float *coseno,
           int *x,
           int *y,
           float per_low,
           float per_high)
{
  //cout << "AMI::FILTERS::CANNY STARTS" << endl;
  bool neighborhoodtype = true; // 9 NEIGHBOURS FOR THE GRADIENT
  int width = input.width();
  int height = input.height();
  int size = width*height;
  // THRESHOLDS
  float low_threshold,high_threshold;
  // MAP
  ami::image<int> imap(width,height,1,0);
  // NORM OF THE GRADIENT
  float *gradient_norm = new float[size];
  // GRADIENT COMPONENTS
  ami::image<float> x_grad(width,height,input.nChannels(),0);
  ami::image<float> y_grad(width,height,input.nChannels(),0);
  // IMAGE FOR THE RESULT OF THE CONVOLUTION WITH THE GAUSSIAN
  ami::image<float> blurred(width,height,input.nChannels(),0);

  // WE APPLY A GAUSSIAN CONVOLUTION TO THE INPUT IMAGE (NOISE REDUCTION)
  gauss_conv(input,blurred,2.,2.,2.);

  // WE COMPUTE THE GRADIENT OF THE BLURRED IMAGE
  grad(blurred,x_grad,y_grad,neighborhoodtype);
  for(int k=0; k<size; k++)
    gradient_norm[k] = sqrt((x_grad[k]*x_grad[k]) + (y_grad[k]*y_grad[k]));

  // WE COMPUTE THE THRESHOLDS AS PERCENTILES OF THE GRADIENT NORM
  low_threshold = ami_median_float((int)(per_low*size), size, gradient_norm);
  high_threshold = ami_median_float((int)(per_high*size), size, gradient_norm);
  // WE IMPOSE A MINIMUM VALUE FOR THE THRESHOLDS
  low_threshold=(low_threshold<2)?2:low_threshold;
  high_threshold=(high_threshold<4)?4:high_threshold;

  for(int i=0; i<height; i++)
  {
    for(int j=0; j<width; j++)
    {
      int pos = i*width+j;
      // WE SET THE MAP TO 2 FOR VALUES > HIGH_THRESHOLD
      if(gradient_norm[pos] > high_threshold)
      {
        imap[pos] = 2;
      }
      // LIMITS OF THE IMAGE
      if((i==0) || (i==height-1) || (j==0) || (j==width-1))
      {
        imap[pos] = 1;
      }
    }
  }

  // NON-MAXIMUM SUPPRESSION
  float c = 1/(sqrt(2.));
  for(int i=1; i<height-1; i++)
  {
    for(int j=1; j<width-1; j++)
    {
      int k = i*width+j;
      // ORIENTATIONS
      float o1 = fabs(x_grad[k]);
      float o2 = fabs(y_grad[k]);
      float o3 = fabs(c*(x_grad[k]+y_grad[k]));
      float o4 = fabs(c*(x_grad[k]-y_grad[k]));
      // NEIGHBOURS
      float neigh1=0;
      float neigh2=0;
      // WE SELECT THE MAXIMUM OF THE ORIENTATIONS
      float candidate = max(o1,max(o2,max(o3,o4)));
      // WE TAKE THE VALUE OF THE NEIGHBOURS BASED ON THE MAXIMUM ORIENTATION
      // 0 DEGREES
      if(candidate == o1)
      {
        // W - E
        neigh1 = gradient_norm[k-1];
        neigh2 = gradient_norm[k+1];
      }
      // 90 DEGREES
      if(candidate == o2)
      {
        // N - S
        neigh1 = gradient_norm[k-width];
        neigh2 = gradient_norm[k+width];
      }
      // 45 DEGREES
      if(candidate == o3)
      {
        // NE - SW
        neigh1 = gradient_norm[k-width+1];
        neigh2 = gradient_norm[k+width-1];
      }
      // 135 DEGREES
      if(candidate == o4)
      {
        // NW - SE
        neigh1 = gradient_norm[k-width-1];
        neigh2 = gradient_norm[k+width+1];
      }
      // WE UPDATE THE RESULT ACCORDING TO THE MAXIMUM VALUE OF THE GRADIENT
      // NORM BETWEEN THE POINT AND ITS NEIGHBOURS
      float p_grad = gradient_norm[k]; // GRADIENT VALUE AT THE POINT
      if((p_grad>neigh1) && (p_grad>neigh2))
      {
        if(gradient_norm[k] >= high_threshold)
          imap[k] = 2;
        else
          imap[k] = 0;
      }
      else
      {
        imap[k] = 1;
      }
    }
  }

  // HYSTERESIS PROCESS OF THE CANNY METHOD
  for(int k=0; k<size; k++)
  {
    if(imap[k]==2)
    {
      // NEIGHBOURS = W, E, N, S, NW, NE, SW, SE
      int nv[8] = {k-1, k+1, k-width, k+width,
                   k-width-1, k-width+1, k+width-1, k+width+1};
      for(int iv=0; iv<8; iv++)
      {
        int nk = nv[iv];
        if(imap[nk]>1) continue;
        if((imap[nk]==0) && (gradient_norm[nk]>low_threshold))
        {
          fill_imap(imap, gradient_norm, low_threshold, nk);
        }
        else
          imap[nk] = 1;
      }
    }
  }

  /*float maxi=0.;
  for(int i=0; i<size; i++)
  {
    maxi = max(maxi, gradient_norm[i]);
  }*/

  // FILL THE OUTPUT IMAGE TAKING INTO ACCOUNT THE IMAP IMAGE VALUES
  // THE EDGES ARE THE PIXELS WITH MAP > 1
  for(int i=1; i<height-1; i++)
  {
    for(int j=1; j<width-1; j++)
    {
      int pos = i*width+j;
      if(imap[pos]>1)
      {
        output[pos] = 255;
        // VALUE OF THE EDGES (NORMALIZED TO THE RANGE 0..255)
        /*output[pos] = (unsigned char)(255*gradient_norm[pos]/maxi);
        output[pos+size] = (unsigned char)(255*gradient_norm[pos]/maxi);
        output[pos+size*2] = (unsigned char)(255*gradient_norm[pos]/maxi);*/

        // ORIENTATION OF THE EDGE
        coseno[pos] = x_grad[pos]/gradient_norm[pos];
        seno[pos] = y_grad[pos]/gradient_norm[pos];

        // POSITION OF THE EDGE
        x[pos] = j;
        y[pos] = i;
      }
      else
      {
        // BACKGROUND
        output[pos] = 0;
        /*output[pos+size] = 0;
        output[pos+size*2] = 0;*/
      }
    }
  }
  delete []gradient_norm;
  //cout << "AMI::FILTERS::CANNY ENDS" << endl;
}
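
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): running
// canny() above on an image. The per_low/per_high values (0.7 and 0.9) are
// arbitrary example percentile fractions; the function name is only an
// example.
template <class T>
inline void example_canny_usage(ami::image<T> &input)
{
  int size=input.width()*input.height();
  ami::image<T> edges(input.width(),input.height(),input.nChannels(),0);
  std::vector<float> sine(size),cosine(size);
  std::vector<int> x(size),y(size);
  canny(input,edges,&sine[0],&cosine[0],&x[0],&y[0],0.7f,0.9f);
  // edges now holds 255 on edge pixels and 0 elsewhere; sine/cosine give the
  // edge orientation and x/y the integer edge locations at those pixels.
}
// ---------------------------------------------------------------------------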

template<class T>
ami::subpixel_image_contours /* function name not shown in this listing */ (
  ami::image<T> input,
  ami::image<T> &edges,
  float canny_low_threshold,
  float canny_high_threshold)
{
  int size_=input.width()*input.height();
  float *seno = new float[size_];   // edge point orientation
  float *coseno = new float[size_]; // edge point orientation
  int *x_pos = new int[size_];      // edge point location
  int *y_pos = new int[size_];      // edge point location
  // WE COMPUTE THE EDGES
  canny(input,edges,seno,coseno,x_pos,y_pos,canny_low_threshold,
        canny_high_threshold);

  // Filling the subpixel_image_contours object to call the Hough transform
  ami::subpixel_image_contours contours(input.width(),input.height());
  for(int i=0; i<size_; i++){
    if(edges[i]>0){ // edge point condition
      contours.get_c()[i] = true;
      contours.get_x()[i] = (float)x_pos[i];
      contours.get_y()[i] = (float)y_pos[i];
      contours.get_coseno()[i] = seno[i];
      contours.get_seno()[i] = coseno[i];
    }
    else{
      contours.get_c()[i] = false;
    }
  }
  delete []seno;
  delete []coseno;
  delete []x_pos;
  delete []y_pos;

  invert(edges,edges); // inverting black-white colors

  return(contours);
}
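
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): reading back the
// edge points stored in an ami::subpixel_image_contours object such as the
// one returned above, using the accessors that appear in this file. The
// function name is only an example.
inline void example_read_contours(ami::subpixel_image_contours &contours,
                                  int width, int height)
{
  for(int i=0;i<width*height;i++){
    if(contours.get_c()[i]){ // true on contour points
      float px=contours.get_x()[i];
      float py=contours.get_y()[i];
      float s=contours.get_seno()[i];
      float c=contours.get_coseno()[i];
      (void) px; (void) py; (void) s; (void) c; // e.g. feed into a Hough vote
    }
  }
}
// ---------------------------------------------------------------------------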
#endif