//#include
//#include
#include "nsl_include.h"
#include <math.h>     // exp(), sqrt(); may already be pulled in by nsl_include.h

/////////////////////////////////////////////////////
nsl_vector gauss1D(nsl_data& sig, int xn)
// Implementation of a 1D Gaussian.
// ------------
{
  nsl_vector c(xn);
  float m = xn/2;                                   // integer centre of the kernel
  for (int i = 0; i < xn; i++)
    c.elem(i) = (float) exp(-(i-m)*(i-m)/(2*sig.elem()*sig.elem()))
                / (sqrt(2*3.1415)*sig.elem());
  return c;
}

/////////////////////////////////////////////////////
nsl_matrix gauss2D(nsl_data& sigma, nsl_data& x, nsl_data& y)
// Implementation of a 2D Gaussian.
// -----------
// Used for the implementation of Kernels.
{
  int xn, yn;
  xn = x.get_data();
  yn = y.get_data();
  nsl_matrix kernel(xn, yn);
  float sig = sigma.get_data();
  float f;
  int k1 = xn/2;
  int k2 = yn/2;
  const float PI = 3.14159;
  f = sig*sqrt(2.0*PI);
  for (int i = -k1; i <= k1 - (1 - xn%2); i++)      // handles even and odd mask sizes
    for (int j = -k2; j <= k2 - (1 - yn%2); j++)
      kernel.elem(i+k1, j+k2) = exp(-(i*i + j*j)/(2*sig*sig))/f;
  return kernel;
}

///////////////////////////////////////////////////
nsl_vector wrap_conv(nsl_vector& a, nsl_vector& b)
// Convolution with wrap-around boundary conditions.
// a is the mask and b is the input vector.
{
  int saimax = a.get_imax();
  int sbimax = b.get_imax();
  int sm = saimax/2;
  int simax = saimax + sbimax;
  nsl_vector c(sbimax);
  nsl_vector d(simax);
  d = d.put_sector(b, saimax/2);                    // copy b into the middle of d
  int i, m;
  for (i = sm-1; i >= 0; i--)                       // wrap the left border
    d.elem(i) = b.elem((i-sm+sbimax)%sbimax);
  for (i = sm+sbimax; i < simax; i++)               // wrap the right border
    d.elem(i) = b.elem((i-sm)%sbimax);
  for (i = 0; i < sbimax; i++) {
    num_type val = (num_type) 0;
    for (m = 0; m < saimax; m++)
      val = val + a.elem(m) * d.elem(i+m);
    c.elem(i) = val;
  }
  return c;
}

//////////////////////////////////////////////////////
nsl_vector matrix_mult(nsl_matrix& w, nsl_vector& x)
// Matrix-vector product c = w*x.
// -----------
{
  nsl_vector c(w.get_xn());
  for (int i = 0; i < w.get_xn(); i++) {
    c.elem(i) = 0;
    for (int j = 0; j < w.get_yn(); j++)            // x.get_xn()
      c.elem(i) = c.elem(i) + w.elem(i,j)*x.elem(j);
  }
  return c;
}

/* Convolution used in the 140x140 Retina */
nsl_matrix newconv(nsl_matrix& a, nsl_matrix& b)
// Current version relies on the overloaded nsl_matrix product.
{
  int sbimax = b.get_imax();
  int sbjmax = b.get_jmax();
  nsl_matrix c(sbjmax, sbjmax);
  c = a*b;
  return c;
}

/*
nsl_matrix newconv(nsl_matrix& a, nsl_matrix& b)
// a is the Mask and b is the input layer.
{
  int saimax = a.get_imax();
  int sajmax = a.get_jmax();
  int sbimax = b.get_imax();
  int sbjmax = b.get_jmax();
  int leftbound = 60;              // 32 for the 72x72; 100 for the 140x140.
  nsl_matrix c(30,30);             // Make this variable size.
                                   // c(8,8) for 72x72; c(25,25) for 140x140.
  for (int i = 0; i < leftbound; i = i+2) {
    for (int j = 0; j < leftbound; j = j+2) {
      num_type val = (num_type) 0;
      for (int m = 0; m < saimax; m++)
        for (int n = 0; n < sajmax; n++)
          val = val + a.elem(m,n) * b.elem(i+m,j+n);
      c.elem(i/2,j/2) = val;
    }
  }
  return c;
}
*/

/* Convolution used from the retina to the thalamus */
nsl_matrix conv25_1(nsl_matrix& a, nsl_matrix& b)
// a is the Mask and b is the input layer.
{
  int saimax = a.get_imax();
  int sajmax = a.get_jmax();
  int sbimax = b.get_imax();
  int sbjmax = b.get_jmax();
  int leftbound = 1;
  nsl_matrix c(1,1);               // Make this variable size.
  for (int i = 0; i < leftbound; i++) {
    for (int j = 0; j < leftbound; j++) {
      num_type val = (num_type) 0;
      for (int m = 0; m < saimax; m++)
        for (int n = 0; n < sajmax; n++)
          val = val + a.elem(m,n) * b.elem(i+m,j+n);
      c.elem(i,j) = val;
    }
  }
  return c;
}
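/*
   Usage sketch (not part of the original module): how a Gaussian mask built
   with gauss2D above would feed one of the convolution routines. The nsl_data
   initializations assume nsl_data can be assigned from a numeric literal;
   that is an assumption about the NSL API, not something shown in this file.

   nsl_data sigma, kx, ky;
   sigma = 1.5f;                            // kernel width (assumed assignable)
   kx = 5;                                  // 5x5 mask
   ky = 5;
   nsl_matrix mask = gauss2D(sigma, kx, ky);

   nsl_matrix layer(5, 5);                  // input patch, filled elsewhere
   nsl_matrix out = conv25_1(mask, layer);  // 1x1 weighted sum of the 5x5 patch
*/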
/* Convolution used for different Masks for every neuron */
nsl_matrix mult_conv(nsl_matrix& a, nsl_matrix& b)
// a is the Mask and b is the input layer.
{
  int saimax = a.get_imax();
  int sajmax = a.get_jmax();
  int sbimax = b.get_imax();
  int sbjmax = b.get_jmax();
  int leftbound = 60;              // 32 for the 72x72; 100 for the 140x140.
  nsl_matrix c(30,30);             // Make this variable size.
                                   // c(8,8) for 72x72; c(25,25) for 140x140.
  for (int i = 0; i < leftbound; i = i+2) {
    for (int j = 0; j < leftbound; j = j+2) {
      num_type val = (num_type) 0;
      for (int m = 0; m < saimax; m++)
        for (int n = 0; n < sajmax; n++)
          val = val + a.elem(m,n) * b.elem(i+m,j+n);
      c.elem(i/2,j/2) = val;
    }
  }
  return c;
}
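/*
   Sketch of the subsampling path through mult_conv (illustration only; the
   72x72 and 30x30 sizes follow the comments above, while everything else,
   including the nsl_data assignments, is assumed rather than taken from
   this file):

   nsl_data sigma, kx, ky;
   sigma = 2.0f;
   kx = 13;
   ky = 13;
   nsl_matrix mask = gauss2D(sigma, kx, ky);   // 13x13 Gaussian mask
   nsl_matrix layer(72, 72);                   // e.g. a 72x72 retinal layer
   // mult_conv steps the mask across the layer two pixels at a time, so the
   // 60x60 region bounded by leftbound collapses into a 30x30 output.
   nsl_matrix thal = mult_conv(mask, layer);
*/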