// DLM.h
// This version is for the NSL-book
// Author: (c) Laurenz Wiskott, wiskott@salk.edu, 96-03-11 ;
// read DLMreadme ;

/* Layer 1 is the object layer; the model layers are a vector of layers 2
   (except the first one, which is an average face).  The indices \nu and
   \mu become (i1,j1) and (i2,j2) or vice versa; i1={0,1,...,i1max-1}, ... ,
   j2={0,1,...,j2max-1}.

   Besides the complete all-to-all connectivity, the program allows you to
   define a more local one.  Each cell in the layers 2 is then connected to
   a smaller patch of size i1Rmax * j1Rmax on layer 1.  The patches are
   equally distributed in the correct spatial order.  With this local
   connectivity the system loses its invariance against rotation, but the
   features are not rotation invariant anyway.  Due to the prerestricted
   connectivity the system converges more rapidly and reliably.

   A frame is put around layer 1 in order to give the attention blob space
   to move to the border.  The neurons of the frame are not connected to
   layers 2.  If layer 1 is of the same size as layer 2, no attention blob
   is required and the frame is not necessary. */

/*
//#define SMALL_LAYER   // layer 1 has only 10x10 instead of 16x17 neurons ;
#ifndef SMALL_LAYER
#define SMALL_PATCHES   // neurons 2 are only connected to ... ;
                        // ... 8x8 patches on layer 1 ;
#endif // SMALL_LAYER ;
*/

#define WIN_DISPLAY
#ifdef WIN_DISPLAY
//#define WIN_FRAME     // the frame is displayed as well ;
#endif // WIN_DISPLAY ;

/*
#ifdef SMALL_LAYER
const int frame = 0;          // width of the frame around layer 1
const int i1max = 10+2*frame; // size of the image layer 1, including frame;
const int j1max = 10+2*frame;
#else // SMALL_LAYER ;
const int frame = 2;
const int i1max = 17+2*frame;
const int j1max = 16+2*frame;
#endif // SMALL_LAYER ;
#ifdef SMALL_PATCHES
const int i1Rmax= 8;          // size of the projection patches .. ;
const int j1Rmax= 8;          // ...
from model layer 2 to image layer 1 ; #else // SMALL_PATCHES ; const int i1Rmax= i1max-2*frame; const int j1Rmax= j1max-2*frame; #endif // SMALL_PATCHES ; const int i2max = 10; // size of the model layers 2 ; const int j2max = 10; */ inline int ij2Index(int i2, int j2, int j2max) {return i2*j2max+j2;}; inline int i2Index(int ij2, int j2max) {return ij2/j2max;}; inline int j2Index(int ij2, int j2max) {return ij2%j2max;}; inline int ij1Index(int i1, int j1, int j1max) {return i1*j1max+j1;}; inline int i1Index(int ij1, int j1max) {return ij1/j1max;}; inline int j1Index(int ij1, int j1max) {return ij1%j1max;}; inline int ij1RIndex(int i1R, int j1R, int j1Rmax) {return i1R*j1Rmax+j1R;}; inline int i1RIndex(int ij1R, int j1Rmax) {return ij1R/j1Rmax;}; inline int j1RIndex(int ij1R, int j1Rmax) {return ij1R%j1Rmax;}; inline int i1Index(int i2, int i1R, int frame,int i1max,int i1Rmax,int i2max) {return i1R + frame + int((i1max-2*frame-i1Rmax)*i2/float(i2max-1)+0.5);}; inline int j1Index(int j2, int j1R, int frame,int j1max,int j1Rmax,int j2max) {return j1R + frame + int((j1max-2*frame-j1Rmax)*j2/float(j2max-1)+0.5);}; inline int i1RIndex(int i2, int i1, int frame,int i1max,int i1Rmax,int i2max) {return i1 - frame - int((i1max-2*frame-i1Rmax)*i2/float(i2max-1)+0.5);}; inline int j1RIndex(int j2, int j1, int frame,int j1max,int j1Rmax,int j2max) {return j1 - frame - int((j1max-2*frame-j1Rmax)*j2/float(j2max-1)+0.5);}; const int gSize=9; // size of the one dimensional Gauss kernel ; #ifdef WIN_DISPLAY // ---- display variables ; /* Beside the standard NSL display we use a special display for the trajectory of the blob and the net display of the connectivity. It is a C++-class called "XMonoWin" with very simple X11 display routines. 
*/ const int winWidth=220; // size of the display window ; #endif // WIN_DISPLAY ; // ==== simple minimum and maximum function ============================ ; inline float minimum(float val1, float val2) {if (val1val2) return val1; else return val2;} // ==== compute center of gravity of layer activity =================== ; extern void centerOfGravity(nsl_matrix &Sh, float ¢erI, float ¢erJ); // ==== input function ================================================ ; inline float inputFunc(float val1, float val2) { return maximum(val1, val2);} /* For the sake of efficiency some operations such as the modification of the dynamic links or the display of several layers takes place only after loops single iterations. The correlation for the modification is accumulated in each simulation step. */ class Recognition : public NslModule { public: NslDinFloat1 Sh2Sum; // sum over Sh2 ; private: NslFloat1 rec; // recognition variable ; NslFloat0 lambda_r; // time constant for the recognition dynamics ; NslFloat0 r_theta; // threshold for model suppression ; int gallerySizeMax; public: Recognition(nsl_string, NslModule*); ~Recognition() {} int memAlloc(int); int initSysTemp(NslFloat0&); int simRunTemp(NslFloat0&,NslFloat0&,int*,int,int); }; class H1 : public NslModule { public: NslDinFloat2 h1Input; NslDinFloat2 Sa1; NslDoutFloat2 Sh1; private: NslFloat2 h1TransE; NslFloat2 h1; NslFloat2 s1; NslFloat2 d1; NslFloat0 beta_h; // strength of global inhibition ; NslFloat0 beta_ac; NslFloat0 kappa_hs; // strength of self-inhibition ; NslFloat0 kappa_hh; // strength of mutual interaction ; NslFloat0 kappa_ha; NslFloat0 lambda_p; // decay constant for h-s>0 ; NslFloat0 lambda_m; // decay constant for h-s<0 ; int i1max, j1max; public: H1(nsl_string, NslModule*); ~H1() {} int memAlloc(int,int); int initSysTemp(); int simRunTemp(NslFloat0&,NslFloat1&,NslFloat0&,int, XMonoWin&); }; class H2 : public NslModule { public: NslDinFloat3 h2Input; NslDinFloat2 Sa2; NslDoutFloat1 Sh2Sum; // sum over 
Sh2 ; NslDoutFloat2 Sh2max; // maximum activity in the layers 2 ; NslDoutFloat3 Sh2; private: NslFloat2 h2TransE; // excitatoryly transferred signal ; NslFloat3 h2; NslFloat3 s2; NslFloat2 d2; NslFloat0 beta_h; // strength of global inhibition ; NslFloat0 beta_ac; NslFloat0 kappa_hs; // strength of self-inhibition ; NslFloat0 kappa_hh; // strength of mutual interaction ; NslFloat0 kappa_ha; NslFloat0 lambda_p; // decay constant for h-s>0 ; NslFloat0 lambda_m; // decay constant for h-s<0 ; NslFloat2 showh2Input; // layers to display a model layer ; NslFloat2 showh2; NslFloat2 showSh2; int gallerySizeMax,i2max,j2max; public: H2(nsl_string, NslModule*); ~H2() {} int memAlloc(int,int,int); int initSysTemp(NslFloat0&); int simRunTemp(NslFloat0&,NslFloat0&,NslFloat0&,int*,int,int, NslFloat1&,NslFloat0&,NslFloat0&,NslFloat0&,XMonoWin&); }; class Attention1 : public NslModule { public: NslDoutFloat2 Sa1; NslDinFloat2 Sh1; private: NslFloat2 a1; // attention blob on layers 1 ; NslFloat2 a1TransE; // excitatoryly transfered signal ; NslFloat2 Sa1LI; // cell activities leaky integrator ; NslFloat0 lambda_a; // time constant for the attention dynamics ; NslFloat0 beta_a; // strength of global inhibition for attention blob ; NslFloat0 kappa_ah; // effect of running blob on attention blob ; NslFloat0 alpha_N; // parameter for attention blob initialization ; int i1max,j1max; public: Attention1(nsl_string, NslModule*); ~Attention1() {} int memAlloc(int,int); int initRunTemp(NslFloat0&,NslFloat0&); int simRunTemp(NslFloat0&,NslFloat1&,NslFloat0&); }; class Attention2 : public NslModule { public: NslDoutFloat2 Sa2; NslDinFloat2 Sh2max; // maximum activity in the layers 2 ; /* Since the dynamics on layers 2 are very similar, only one attention blob is simulated for all layers 2 for sake of efficiency. 
*/ private: NslFloat2 a2; // attention blob on layers 2 ; NslFloat2 a2TransE; // excitatoryly transfered signal ; NslFloat0 lambda_a; // time constant for the attention dynamics ; NslFloat0 beta_a; // strength of global inhibition for attention blob ; NslFloat0 kappa_ah; // effect of running blob on attention blob ; int i2max,j2max; public: Attention2(nsl_string, NslModule*); ~Attention2() {} int memAlloc(int,int); int initRunTemp(NslFloat0&); int simRunTemp(NslFloat0&,NslFloat0&,NslFloat1&,NslFloat0&); }; class Correlation : public NslModule { public: NslDinFloat2 Sh1; NslDinFloat3 Sh2; NslDoutFloat3 correl21Sum; #ifdef WIN_DISPLAY NslFloat3 correl21LI; // leaky correlation integrated ; NslFloat2 showcorrel21LI; #endif // WIN_DISPLAY ; NslFloat0 lambda_c; // rate for the leaky correlation integrater ; #ifdef WIN_DISPLAY NslFloat0 netWeightExp; // exponent for the net display computation ; NslFloat2 gravI2; // center of gravity for the connections ; NslFloat2 gravJ2; // center of gravity for the connections ; #endif // WIN_DISPLAY ; int gallerySizeMax,i1max,j1max,i2max,j2max,i1Rmax,j1Rmax; public: Correlation(nsl_string,NslModule*); ~Correlation(){} int memAlloc(int,int,int,int,int,int,int); int initSysTemp(NslFloat0&); int initRunTemp(int,XMonoWin&); int simRunTemp(NslFloat0&,NslFloat0&,int*,int,int,NslFloat0&, int,XMonoWin&); }; class W12 : public NslModule { public: NslDinFloat3 S21; NslDinFloat3 Sh2; NslDinFloat3 correl21Sum; NslDoutFloat2 h1Input; private: NslFloat3 w12; NslFloat0 lambda_W; NslFloat1 normFactor1; NslFloat2 showW12; NslFloat1 w12Sums; int gallerySizeMax,i1max,j1max,i2max,j2max,i1Rmax,j1Rmax; public: W12(nsl_string,NslModule*); ~W12(){} int memAlloc(int,int,int,int,int,int,int); int initRunTemp(NslFloat0&,int,XMonoWin&); int simRunTemp(NslFloat0& ,NslFloat0&,NslFloat0&,int*,int,int,NslFloat0&, int,XMonoWin&); }; class W21 : public NslModule { public: NslDinFloat3 S21; NslDinFloat2 Sh1; NslDinFloat3 correl21Sum; NslDoutFloat3 h2Input; private: 
NslFloat3 w21; NslFloat0 lambda_W; NslFloat1* normFactor2[gallerySizeMax0]; NslFloat2 showW21; NslFloat1 w21Sums; #ifdef WIN_DISPLAY NslFloat0 netWeightExp; // exponent for the net display computation ; NslFloat2 gravI2; // center of gravity for the connections ; NslFloat2 gravJ2; // center of gravity for the connections ; #endif // WIN_DISPLAY ; int gallerySizeMax,i1max,j1max,i2max,j2max,i1Rmax,j1Rmax; public: W21(nsl_string,NslModule*); ~W21(){} int memAlloc(int,int,int,int,int,int,int); int initSysTemp(NslFloat0&); int initRunTemp(NslFloat0&,int,XMonoWin&); int simRunTemp(NslFloat0& ,NslFloat0&,NslFloat0&,int*,int,int,NslFloat0&, int,XMonoWin&); }; class DLM : public NslModel { public: DLMSimilarity similarity; Recognition recognition; Correlation correlation; W12 w12; W21 w21; H1 h1; H2 h2; Attention1 a1; Attention2 a2; private: /* #ifdef WIN_DISPLAY NslFloat0 netWeightExp; // exponent for the net display computation ; NslFloat2 gravI2; // center of gravity for the connections ; NslFloat2 gravJ2; // center of gravity for the connections ; #endif // WIN_DISPLAY ; */ NslInt0 smallLayer; NslInt0 smallPatches; NslInt0 gallerySize; NslInt0 loops; // number of cheap simulation steps ; int loop; // loop counter ; int gallerySizeMax; int frame; // width of the frame around layer 1 int i1max; // size of the image layer 1, including frame; int j1max; int i2max; // size of the model layers 2 ; int j2max; int i1Rmax; // size of the projection patches .. ; int j1Rmax; // ... from model layer 2 to image layer 1 ; /* The reason for the +1 is that one of the layers 2 (index 0) is reserved for the simulation on the average face connectivity. */ NslFloat0 rho; // slope radius of squashing function ; // ---- Gauss kernel ; /* The convolution with a two-dimensional Gaussian kernel can be separated into two one-dimensional convolutions. The number of units for this one-dimensional kernel is gSize. 
*/ NslFloat0 sigma_g; // Gauss width of excitatory kernel ; NslFloat1 g; // Gaussian interaction kernel ; #ifdef WIN_DISPLAY XMonoWin winT1; // display window ; #endif // WIN_DISPLAY ; /* Since not all layers 2 can be displayed, only one layer 2 is copied into respective 'show' layers. You can either select one special layer to be displayed indicated by the variable 'preferredLayer' or if this variable does not have a valid value, the layer with the highest activity is displayed. */ NslFloat0 preferredLayer; // number of the layer 2 to show ; // a value of -1 indicates no preferrences ; NslFloat0 showLayer; // actual number of the layer 2 to show ; // depending on preferredLayer and the ; // activity of the layers ; int modelLoIndex; // range of layers to be simulated ; int modelHiIndex; #ifdef WIN_DISPLAY XMonoWin winT2; XMonoWin winW; XMonoWin winC; #endif // WIN_DISPLAY ; // ---- control variables ; /* If the recognition variable of a model drops below r_theta, the model becomes ruled out by a strong inhibition term. In the simulation it is just skipped in oder to save cpu-time. skipModel indicates which models to skip. */ int skipModel[gallerySizeMax0]; // indicates which models to skip ; float centerI, centerJ; // center of gravity ; // ---- control variables ; NslFloat0 Attention; // Attention!=0 indicates use of attention dynamics ; NslFloat0 Average; // Average!=0 indicates to simulate on the average ... ; // ... instead of the model gallery in the beginning ; NslFloat0 workOnAverage; // workOnAverage indicates whether to simulate ... ; // .. currently in average instead of the model gallery ; NslFloat0 avTimeLimit; // time for attention dynamics on average model only ; public: DLM(); ~DLM() {} int memAlloc(); int initModule(); int initSys(); int initRun(); int simRun(); };