// DLM.h
// This version is for the NSL book.
// Author: (c) Laurenz Wiskott, wiskott@salk.edu, 96-03-11 ;
// read DLMreadme ;
/* Layer 1 is the object layer; the model layers are a vector of layers 2
   (except the first one, which is an average face).  The indices \nu and
   \mu become (i1,j1) and (i2,j2) or vice versa;
   i1={0,1,...,i1max-1}, ..., j2={0,1,...,j2max-1}.

   Besides the complete all-to-all connectivity, the program allows you to
   define a more local one.  Each cell in the layers 2 is then connected to
   a smaller patch of size i1Rmax * j1Rmax on layer 1.  The patches are
   equally distributed in the correct spatial order.  With this local
   connectivity the system loses its invariance against rotation, but the
   features are not rotation invariant anyway.  Due to the pre-restricted
   connectivity the system converges more rapidly and reliably.

   A frame is put around layer 1 in order to give the attention blob space
   to move to the border.  The neurons of the frame are not connected to
   layers 2.  If layer 1 is of the same size as layer 2, no attention blob
   is required and the frame is not necessary. */
/* For the sake of efficiency, some operations, such as the modification of
   the dynamic links or the display of several layers, take place only once
   every `loops' single iterations.  The correlation used for the
   modification is accumulated in each simulation step.
*/

/* Common base of the layer (h) and attention (a) dynamics modules.
   Owns the Gaussian interaction kernel and the squashing output
   function shared by DlmH1/DlmH2 and DlmAttention. */
class DlmAHCommon : public DlmCommon {
protected:
  NslFloat0 rho;          // slope radius of squashing function ;
  NslInt0 attention;      // attention != 0 indicates use of attention dynamics ;
  NslFloat0 sigma_g;      // Gauss width of excitatory kernel ;
  NslFloat1 g;            // Gaussian interaction kernel (1-D; the 2-D
                          // convolution is separable, cf. note in class DLM) ;
  NslFloat2 temp;         // temporary workspace ;
public:
  DlmAHCommon(nsl_string, NslModule*);
  ~DlmAHCommon() {}
protected:
  // Kernel setup: scalar and 1-D-array variants ;
  float initGauss(NslFloat0&,float);
  void initGauss(NslFloat1&,NslFloat0&);
  // Convolution of a 2-D layer with the Gaussian kernel ;
  NslFloat2 gaussConvolved(NslFloat1&,NslFloat2&,NslFloat0&);
  void gaussConvolved(NslFloat2&,NslFloat1&,NslFloat2&);
  // Squashing function applied point-wise / to a whole layer ;
  float outputFunc(float,NslFloat0&);
  void computeOutputFunc(NslFloat2&, NslFloat2&, NslFloat0&);
};

/* Recognition dynamics: one recognition variable per model of the
   gallery; models whose variable drops below r_theta are flagged in
   skipModel so that the other modules skip them (see note in class DLM). */
class DlmRecognition : public DlmCommon {
public:
  NslDinFloat0 shSum[gallerySizeMax];    // sum over sh2 ;
  NslDoutInt0 skipModel[gallerySizeMax]; // indicates which models to skip ;
private:
  NslFloat0 rec[gallerySizeMax]; // recognition variable ;
  NslFloat0 lambda_r;      // time constant for the recognition dynamics ;
  NslFloat0 r_theta;       // threshold for model suppression ;
  NslFloat0 avTimeLimit;   // time for attention dynamics on average model only ;
  NslInt0 workOnAverage;   // nonzero while simulating on the average face ;
                           // instead of the model gallery ;
  int modelLoIndex;        // range of layers to be simulated ;
  int modelHiIndex;
  NslInt0 gallerySize;
public:
  DlmRecognition(nsl_string, NslModule*);
  ~DlmRecognition() {}
  void initRun();
  void simRun();
};

/* Common part of the layer dynamics (running blob), shared by the
   object layer (DlmH1) and the model layers (DlmH2). */
class DlmH : public DlmAHCommon {
public:
  NslDinFloat2 sa;        // attention-blob input ;
protected:
  NslFloat2 hTransE;      // excitatorily transferred signal ;
  NslFloat2 d;            // NOTE(review): undocumented in original header --
                          // purpose not evident from the declarations; verify ;
  NslFloat0 beta_h;       // strength of global inhibition ;
  NslFloat0 beta_ac;      // NOTE(review): undocumented; other beta_* are
                          // inhibition strengths -- verify ;
  NslFloat0 kappa_hs;     // strength of self-inhibition ;
  NslFloat0 kappa_hh;     // strength of mutual interaction ;
  NslFloat0 kappa_ha;     // NOTE(review): undocumented; presumably effect of
                          // attention blob on layer dynamics (cf. kappa_ah) ;
  NslFloat0 lambda_p;     // decay constant for h-s>0 ;
  NslFloat0 lambda_m;     // decay constant for h-s<0 ;
public:
  DlmH(nsl_string, NslModule*);
  ~DlmH() {}
};

/* Layer dynamics for layer 1 (the object layer). */
class DlmH1 : public DlmH {
public:
  NslDinFloat2 hInput;    // input from the model layers (via DlmW21) ;
  NslDoutFloat2 sh;       // layer output ;
private:
  NslFloat2 h;            // layer activity ;
  NslFloat2 s;            // self-inhibition variable ;
  int i1max, j1max;       // layer-1 dimensions ;
public:
  DlmH1(nsl_string, NslModule*);
  ~DlmH1() {}
  void memAlloc(int,int);
  void initRun();
  void simRun();
};

/* Layer dynamics for the layers 2 (model gallery); one slot per model,
   index 0 being reserved for the average face (see note in class DLM). */
class DlmH2 : public DlmH {
public:
  NslDinFloat2 hInput[gallerySizeMax];   // input from layer 1 (via DlmW12) ;
  NslDoutFloat0 shSum[gallerySizeMax];   // sum over sh2 ;
  NslDoutFloat2 shMax;                   // maximum activity in the layers 2 ;
  NslDoutFloat2 sh[gallerySizeMax];      // layer outputs ;
  NslDinInt0 skipModel[gallerySizeMax];  // indicates which models to skip ;
  NslDoutFloat2 shShow;                  // copy of the layer selected for display ;
  NslDinInt0 showLayer;                  // which layer 2 to display ;
private:
  NslFloat2 h[gallerySizeMax];           // layer activities ;
  NslFloat2 s[gallerySizeMax];           // self-inhibition variables ;
  NslFloat0 avTimeLimit;   // time for attention dynamics on average model only ;
  NslInt0 workOnAverage;   // nonzero while simulating on the average face ;
                           // instead of the model gallery ;
  int modelLoIndex;        // range of layers to be simulated ;
  int modelHiIndex;
  NslInt0 gallerySize;
  int i2max,j2max;         // layer-2 dimensions ;
public:
  DlmH2(nsl_string, NslModule*);
  ~DlmH2() {}
  void memAlloc(int,int);
  void initRun();
  void simRun();
};

/* Attention-blob dynamics on layer 1 (instantiated once per side in DLM). */
class DlmAttention : public DlmAHCommon {
public:
  NslDoutFloat2 sa;       // attention-blob output ;
  NslDinFloat2 sh;        // running-blob input ;
private:
  NslFloat2 a;            // attention blob on layers 1 ;
  NslFloat2 aTransE;      // excitatorily transferred signal ;
  NslFloat0 lambda_a;     // time constant for the attention dynamics ;
  NslFloat0 beta_a;       // strength of global inhibition for attention blob ;
  NslFloat0 kappa_ah;     // effect of running blob on attention blob ;
  NslFloat0 alpha_N;      // parameter for attention blob initialization ;
public:
  DlmAttention(nsl_string, NslModule*);
  ~DlmAttention() {}
  void memAlloc(int,int);
  void initRun();
  void simRun();
};

/* Accumulates the correlation between layer-1 and layer-2 activities
   that drives the modification of the dynamic links (see the
   efficiency note in the file header: links are updated only once
   every `loops' iterations, while the correlation is accumulated in
   every step). */
class DlmCorrelation : public DlmCommon {
public:
  NslDinFloat2 sh1;                        // layer-1 output ;
  NslDinFloat2 sh2[gallerySizeMax];        // layer-2 outputs ;
  NslDoutFloat4 correlSum[gallerySizeMax]; // accumulated correlation ;
  NslDoutFloat4 correlLI[gallerySizeMax];  // leaky-integrated correlation ;
  NslDinInt0 skipModel[gallerySizeMax];    // indicates which models to skip ;
  NslDoutFloat4 correlLIShow;              // copy of the correlation selected for display ;
  NslDinInt0 showLayer;                    // which layer 2 to display ;
private:
  NslFloat0 lambda_c;      // rate for the leaky correlation integrator ;
  NslInt0 workOnAverage;   // nonzero while simulating on the average face ;
                           // instead of the model gallery ;
  int modelLoIndex;        // range of layers to be simulated ;
  int modelHiIndex;
  NslFloat0 avTimeLimit;   // time for attention dynamics on average model only ;
  NslInt0 gallerySize;
  NslInt0 loops;           // number of cheap simulation steps ;
  int loop;                // loop counter ;
  int frame,i1max,j1max,i2max,j2max,i1Rmax,j1Rmax; // gallerySizeMax, ;
  NslFloat4 temp;          // temporary workspace ;
public:
  DlmCorrelation(nsl_string,NslModule*);
  ~DlmCorrelation(){}
  void memAlloc(int,int,int,int,int,int,int);
  void initRun();
  void simRun();
};

/* Common part of the dynamic-link weight matrices, shared by the
   1->2 (DlmW12) and 2->1 (DlmW21) directions. */
class DlmW : public DlmCommon {
public:
  NslDinFloat4 sim[gallerySizeMax];       // feature similarities ;
  NslDinFloat4 correlSum[gallerySizeMax]; // accumulated correlation (from DlmCorrelation) ;
  NslDinInt0 skipModel[gallerySizeMax];   // indicates which models to skip ;
  NslDinInt0 showLayer;                   // which layer 2 to display ;
  NslDoutFloat4 wShow;                    // copy of the weights selected for display ;
protected:
  NslFloat4 w[gallerySizeMax];   // dynamic link weights ;
  NslFloat0 lambda_W;            // rate of the link modification ;
  NslFloat0 wSums[gallerySizeMax];
  NslInt0 workOnAverage;   // nonzero while simulating on the average face ;
                           // instead of the model gallery ;
  int modelLoIndex;        // range of layers to be simulated ;
  int modelHiIndex;
  NslFloat0 avTimeLimit;   // time for attention dynamics on average model only ;
  NslInt0 gallerySize;
  NslInt0 loops;           // number of cheap simulation steps ;
  int loop;                // loop counter ;
  int frame,i1max,j1max,i2max,j2max,i1Rmax,j1Rmax;
public:
  DlmW(nsl_string,NslModule*);
  ~DlmW(){}
};

/* Dynamic links from layer 1 to the layers 2. */
class DlmW12 : public DlmW {
public:
  NslDinFloat2 sh[gallerySizeMax];  // layer-2 outputs ;
  NslDoutFloat2 hInput;             // weighted input delivered to layer 1 ;
private:
  NslFloat2 normFactor;             // weight normalization factor ;
public:
  DlmW12(nsl_string,NslModule*);
  ~DlmW12(){}
  void memAlloc(int,int,int,int,int,int,int);
  void initRun();
  void simRun();
};

/* Dynamic links from the layers 2 to layer 1. */
class DlmW21 : public DlmW {
public:
  NslDinFloat2 sh;                         // layer-1 output ;
  NslDoutFloat2 hInput[gallerySizeMax];    // weighted inputs delivered to the layers 2 ;
private:
  NslFloat2 normFactor[gallerySizeMax];    // weight normalization factors ;
public:
  DlmW21(nsl_string,NslModule*);
  ~DlmW21(){}
  void memAlloc(int,int,int,int,int,int,int);
  void initRun();
  void simRun();
};

/* Top-level model: wires the similarity, recognition, correlation,
   link (w12/w21), layer (h1/h2) and attention (a1/a2) modules together. */
class DLM : public NslModel {
public:
  DlmSimilarity similarity;
  DlmRecognition recognition;
  DlmCorrelation correlation;
  DlmW12 w12;
  DlmW21 w21;
  DlmH1 h1;
  DlmH2 h2;
  DlmAttention a1;
  DlmAttention a2;
  DlmDisplay display;
  NslDinFloat0 shSum[gallerySizeMax];    // sum over sh2 ;
  NslDoutInt0 skipModel[gallerySizeMax]; // indicates which models to skip ;
private:
  NslInt0 gallerySize;
  NslInt0 loops;           // number of cheap simulation steps ;
  int loop;                // loop counter ;
  /* The reason for the +1 is that one of the layers 2 (index 0) is
     reserved for the simulation on the average face connectivity. */
  /* The convolution with a two-dimensional Gaussian kernel can be
     separated into two one-dimensional convolutions.  The number of
     units for this one-dimensional kernel is gSize. */
  /* Since not all layers 2 can be displayed, only one layer 2 is
     copied into respective 'show' layers.  You can either select one
     special layer to be displayed, indicated by the variable
     'preferredLayer', or, if this variable does not have a valid
     value, the layer with the highest activity is displayed. */
  NslInt0 preferredLayer;  // number of the layer 2 to show ;
                           // a value of -1 indicates no preferences ;
  NslInt0 showLayer;       // actual number of the layer 2 to show ;
                           // depending on preferredLayer and the ;
                           // activity of the layers ;
  // ---- control variables ;
  /* If the recognition variable of a model drops below r_theta, the
     model becomes ruled out by a strong inhibition term.  In the
     simulation it is just skipped in order to save cpu-time.
     skipModel indicates which models to skip. */
  NslInt0 workOnAverage;   // nonzero while simulating on the average face ;
                           // instead of the model gallery ;
  int modelLoIndex;        // range of layers to be simulated ;
  int modelHiIndex;
  NslFloat0 avTimeLimit;   // time for attention dynamics on average model only ;
public:
  DLM();
  ~DLM() {}
  void memAlloc();
  void makeConn();
  void initSys();
  void initRun();
  void simRun();
};