// DLM.C
// This version is for the NSL-book
// Author: (c) Laurenz Wiskott, wiskott@salk.edu, 96-03-11 ;
// read DLMreadme ;

#include "nsl_include.h"
#include "DLMwin.h"
#include "DLMdisplay.h"
#include "DLMSimilarity.h"
#include "DLM.h"

static const int smallLayer   = 0;
static const int smallPatches = 1;

DLM::DLM() : NslModel("DLM"),
  similarity("similarity",this),
  recognition("recognition",this),
  correlation("correlation",this),
  w12("w12",this),
  w21("w21",this),
  h1("h1",this),
  h2("h2",this),
  a1("a1",this),
  a2("a2",this),
  display("display",this),

  /* For the sake of efficiency, some operations such as the modification of
     the dynamic links or the display of several layers take place only once
     every 'loops' single iterations.  The correlation used for the
     modification is accumulated in each simulation step. */
  loops("loops",this),                   // number of cheap simulation steps ;
  gallerySize("gallerySize",this),       // actual size of the simulated gallery ;

  /* Since not all layers 2 can be displayed, only one layer 2 is copied into
     the respective 'show' layers.  You can either select one special layer to
     be displayed, indicated by the variable 'preferredLayer', or, if this
     variable does not have a valid value, the layer with the highest activity
     is displayed. */
  preferredLayer("preferredLayer",this), // number of the layer 2 to show ;
                                         // a value of -1 indicates no preferences ;
  showLayer("showLayer",this),           // actual number of the layer 2 to show, ;
                                         // depending on preferredLayer and the ;
                                         // activity of the layers ;

  /* For objects and models there are stored graphs with the precomputed
     Gabor-wavelet transform coefficients.  For the models they are taken from
     a grid of 10 x 10 nodes centered on the faces.  For the objects the grids
     cover the whole image plane with 16 x 17 nodes.  From these stored graphs
     a subgraph can be selected.  From the 16 x 17 graph, for example, a
     12 x 12 subgraph will automatically be selected if the size of layer 1 is
     12 x 12.  In addition one can choose the location of the subgraph by the
     offsets Si1offset, ... , and Sj2offset.  They are given as integer
     numbers after the model names in the gallery files. */
  workOnAverage("workOnAverage",this),   // workOnAverage indicates whether to simulate ... ;
                                         // ... currently on the average instead of the model gallery ;
  avTimeLimit("avTimeLimit",this)        // time for attention dynamics on average model only ;
{
  nsl_string _str;
  for (int i=0; i=gallerySizeMax) {
    NSLoutput("WARNING : gallerySize(",gallerySize.elem());
    NSLoutput(")>=gallerySizeMax(",gallerySizeMax);
    NSLoutput("),\n");
    NSLoutput(" set gallerySize=gallerySizeMax !!\n");
    gallerySize=gallerySizeMax;
  }
  loop = (int)loops.elem();
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }
  showLayer = 0;
  for (int model=0; model<=gallerySize; model++) skipModel[model] = 0;
}

void DLM::initRun() {
  loop = 0;
}

void DLM::simRun() {
  NSLoutput(":");
  if (loop<=0) {
    loop=(int)(loops.elem());

    // ---- after 'avTimeLimit' switch from average to model simulation -- ;
    if (nslSystem.getSimTime()>avTimeLimit) workOnAverage = 0;
    if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
    else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }

    if (workOnAverage == 1)
      showLayer = 0;
    else if ((preferredLayer>0)&&(preferredLayer<=gallerySize))
      // if a valid preferredLayer is given take it, ... ;
      showLayer = preferredLayer;
    else {
      // ... otherwise take the most active layer ;
      showLayer = 1;
      for (int model=modelLoIndex; model<=modelHiIndex; model++) {
        if (skipModel[model].get_data() == 1) continue;
        if (shSum[model]>shSum[int(showLayer.elem())]) showLayer = model;
      }
    }

    // ---- print the current recognition status: ----------------------- ;
    // ---- time, shown layer, recognition values ;
    NSLoutput("\nt = ",nslSystem.getSimTime());
    NSLoutput("\nshowLayer = ",showLayer.elem());
  } // if (loop<=0) ;
  loop--;
}

// W
DlmW::DlmW(nsl_string str,NslModule* parent) : DlmCommon(str,parent),
  loops("loops",this),                 // number of cheap simulation steps ;
  workOnAverage("workOnAverage",this),
  avTimeLimit("avTimeLimit",this),
  gallerySize("gallerySize",this),
  lambda_W("lambda_W",this)
{
  nsl_string _str;
  for (int i=0; i0) {
  // ---- copy 's1' to 'W21' and 'W12' ;
  for (model=1; model<=gallerySize; model++) {
    for (i2=0; i20)
      for (i2=0; i2sim[model][i2][j2][i1R][j1R]) {    // 17
        normFactor[i1][j1] = NSLmin(normFactor[i1][j1],
          sim[model][i2][j2][i1R][j1R]/w[model][i1R][j1R][i2][j2]);
      }
    } // for (j1R=...) ;
  } // for (model=...) ;

  // ---- normalize weights ------------------------------------------ ;
  for (i2=0; i2avTimeLimit) workOnAverage = 0;
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }
  } // if (loop<=0)
  loop--;
}

// W21
DlmW21::DlmW21(nsl_string str,NslModule* parent) : DlmW(str,parent),
  sh("sh",this)
{
  nsl_string _str;
  for (int i=0; i0) {
    for (model=1; model<=gallerySize; model++) {
      w[model] = sim[model];
    }
    // ---- initialize 'W21', for average layer ;
    float stmp;
    if (gallerySize>0)
      for (i2=0; i2=i1Rmax)) continue;
      for (j2=0; j2=j1Rmax)) continue;
      // 2
      hInput[model][i2][j2] = NSLmax(hInput[model][i2][j2],
        w[model][i2][j2][i1R][j1R]*sh[i1][j1]);
      } } } }
  if (loop<=0) {
    loop=(int)(loops.elem());
    if (workOnAverage == 0) {
      if (lambda_W!=0) {
        // ---- modify weights 'W21' and 'W12' ------------------------- ;
        for (model=modelLoIndex; model<=modelHiIndex; model++) {
          if (skipModel[model].get_data() == 1) continue;
          normFactor[model] = 1.;  // *(normFactor[model]) = 1.;
          for (i2=0; i2sim[model][i2][j2][i1R][j1R]) {    // 18
            normFactor[model][i2][j2] = NSLmin(normFactor[model][i2][j2],
              sim[model][i2][j2][i1R][j1R]/w[model][i2][j2][i1R][j1R]);
          }
          } // for (j1R=...) ;
        } // for (model=...) ;
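        /* Note on this update step (an added reading of the surviving code,
           not original text): for every node of layer 2, 'normFactor' is set
           to the smallest ratio sim/w found among its links whose dynamic
           weight w has grown beyond the static similarity sim (eq. 18; the
           analogous eq. 17 above does the same per layer-1 node for 'W12').
           The normalization step below scales the weights by this factor, so
           no dynamic link exceeds its similarity value.  The growth step
           itself (driven by lambda_W and the accumulated correlation) is only
           partly preserved in this listing. */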
  // ---- normalize weights ------------------------------------------ ;
  for (i2=0; i2avTimeLimit) workOnAverage = 0;
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }
  } // if (loop<=0)
  loop--;
}

// Correlation
DlmCorrelation::DlmCorrelation(nsl_string str,NslModule* parent) : DlmCommon(str,parent),
  loops("loops",this),                 // number of cheap simulation steps ;
  correlLIShow("correl21LIShow",this),
  showLayer("showLayer",this),
  workOnAverage("workOnAverage",this),
  avTimeLimit("avTimeLimit",this),
  gallerySize("gallerySize",this),
  sh1("sh",this),                      // maximum activity in the layers 2
  lambda_c("lambda_c",this),           // rate for the leaky correlation integrator ;
  temp()
{
  nsl_string _str;
  for (int i=0; iavTimeLimit) workOnAverage = 0;
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }
  } // if (loop<=0)
  loop--;
}

// Attention
DlmAttention::DlmAttention(nsl_string str,NslModule* parent) : DlmAHCommon(str,parent),
  a("a",this),               // attention blob on layer 1 ;
  sa("sa",this),             // cell activities ;
  aTransE("aTransE",this),   // excitatorily transferred signal ;
  sh("sh",this),             // maximum activity in the layers 2
  lambda_a("lambda_a",this), // time constant for the attention dynamics ;
  beta_a("beta_a",this),     // strength of global inhibition for attention blob ;
  kappa_ah("kappa_ah",this), // effect of running blob on attention blob ;
  alpha_N("alpha_N",this)    // parameter for attention blob initialization ;
{
}

void DlmAttention::memAlloc(int imax,int jmax) {
  a.memAlloc(imax,jmax);
  sa.memAlloc(imax,jmax);
  aTransE.memAlloc(imax,jmax);
  sh.memAlloc(imax,jmax);
  temp.memAlloc(imax,jmax);
}

void DlmAttention::initRun() {
  if (attention == 1) {
    a = alpha_N;                 // a2: alpha_N = 0;
    computeOutputFunc(sa,a,rho); // a2: sa = 0;
    initGauss(g,sigma_g);
  } // if (attention!=0) ;
}

void DlmAttention::simRun() {
  if (attention!=0) {
    // 10, 12a
    // aTransE = gaussConvolved(g,sa,rho);
    gaussConvolved(aTransE,g,sa);

    // 11, 12b
    // nslDiff(a1,1.,lambda_a * (- a + aTransE - beta_a*sa.sum() + kappa_ah*sh));
    NSLmult_equal(temp,kappa_ah,sh);
    NSLmult_minus(temp,beta_a,sa.sum());
    NSLadd_plus(temp,aTransE);
    NSLadd_minus(temp,a);
    NSLmult_equal(temp,lambda_a,temp);
    nslDiff(a,1.,temp);

    // 13
    computeOutputFunc(sa,a,rho);
  } // if (attention!=0)
}

// h
DlmH::DlmH(nsl_string str,NslModule* parent) : DlmAHCommon(str,parent),
  sa("sa",this),             // cell activities ;
  hTransE("hTransE",this),
  d("d",this),
  beta_h("beta_h",this),
  beta_ac("beta_ac",this),   // strength of global inhibition compensating ... ;
  kappa_hs("kappa_hs",this),
  kappa_hh("kappa_hh",this),
  kappa_ha("kappa_ha",this), // effect of attention blob on running blob ;
  lambda_p("lambda_p",this),
  lambda_m("lambda_m",this)
{
}

// ---- Gaussian excitation kernel -------------------------------------- ;
DlmH1::DlmH1(nsl_string str,NslModule* parent) : DlmH(str,parent),
  hInput("hInput",this),
  h("h",this),
  s("s",this),
  sh("sh",this)
{
}

void DlmH1::memAlloc(int _i1max,int _j1max) {
  i1max = _i1max;
  j1max = _j1max;
  hInput.memAlloc(i1max,j1max);
  hTransE.memAlloc(i1max,j1max);
  h.memAlloc(i1max,j1max);
  s.memAlloc(i1max,j1max);
  d.memAlloc(i1max,j1max);
  sh.memAlloc(i1max,j1max);
  sa.memAlloc(i1max,j1max);
  temp.memAlloc(i1max,j1max);
}

void DlmH1::initRun() {
  hInput=0;
  h=0;
  s=0;
  sh=0;
  initGauss(g,sigma_g);
}

void DlmH1::simRun() {
  int i1,j1;

  // ---- introduce 'noise' on 'h' if necessary ----------------------- ;
  if (sh.sum()<=0) {
    h[i1max/2][j1max/2]=0.10;
    h[i1max/4][j1max/2]=0.12;
    h[i1max/2][j1max/4]=0.14;
  }

  // 3
  hTransE = gaussConvolved(g,sh,rho);

  if (attention!=0) {
    // 5a
    // nslDiff(h,1.,- h + hTransE - beta_h*sh.sum() - kappa_hs*s
    //              + kappa_hh*hInput + kappa_ha*(sa-beta_ac));
    temp = sa;
    NSLadd_minus(temp,beta_ac);
    NSLmult_equal(temp,kappa_ha,temp);
    NSLmult_plus(temp,kappa_hh,hInput);
    NSLmult_minus(temp,kappa_hs,s);
    NSLmult_minus(temp,beta_h,sh.sum());
    NSLadd_plus(temp,hTransE);
    NSLadd_minus(temp,h);
    nslDiff(h,1.,temp);
  } // if (attention!=0) ;
  else {
    // 5b
    // nslDiff(h,1., - h + hTransE - beta_h*sh.sum() - kappa_hs*s
    //               + kappa_hh* hInput);
    NSLmult_equal(temp,kappa_hh,hInput);
    NSLmult_minus(temp,kappa_hs,s);
    NSLmult_minus(temp,beta_h,sh.sum());
    NSLadd_plus(temp,hTransE);
    NSLadd_minus(temp,h);
    nslDiff(h,1.,temp);
  } // if (attention!=0) else ;

  // ---- compute 'sh' and 'sh' ---------------------------------- ;
  // ---- needs valid 'h' and 'h' ;
  // 7a
  computeOutputFunc(sh,h,rho);

  // ---- simulate one time step on 's' and 's' ------------------- ;
  // ---- needs valid 'h' and 'h' ;
  // 8a
  d = h - s;
  for (i1=0; i10) d[i1][j1] = d[i1][j1]*lambda_p.elem();
  else d[i1][j1] = d[i1][j1]*lambda_m.elem();
  // 8c
  nslDiff(s,1.,d);
}

// h
DlmH2::DlmH2(nsl_string str,NslModule* parent) : DlmH(str,parent),
  showLayer("showLayer",this),
  avTimeLimit("avTimeLimit",this),
  workOnAverage("workOnAverage",this),
  gallerySize("gallerySize",this),
  shShow("shShow",this),
  shMax("shMax",this)        // maximum activity in the layers 2
{
  nsl_string _str;
  for (int i=0; i0) d[i2][j2] = d[i2][j2]*lambda_p.elem();
  else d[i2][j2] = d[i2][j2]*lambda_m.elem();
  // 9c
  nslDiff(s[model],1.,d);
  }
  if (nslSystem.getSimTime()>avTimeLimit) workOnAverage = 0;
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1; modelHiIndex=int(gallerySize.elem()); }
}

// Recognition
DlmRecognition::DlmRecognition(nsl_string str,NslModule* parent) : DlmCommon(str,parent),
  avTimeLimit("avTimeLimit",this),
  workOnAverage("workOnAverage",this),
  gallerySize("gallerySize",this),
  lambda_r("lambda_r",this), // time constant for the recognition dynamics ;
  r_theta("r_theta",this)    // threshold for model suppression ;
{
  nsl_string _str;
  for (int i=0; i0 && rec[model] <= r_theta) {
    skipModel[model] = 1;
  }
  }

  // ---- print the current recognition status: ----------------------- ;
  // ---- time, shown layer, recognition values ;
  for (model=1; model<=gallerySize; model++)
    if (rec[model]>r_theta) NSLoutput(" ");
    else NSLoutput(" *");
  NSLoutput("_");

  if (nslSystem.getSimTime()>avTimeLimit) workOnAverage = 0;
  if (workOnAverage == 1) { modelLoIndex=0; modelHiIndex=0; }
  else { modelLoIndex=1;
    modelHiIndex=int(gallerySize.elem()); }
}

DlmAHCommon::DlmAHCommon(nsl_string str,NslModule* parent) : DlmCommon(str,parent),
  rho("rho",this),             // slope radius of squashing function ;
  attention("attention",this), // attention!=0 indicates use of attention dynamics ;
  sigma_g("sigma_g",this),     // Gauss width of excitatory kernel ;
  g("g",this,9),               // Gaussian interaction kernel ; gSize
  temp()
{
}

float DlmAHCommon::initGauss(NslFloat0& sigma_g,float x) {
  return exp(-x*x/(2*sigma_g.elem()*sigma_g.elem()));
}

void DlmAHCommon::initGauss(NslFloat1& g,NslFloat0& sigma_g) {
  for (int k=0; k=rho) return 1.;
  else return sqrt(h*rhoInv);
}

void DlmAHCommon::computeOutputFunc(NslFloat2& sh, NslFloat2& h, NslFloat0& rho) {
  int i,j;
  for (i=0; i _DLM("DLM");
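
/* Illustrative sketch (added; not part of the original NSL model code): the
   helpers DlmAHCommon::initGauss() and computeOutputFunc() above implement a
   Gaussian interaction kernel g(x) = exp(-x*x/(2*sigma_g^2)) and a piecewise
   square-root squashing function with slope radius rho (the code uses rhoInv,
   presumably 1/rho).  The plain C++ fragment below restates that math without
   the NSL types, as a compact reference; the 0-branch for h <= 0 is an
   assumption, since that part of the original listing is not preserved above. */
#if 0
#include <cmath>
#include <cstdio>

// Gaussian kernel value at offset x for width sigma_g (cf. initGauss above).
static float gaussKernel(float sigma_g, float x) {
  return std::exp(-x*x / (2.f*sigma_g*sigma_g));
}

// Squashing function: 0 for h <= 0 (assumed branch), sqrt(h/rho) for
// 0 < h < rho, and 1 for h >= rho (cf. computeOutputFunc above).
static float squash(float h, float rho) {
  if (h <= 0.f) return 0.f;
  if (h >= rho) return 1.f;
  return std::sqrt(h/rho);
}

int main() {
  const float rho = 1.f, sigma_g = 1.f;
  for (float h = -0.5f; h <= 1.5f; h += 0.25f)
    std::printf("h=%5.2f  squash=%5.3f  gauss=%5.3f\n",
                h, squash(h, rho), gaussKernel(sigma_g, h));
  return 0;
}
#endif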