// Code generated by "goal build"; DO NOT EDIT.
//line act-layer.goal:1
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
// "fmt"
"cogentcore.org/core/gpu"
"cogentcore.org/core/math32"
"cogentcore.org/lab/tensor"
"github.com/emer/axon/v2/fsfffb"
)
//gosl:start
//////// ApplyExt
// ApplyExtFlags gets the clear mask and set mask for updating neuron flags
// based on layer type, and whether input should be applied to Target (else Ext)
func (ly *LayerParams) ApplyExtFlags(clearMask, setMask *NeuronFlags, toTarg *bool) {
*clearMask = NeuronHasExt | NeuronHasTarg | NeuronHasCmpr
*toTarg = false
switch ly.Type {
case TargetLayer:
*setMask = NeuronHasTarg
*toTarg = true
case CompareLayer:
*setMask = NeuronHasCmpr
*toTarg = true
default:
*setMask = NeuronHasExt
}
return
}
// InitExt initializes external input state for given neuron
func (ly *LayerParams) InitExt(ni, di uint32) {
Neurons.Set(0.0, int(ni), int(di), int(Ext))
Neurons.Set(0.0, int(ni), int(di), int(Target))
NeuronClearFlag(NeuronHasExt|NeuronHasTarg|NeuronHasCmpr, ni, di)
}
// ApplyExtValue applies given external value to given neuron,
// setting flags based on type of layer.
// Should only be called on Input, Target, Compare layers.
// Negative values are not valid, and will be interpreted as missing inputs.
func (ly *LayerParams) ApplyExtValue(ni, di uint32, val float32) {
if val < 0 {
return
}
var clearMask, setMask NeuronFlags
var toTarg bool
ly.ApplyExtFlags(&clearMask, &setMask, &toTarg)
if toTarg {
Neurons.Set(val, int(ni), int(di), int(Target))
} else {
Neurons.Set(val, int(ni), int(di), int(Ext))
}
NeuronClearFlag(clearMask, ni, di)
NeuronSetFlag(setMask, ni, di)
}
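// ApplyExtsNeuron initializes external input state for the given neuron,
// and applies the value from the Exts buffer if this is an external input layer type.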
func (ly *LayerParams) ApplyExtsNeuron(ni, di uint32) {
lni := ni - ly.Indexes.NeurSt // layer-based
ly.InitExt(ni, di)
if IsExtLayerType(ly.Type) {
ei := ly.Indexes.ExtsSt + lni
ly.ApplyExtValue(ni, di, Exts.Value(int(ei), int(di)))
}
}
// SetNeuronExtPosNeg sets the neuron Ext value based on the neuron index,
// with positive values going into the first unit, and negative values
// rectified to positive in the second unit.
func SetNeuronExtPosNeg(ctx *Context, ni, di uint32, val float32) {
if ni == 0 {
if val >= 0 {
Neurons.Set(val, int(ni), int(di), int(Ext))
} else {
Neurons.Set(float32(0), int(ni), int(di), int(Ext))
}
} else {
if val >= 0 {
Neurons.Set(float32(0), int(ni), int(di), int(Ext))
} else {
Neurons.Set(-val, int(ni), int(di), int(Ext))
}
}
}
// IsTarget returns true if this layer is a Target layer.
// By default, returns true for layers of Type == TargetLayer.
// Other Target layers include the PulvinarLayer in deep predictive learning.
// It is used in SynScale to not apply it to target layers.
// In both cases, Target layers are purely error-driven.
func (ly *LayerParams) IsTarget() bool {
return ly.Type == TargetLayer || ly.Type == PulvinarLayer
}
// IsInput returns true if this layer is an Input layer.
// By default, returns true for layers of Type == axon.InputLayer
// Used to prevent adapting of inhibition or TrgAvg values.
func (ly *LayerParams) IsInput() bool {
return ly.Type == InputLayer
}
// IsInputOrTarget returns true if this layer is either an Input
// or a Target layer.
func (ly *LayerParams) IsInputOrTarget() bool {
return (ly.IsTarget() || ly.IsInput())
}
// IsLearnTrgAvg returns true if this layer has Learn.TrgAvgAct.RescaleOn set for learning
// adjustments based on target average activity levels, and the layer is not an
// input or target layer.
func (ly *LayerParams) IsLearnTrgAvg() bool {
if ly.IsInput() || ly.IsTarget() || ly.Learn.TrgAvgAct.RescaleOn.IsFalse() {
return false
}
return true
}
// LearnTrgAvgErrLRate returns the effective error-driven learning rate for adjusting
// target average activity levels. This is 0 if !IsLearnTrgAvg() and otherwise
// is Learn.TrgAvgAct.ErrLRate
func (ly *LayerParams) LearnTrgAvgErrLRate() float32 {
if !ly.IsLearnTrgAvg() {
return 0
}
return ly.Learn.TrgAvgAct.ErrLRate
}
//////// Cycle
// GatherSpikes integrates G*Raw and G*Syn values for the given recv neuron,
// while integrating the Recv Path-level GSyn values.
func (ly *LayerParams) GatherSpikes(ctx *Context, ni, di uint32) {
lni := ni - ly.Indexes.NeurSt
ly.GatherSpikesInit(ctx, ni, di)
for pti := uint32(0); pti < ly.Indexes.RecvN; pti++ {
npti := RecvPathIxs.Value1D(int(ly.Indexes.RecvSt + pti))
pt := GetPaths(npti)
pt.GatherSpikes(ctx, ly, ni, di, lni)
}
ly.GiFromSpikes(ctx, ni, di)
}
// GatherSpikesInit initializes G*Raw and G*Syn values for given neuron
// prior to integration.
func (ly *LayerParams) GatherSpikesInit(ctx *Context, ni, di uint32) {
Neurons.Set(0.0, int(ni), int(di), int(GeRaw))
Neurons.Set(0.0, int(ni), int(di), int(GiRaw))
Neurons.Set(0.0, int(ni), int(di), int(GModRaw))
Neurons.Set(0.0, int(ni), int(di), int(GModSyn))
Neurons.Set(0.0, int(ni), int(di), int(GMaintRaw))
Neurons.Set(0.0, int(ni), int(di), int(CtxtGeRaw))
Neurons.Set(NeuronAvgs.Value(int(ni), int(GeBase)), int(ni), int(di), int(GeSyn))
Neurons.Set(NeuronAvgs.Value(int(ni), int(GiBase)), int(ni), int(di), int(GiSyn))
}
// GiFromSpikes gets the Spike, GeRaw and GeExt from neurons in the pools
// where Spike drives FBsRaw = raw feedback signal,
// GeRaw drives FFsRaw = aggregate feedforward excitatory spiking input.
// GeExt represents extra excitatory input from other sources.
// Then integrates new inhibitory conductances therefrom,
// at the layer and pool level.
// Called separately by Network.CycleImpl on all Layers.
// Also updates all AvgMax values at the Cycle level.
func (ly *LayerParams) GiFromSpikes(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
spk := Neurons.Value(int(ni), int(di), int(Spike))
geRaw := Neurons.Value(int(ni), int(di), int(GeRaw))
geExt := Neurons.Value(int(ni), int(di), int(GeExt))
PoolInhibRawIncrInt(pi, di, spk, geRaw, geExt)
PoolAvgMaxUpdate(pi, di, ni)
if PoolIxs.Value(int(pi), int(PoolIsLayer)) == 0 { // also update layer pool if I am a subpool
lpi := ly.PoolIndex(0)
PoolInhibRawIncrInt(lpi, di, spk, geRaw, geExt)
PoolAvgMaxUpdate(lpi, di, ni)
}
}
// LayerGi updates the layer-level Gi inhibition from spikes.
func (ly *LayerParams) LayerGi(ctx *Context, li, di uint32) {
lpi := ly.PoolIndex(0)
PoolAvgMaxCalc(lpi, di)
PoolInhibIntToRaw(lpi, di)
ly.LayPoolGiFromSpikes(ctx, lpi, di)
}
// BetweenGi computes inhibition Gi between layers.
func (ly *LayerParams) BetweenGi(ctx *Context, di uint32) {
lpi := ly.PoolIndex(0)
maxGi := Pools.Value(int(lpi), int(di), int(fsfffb.TotalGi))
maxGi = ly.BetweenLayerGiMax(di, maxGi, ly.LayInhib.Index1)
maxGi = ly.BetweenLayerGiMax(di, maxGi, ly.LayInhib.Index2)
maxGi = ly.BetweenLayerGiMax(di, maxGi, ly.LayInhib.Index3)
maxGi = ly.BetweenLayerGiMax(di, maxGi, ly.LayInhib.Index4)
Pools.Set(maxGi, int(lpi), int(di), int(fsfffb.TotalGi)) // our inhib is max of us and everyone in the layer pool
}
// BetweenLayerGiMax returns the max of the given maxGi and the TotalGi
// of the layer at the given layIndex (returns maxGi if layIndex < 0).
func (ly *LayerParams) BetweenLayerGiMax(di uint32, maxGi float32, layIndex int32) float32 {
if layIndex < 0 {
return maxGi
}
oly := GetLayers(uint32(layIndex))
opi := oly.PoolIndex(0)
ogi := Pools.Value(int(opi), int(di), int(fsfffb.TotalGi))
if ogi > maxGi {
return ogi
}
return maxGi
}
// LayPoolGiFromSpikes computes inhibition Gi from Spikes for layer-level pool.
func (ly *LayerParams) LayPoolGiFromSpikes(ctx *Context, lpi, di uint32) {
PoolInhibSpikesFromRaw(lpi, di)
PoolInhib(&ly.Inhib.Layer, lpi, di, LayerStates.Value(int(ly.Index), int(di), int(LayerGiMult)))
}
// SubPoolGiFromSpikes computes inhibition Gi from Spikes within a sub-pool.
// pi is guaranteed not to be the layer-level pool.
func (ly *LayerParams) SubPoolGiFromSpikes(ctx *Context, lpi, pi, di uint32, lyInhib bool, giMult float32) {
PoolInhibSpikesFromRaw(pi, di)
PoolInhib(&ly.Inhib.Pool, pi, di, giMult)
if lyInhib {
PoolInhibLayerMax(pi, di, Pools.Value(int(lpi), int(di), int(fsfffb.TotalGi))) // note: this requires lpl inhib to have been computed before!
} else {
PoolInhibPoolMax(pi, di, Pools.Value(int(pi), int(di), int(fsfffb.TotalGi))) // display only
Pools.Set(Pools.Value(int(lpi), int(di), int(fsfffb.TotalGi)), int(lpi), int(di), int(fsfffb.GiOrig))
}
}
//////// CycleNeuron methods
// CycleNeuron does one cycle (msec) of updating at the neuron level.
// Called directly by Network, iterates over data.
func (ly *LayerParams) CycleNeuron(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
lpi := ly.PoolIndex(0)
ly.GInteg(ctx, pi, ni, di)
ly.SpikeFromG(ctx, lpi, ni, di)
}
// GInteg integrates conductances G over time (Ge, NMDA, etc).
// It calls SpecialPreGs, GFromRawSyn, GiInteg, and SpecialPostGs.
func (ly *LayerParams) GInteg(ctx *Context, pi, ni, di uint32) {
drvGe := float32(0)
nonDrivePct := float32(0)
if ly.Type == PulvinarLayer {
ly.PulvinarDriver(ctx, ni-ly.Indexes.NeurSt, di, &drvGe, &nonDrivePct)
Neurons.Set(nonDrivePct, int(ni), int(di), int(Ext)) // use for regulating inhibition
}
saveVal := ly.SpecialPreGs(ctx, pi, ni, di, drvGe, nonDrivePct)
ly.GFromRawSyn(ctx, ni, di)
ly.GiInteg(ctx, pi, ni, di)
ly.SpecialPostGs(ctx, ni, di, saveVal)
}
///////// GInteg
// SpecialPreGs is used for special layer types to do things to the
// conductance values prior to doing the standard updates in GFromRawSyn.
// drvGe is for Pulvinar layers: the excitatory conductance from the driver neuron.
func (ly *LayerParams) SpecialPreGs(ctx *Context, pi, ni, di uint32, drvGe float32, nonDrivePct float32) float32 {
saveVal := float32(0) // sometimes we need to use a value computed here, for the post Gs step
pil := pi - ly.PoolSt
pnn := uint32(PoolNNeurons(pi))
pni := NeuronIxs.Value(int(ni), int(NrnNeurIndex)) - uint32(PoolIxs.Value(int(pi), int(PoolNeurSt)))
nrnCtxtGe := Neurons.Value(int(ni), int(di), int(CtxtGe))
nrnGeRaw := Neurons.Value(int(ni), int(di), int(GeRaw))
hasRew := GlobalScalars.Value(int(GvHasRew), int(di)) > 0
switch ly.Type {
case PTPredLayer, CTLayer:
geCtxt := ly.CT.GeGain * nrnCtxtGe
Neurons.SetAdd(geCtxt, int(ni), int(di), int(GeRaw))
if ly.CT.DecayDt > 0 {
Neurons.SetSub(ly.CT.DecayDt*nrnCtxtGe, int(ni), int(di), int(CtxtGe))
}
ctxExt := ly.Acts.Dt.GeSynFromRawSteady(geCtxt)
Neurons.SetAdd(ctxExt, int(ni), int(di), int(GeSyn))
saveVal = ctxExt // used In PostGs to set nrn.GeExt
case PTMaintLayer:
if ly.Acts.SMaint.On.IsTrue() {
saveVal = ly.Acts.SMaint.Inhib * Neurons.Value(int(ni), int(di), int(GMaintRaw)) // used In PostGs to set nrn.GeExt
}
case PulvinarLayer:
if ctx.PlusPhase.IsFalse() {
break
}
// geSyn, goes into nrn.GeExt in PostGs, so inhibition gets it
saveVal = nonDrivePct*Neurons.Value(int(ni), int(di), int(GeSyn)) + ly.Acts.Dt.GeSynFromRawSteady(drvGe)
Neurons.Set(nonDrivePct*nrnGeRaw+drvGe, int(ni), int(di), int(GeRaw))
Neurons.Set(saveVal, int(ni), int(di), int(GeSyn))
case VSGatedLayer:
dr := float32(0)
if pil == 0 {
dr = GlobalScalars.Value(int(GvVSMatrixJustGated), int(di))
} else {
dr = GlobalScalars.Value(int(GvVSMatrixHasGated), int(di))
}
dr = math32.Abs(dr)
Neurons.Set(dr, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(dr), int(ni), int(di), int(GeSyn))
case BLALayer:
if ly.Learn.NeuroMod.IsBLAExt() {
md := max(-GlobalScalars.Value(int(GvDA), int(di)), float32(0)) // ext is modulated by negative da
geCtxt := md * ly.CT.GeGain * Neurons.Value(int(ni), int(di), int(CtxtGeOrig))
Neurons.SetAdd(geCtxt, int(ni), int(di), int(GeRaw))
ctxExt := ly.Acts.Dt.GeSynFromRawSteady(geCtxt)
Neurons.SetAdd(ctxExt, int(ni), int(di), int(GeSyn))
saveVal = ctxExt // used In PostGs to set nrn.GeExt
}
case LHbLayer:
geRaw := float32(0)
if ni == 0 {
geRaw = 0.2 * math32.Abs(GlobalScalars.Value(int(GvLHbDip), int(di)))
} else {
geRaw = 0.2 * math32.Abs(GlobalScalars.Value(int(GvLHbBurst), int(di)))
}
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case DrivesLayer:
dr := GlobalVectors.Value(int(GvDrives), int(pil-1), int(di))
geRaw := dr
if dr > 0 {
geRaw = ly.Acts.PopCode.EncodeGe(pni, pnn, dr)
}
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case UrgencyLayer:
ur := GlobalScalars.Value(int(GvUrgency), int(di))
geRaw := ur
if ur > 0 {
geRaw = ly.Acts.PopCode.EncodeGe(pni, pnn, ur)
}
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case USLayer:
us := RubiconUSStimValue(di, pil-1, ly.Learn.NeuroMod.Valence)
geRaw := us
if us > 0 {
geRaw = ly.Acts.PopCode.EncodeGe(pni, pnn, us)
}
// D2Mod = final
if ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase.IsTrue()) {
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
}
case PVLayer:
if hasRew && ctx.PlusPhase.IsTrue() {
pv := float32(0)
if ly.Learn.NeuroMod.Valence == Positive {
pv = GlobalScalars.Value(int(GvPVpos), int(di))
} else {
pv = GlobalScalars.Value(int(GvPVneg), int(di))
}
pc := ly.Acts.PopCode.EncodeGe(pni, ly.Indexes.NNeurons, pv)
Neurons.Set(pc, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(pc), int(ni), int(di), int(GeSyn))
}
case LDTLayer:
geRaw := 0.4 * GlobalScalars.Value(int(GvACh), int(di))
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case VTALayer:
geRaw := ly.RWDa.GeFromDA(GlobalScalars.Value(int(GvVtaDA), int(di)))
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case RewLayer:
NeuronSetFlag(NeuronHasExt, ni, di)
SetNeuronExtPosNeg(ctx, ni, di, GlobalScalars.Value(int(GvRew), int(di))) // Rew must be set in Context!
case RWDaLayer:
geRaw := ly.RWDa.GeFromDA(GlobalScalars.Value(int(GvDA), int(di)))
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case TDDaLayer:
geRaw := ly.TDDa.GeFromDA(GlobalScalars.Value(int(GvDA), int(di)))
Neurons.Set(geRaw, int(ni), int(di), int(GeRaw))
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(ni), int(di), int(GeSyn))
case TDIntegLayer:
NeuronSetFlag(NeuronHasExt, ni, di)
SetNeuronExtPosNeg(ctx, ni, di, GlobalScalars.Value(int(GvRewPred), int(di)))
default:
}
return saveVal
}
// SpecialPostGs is used for special layer types to do things
// after the standard updates in GFromRawSyn.
// It is passed the saveVal from SpecialPreGs
func (ly *LayerParams) SpecialPostGs(ctx *Context, ni, di uint32, saveVal float32) {
if ly.Type != DSMatrixLayer {
ly.GNeuroMod(ctx, ni, di)
}
switch ly.Type {
case PulvinarLayer, PTMaintLayer, CTLayer, BLALayer:
Neurons.Set(saveVal, int(ni), int(di), int(GeExt))
case PTPredLayer:
Neurons.Set(saveVal, int(ni), int(di), int(GeExt))
orig := Neurons.Value(int(ni), int(di), int(CtxtGeOrig))
if orig < 0.05 {
Neurons.Set(0.0, int(ni), int(di), int(Ge))
}
case DSMatrixLayer:
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 {
ly.GNeuroMod(ctx, ni, di)
} else {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
nda := ly.DSMatrix.PatchBurstGain*Pools.Value(int(pi), int(di), int(fsfffb.DAD1)) - Pools.Value(int(pi), int(di), int(fsfffb.DAD2))
ggain := 1.0 + ly.Learn.NeuroMod.DASign()*ly.DSMatrix.PatchDAModGain*nda
Neurons.SetMul(ggain, int(ni), int(di), int(Ge))
Neurons.SetMul(ggain, int(ni), int(di), int(Gi))
}
default:
}
}
// GFromRawSyn computes overall Ge and GiSyn conductances for neuron
// from GeRaw and GeSyn values, including NMDA, VGCC, AMPA, and GABA-A channels.
func (ly *LayerParams) GFromRawSyn(ctx *Context, ni, di uint32) {
extraRaw := float32(0)
extraSyn := float32(0)
nrnGModRaw := Neurons.Value(int(ni), int(di), int(GModRaw))
nrnGModSyn := Neurons.Value(int(ni), int(di), int(GModSyn))
ach := GlobalScalars.Value(int(GvACh), int(di))
switch ly.Type {
case PTMaintLayer:
md := ly.Acts.Dend.ModGain * nrnGModSyn
if ly.Acts.Dend.ModACh.IsTrue() {
md *= ach
}
md += ly.Acts.Dend.ModBase
// key: excluding GModMaint here, so active maintenance can persist
Neurons.SetMul(md, int(ni), int(di), int(GeRaw))
Neurons.SetMul(md, int(ni), int(di), int(GeSyn))
extraRaw = ly.Acts.Dend.ModGain * nrnGModRaw
if ly.Acts.Dend.ModACh.IsTrue() {
extraRaw *= ach
}
extraSyn = md
case BLALayer:
// modulatory pathway from PTp is only used so we can modulate by da
md := max(-GlobalScalars.Value(int(GvDA), int(di)), 0.0) // ext is modulated by negative da
extraRaw = md * nrnGModRaw * ly.Acts.Dend.ModGain
extraSyn = md * nrnGModSyn * ly.Acts.Dend.ModGain
default:
if ly.Acts.Dend.HasMod.IsTrue() {
md := ly.Acts.Dend.ModBase + ly.Acts.Dend.ModGain*nrnGModSyn
if md > 1 {
md = 1
}
Neurons.SetMul(md, int(ni), int(di), int(GeRaw))
Neurons.SetMul(md, int(ni), int(di), int(GeSyn))
}
}
geRaw := Neurons.Value(int(ni), int(di), int(GeRaw))
geSyn := Neurons.Value(int(ni), int(di), int(GeSyn))
ly.Acts.NMDAFromRaw(ctx, ni, di, geRaw+extraRaw)
ly.Acts.MaintNMDAFromRaw(ctx, ni, di) // uses GMaintRaw directly
ly.Learn.LearnNMDAFromRaw(ctx, ni, di, geRaw)
ly.Acts.GvgccFromVm(ctx, ni, di)
ege := Neurons.Value(int(ni), int(di), int(Gnmda)) + Neurons.Value(int(ni), int(di), int(GnmdaMaint)) + Neurons.Value(int(ni), int(di), int(Gvgcc)) + extraSyn
ly.Acts.GeFromSyn(ctx, ni, di, geSyn, ege) // sets nrn.GeExt too
ly.Acts.GkFromVm(ctx, ni, di)
ly.Acts.GSkCaFromCa(ctx, ni, di)
Neurons.Set(ly.Acts.GiFromSyn(ctx, ni, di, Neurons.Value(int(ni), int(di), int(GiSyn))), int(ni), int(di), int(GiSyn))
}
// GiInteg adds Gi values from all sources including SubPool computed inhib
// and updates GABAB as well
func (ly *LayerParams) GiInteg(ctx *Context, pi, ni, di uint32) {
giMult := LayerStates.Value(int(ly.Index), int(di), int(LayerGiMult))
gi := giMult*Pools.Value(int(pi), int(di), int(fsfffb.TotalGi)) + Neurons.Value(int(ni), int(di), int(GiSyn)) + Neurons.Value(int(ni), int(di), int(GiNoise)) + ly.Learn.NeuroMod.GiFromACh(GlobalScalars.Value(int(GvACh), int(di)))
ssgi := Pools.Value(int(pi), int(di), int(fsfffb.SSGi))
Neurons.Set(gi, int(ni), int(di), int(Gi))
Neurons.Set(0.0, int(ni), int(di), int(SSGiDend))
if ctx.PlusPhase.IsTrue() && (ly.Type == PulvinarLayer) {
ext := Neurons.Value(int(ni), int(di), int(Ext)) // nonDrivePct
Neurons.Set(ext*ly.Acts.Dend.SSGi*ssgi, int(ni), int(di), int(SSGiDend))
} else {
if !ly.IsInputOrTarget() {
Neurons.Set(ly.Acts.Dend.SSGi*ssgi, int(ni), int(di), int(SSGiDend))
}
}
vm := Neurons.Value(int(ni), int(di), int(VmDend))
nrnGababM := Neurons.Value(int(ni), int(di), int(GababM))
nrnGababX := Neurons.Value(int(ni), int(di), int(GababX))
ly.Acts.GabaB.MX(gi, &nrnGababM, &nrnGababX)
Neurons.Set(nrnGababM, int(ni), int(di), int(GababM))
Neurons.Set(nrnGababX, int(ni), int(di), int(GababX))
nrnGgabaB := ly.Acts.GabaB.GgabaB(nrnGababM, vm)
Neurons.Set(nrnGgabaB, int(ni), int(di), int(GgabaB))
// Gk was already init
Neurons.SetAdd(nrnGgabaB, int(ni), int(di), int(Gk))
}
// GNeuroMod does neuromodulation of conductances
func (ly *LayerParams) GNeuroMod(ctx *Context, ni, di uint32) {
ggain := ly.Learn.NeuroMod.GGain(GlobalScalars.Value(int(GvDA), int(di)) + GlobalScalars.Value(int(GvDAtonic), int(di)))
Neurons.SetMul(ggain, int(ni), int(di), int(Ge))
Neurons.SetMul(ggain, int(ni), int(di), int(Gi))
}
//////// SendSpike
// SpikeFromG computes Vm from Ge, Gi, Gl conductances and then Spike from that
func (ly *LayerParams) SpikeFromG(ctx *Context, lpi, ni, di uint32) {
ly.Acts.VmFromG(ctx, ni, di)
ly.Acts.SpikeFromVm(ctx, ni, di)
if ly.Type != IOLayer {
ly.Learn.CaFromSpike(ctx, ni, di)
if !ly.IsTarget() {
learnNow := ly.Learn.Timing.LearnTiming(ctx, ni, di)
if learnNow {
da := GlobalScalars.Value(int(GvDA), int(di))
ach := GlobalScalars.Value(int(GvACh), int(di))
nrnCaD := Neurons.Value(int(ni), int(di), int(CaD))
mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaD, PoolAvgMax(AMCaD, AMCycle, Max, lpi, di))
modlr := ly.Learn.NeuroMod.LRMod(da, ach)
dlr := ly.Learn.RLRate.RLRateDiff(Neurons.Value(int(ni), int(di), int(CaP)), nrnCaD)
Neurons.Set(mlr*dlr*modlr, int(ni), int(di), int(RLRate))
}
}
}
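// GeIntNorm is GeInt normalized by the layer-level GeInt cycle max, when positive.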
lmax := PoolAvgMax(AMGeInt, AMCycle, Max, lpi, di)
if lmax > 0 {
Neurons.Set(Neurons.Value(int(ni), int(di), int(GeInt))/lmax, int(ni), int(di), int(GeIntNorm))
} else {
Neurons.Set(Neurons.Value(int(ni), int(di), int(GeInt)), int(ni), int(di), int(GeIntNorm))
}
if ctx.MinusPhase.IsFalse() && ctx.PlusPhase.IsFalse() {
return
}
lrnCyc := ctx.Cycle - ctx.ISICycles
if lrnCyc >= ly.Acts.Dt.MaxCycStart {
Neurons.SetAdd(ly.Learn.CaSpike.Dt.PDt*(Neurons.Value(int(ni), int(di), int(CaM))-Neurons.Value(int(ni), int(di), int(CaPMaxCa))), int(ni), int(di), int(CaPMaxCa))
spkmax := Neurons.Value(int(ni), int(di), int(CaPMaxCa))
if spkmax > Neurons.Value(int(ni), int(di), int(CaPMax)) {
Neurons.Set(spkmax, int(ni), int(di), int(CaPMax))
}
}
if ly.Type != IOLayer { // uses bins for itself
CaBinIncrement(Neurons.Value(int(ni), int(di), int(CaSyn)), ctx.CyclesTotal, ni, di)
}
}
// SendSpike sends spike to receivers for all neurons that spiked
// last step in Cycle, integrated the next time around.
// Called directly by Network, iterates over data.
func (ly *LayerParams) SendSpike(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
lpi := ly.PoolIndex(0)
lni := ni - ly.Indexes.NeurSt
ly.PostSpike(ctx, lpi, pi, ni, di)
for pti := uint32(0); pti < ly.Indexes.SendN; pti++ {
pt := GetPaths(ly.Indexes.SendSt + pti)
pt.SendSpike(ctx, ni, di, lni)
}
}
// PostSpikeSpecial does updates at neuron level after spiking has been computed.
// This is where special layer types add extra code.
func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Burst))
li := ly.Index
pil := pi - ly.PoolSt // 0-n pool index
pnn := uint32(PoolNNeurons(pi))
pni := NeuronIxs.Value(int(ni), int(NrnNeurIndex)) - uint32(PoolIxs.Value(int(pi), int(PoolNeurSt)))
hasRew := GlobalScalars.Value(int(GvHasRew), int(di)) > 0
switch ly.Type {
case SuperLayer:
if ctx.PlusPhase.IsTrue() {
actMax := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
actAvg := PoolAvgMax(AMCaP, AMCycle, Avg, lpi, di)
thr := ly.Bursts.ThrFromAvgMax(actAvg, actMax)
if Neurons.Value(int(ni), int(di), int(CaP)) < thr {
Neurons.Set(0.0, int(ni), int(di), int(Burst))
}
}
case PTPredLayer, CTLayer:
if ctx.Cycle == ctx.ThetaCycles-1 {
if ly.CT.DecayTau == 0 {
Neurons.Set(Neurons.Value(int(ni), int(di), int(CtxtGeRaw)), int(ni), int(di), int(CtxtGe))
} else {
Neurons.SetAdd(Neurons.Value(int(ni), int(di), int(CtxtGeRaw)), int(ni), int(di), int(CtxtGe))
}
Neurons.Set(Neurons.Value(int(ni), int(di), int(CtxtGe)), int(ni), int(di), int(CtxtGeOrig))
}
case VSGatedLayer:
dr := float32(0)
if pil == 0 {
dr = GlobalScalars.Value(int(GvVSMatrixJustGated), int(di))
} else {
dr = GlobalScalars.Value(int(GvVSMatrixHasGated), int(di))
}
Neurons.Set(dr, int(ni), int(di), int(Act))
case IOLayer:
ly.IOUpdate(ctx, lpi, pi, ni, di)
case CNeLayer, CNiIOLayer, CNiUpLayer:
ly.IOLearn(ctx, ni-ly.Indexes.NeurSt, lpi, pi, ni, di)
case BLALayer:
if ctx.Cycle == ctx.ThetaCycles-1 {
if hasRew {
Neurons.Set(0.0, int(ni), int(di), int(CtxtGe))
Neurons.Set(0.0, int(ni), int(di), int(CtxtGeOrig))
} else if GlobalScalars.Value(int(GvACh), int(di)) > 0.1 {
Neurons.Set(Neurons.Value(int(ni), int(di), int(CtxtGeRaw)), int(ni), int(di), int(CtxtGe))
Neurons.Set(Neurons.Value(int(ni), int(di), int(CtxtGe)), int(ni), int(di), int(CtxtGeOrig))
}
}
case LHbLayer:
if pni == 0 {
Neurons.Set(GlobalScalars.Value(int(GvLHbDip), int(di)), int(ni), int(di), int(Act))
} else {
Neurons.Set(GlobalScalars.Value(int(GvLHbBurst), int(di)), int(ni), int(di), int(Act))
}
Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(Neurons.Value(int(ni), int(di), int(GeRaw))), int(ni), int(di), int(GeSyn))
case DrivesLayer:
dr := GlobalVectors.Value(int(GvDrives), int(pil-1), int(di))
act := dr
if dr > 0 {
act = ly.Acts.PopCode.EncodeValue(pni, pnn, dr)
}
Neurons.Set(act, int(ni), int(di), int(Act))
case UrgencyLayer:
ur := GlobalScalars.Value(int(GvUrgency), int(di))
act := ur
if ur > 0 {
act = ly.Acts.PopCode.EncodeValue(pni, pnn, ur)
}
Neurons.Set(act, int(ni), int(di), int(Act))
case USLayer:
us := RubiconUSStimValue(di, pil-1, ly.Learn.NeuroMod.Valence)
act := us
if us > 0 {
act = ly.Acts.PopCode.EncodeValue(pni, pnn, us)
}
// D2Mod = final
if ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase.IsTrue()) {
Neurons.Set(act, int(ni), int(di), int(Act))
}
case PVLayer:
if hasRew {
pv := float32(0)
if ly.Learn.NeuroMod.Valence == Positive {
pv = GlobalScalars.Value(int(GvPVpos), int(di))
} else {
pv = GlobalScalars.Value(int(GvPVneg), int(di))
}
act := ly.Acts.PopCode.EncodeValue(pni, ly.Indexes.NNeurons, pv)
Neurons.Set(act, int(ni), int(di), int(Act))
}
case LDTLayer:
// I set this in CyclePost
Neurons.Set(GlobalScalars.Value(int(GvAChRaw), int(di)), int(ni), int(di), int(Act))
case VTALayer:
// I set this in CyclePost
Neurons.Set(GlobalScalars.Value(int(GvVtaDA), int(di)), int(ni), int(di), int(Act))
case RewLayer:
Neurons.Set(GlobalScalars.Value(int(GvRew), int(di)), int(ni), int(di), int(Act))
case RWPredLayer:
// clipped linear
Neurons.Set(ly.RWPred.PredRange.ClampValue(Neurons.Value(int(ni), int(di), int(Ge))), int(ni), int(di), int(Act))
if pni == 0 {
LayerStates.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(li), int(di), int(LayerRewPredPos))
} else {
LayerStates.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(li), int(di), int(LayerRewPredNeg))
}
case RWDaLayer:
// I set this in CyclePost
Neurons.Set(GlobalScalars.Value(int(GvDA), int(di)), int(ni), int(di), int(Act))
case TDPredLayer:
// linear
Neurons.Set(Neurons.Value(int(ni), int(di), int(Ge)), int(ni), int(di), int(Act))
if pni == 0 {
LayerStates.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(li), int(di), int(LayerRewPredPos))
} else {
LayerStates.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(li), int(di), int(LayerRewPredNeg))
}
case TDIntegLayer:
Neurons.Set(GlobalScalars.Value(int(GvRewPred), int(di)), int(ni), int(di), int(Act))
case TDDaLayer:
// I set this in CyclePost
Neurons.Set(GlobalScalars.Value(int(GvDA), int(di)), int(ni), int(di), int(Act))
default:
}
}
// PostSpike does updates at neuron level after spiking has been computed.
// It calls PostSpikeSpecial, and then integrates the GeInt, GiInt, and ActInt values.
func (ly *LayerParams) PostSpike(ctx *Context, lpi, pi, ni, di uint32) {
ly.PostSpikeSpecial(ctx, lpi, pi, ni, di)
intdt := ly.Acts.Dt.IntDt
Neurons.SetAdd(intdt*(Neurons.Value(int(ni), int(di), int(Ge))-Neurons.Value(int(ni), int(di), int(GeInt))), int(ni), int(di), int(GeInt))
Neurons.SetAdd(intdt*(Neurons.Value(int(ni), int(di), int(GiSyn))-Neurons.Value(int(ni), int(di), int(GiInt))), int(ni), int(di), int(GiInt))
// act int is reset at start of the plus phase -- needs faster integration:
if ctx.PlusPhase.IsTrue() {
intdt *= 3.0
}
// using reg act here now
Neurons.SetAdd(intdt*(Neurons.Value(int(ni), int(di), int(Act))-Neurons.Value(int(ni), int(di), int(ActInt))), int(ni), int(di), int(ActInt))
}
// CyclePost is called after the standard Cycle update, as a separate
// network layer loop.
// This is reserved for any kind of special ad-hoc types that
// need to do something special after Spiking is finally computed and Sent.
// Typically used for updating global values in the Context state,
// such as updating a neuromodulatory signal such as dopamine.
// Any updates here must also be done in gpu_wgsl/gpu_cyclepost.wgsl
func (ly *LayerParams) CyclePost(ctx *Context, di uint32) {
lpi := ly.PoolIndex(0)
ly.CyclePostLayer(ctx, lpi, di)
switch ly.Type {
case VSMatrixLayer, BGThalLayer:
ly.GatedFromCaPMax(ctx, di)
case DSMatrixLayer:
ly.GatedFromCaPMax(ctx, di)
for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
ly.CyclePostDSMatrixLayer(ctx, pi, di, int32(spi))
}
case CeMLayer:
ly.CyclePostCeMLayer(ctx, lpi, di)
case VSPatchLayer:
for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
ly.CyclePostVSPatchLayer(ctx, pi, di, int32(spi))
}
case DSPatchLayer:
for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
ly.CyclePostDSPatchLayer(ctx, pi, di, int32(spi))
}
case LDTLayer:
srcLay1Act := ly.LDTSrcLayAct(ly.LDT.SrcLay1Index, di)
srcLay2Act := ly.LDTSrcLayAct(ly.LDT.SrcLay2Index, di)
srcLay3Act := ly.LDTSrcLayAct(ly.LDT.SrcLay3Index, di)
srcLay4Act := ly.LDTSrcLayAct(ly.LDT.SrcLay4Index, di)
ly.CyclePostLDTLayer(ctx, di, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act)
case VTALayer:
ly.CyclePostVTALayer(ctx, di)
case RWDaLayer:
ly.CyclePostRWDaLayer(ctx, di)
case TDPredLayer:
ly.CyclePostTDPredLayer(ctx, di)
case TDIntegLayer:
ly.CyclePostTDIntegLayer(ctx, di)
case TDDaLayer:
ly.CyclePostTDDaLayer(ctx, di)
default:
}
}
//////// Special CyclePost methods for different layer types
// CyclePostLayer is called for all layer types
func (ly *LayerParams) CyclePostLayer(ctx *Context, lpi, di uint32) {
casp := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
if ctx.Cycle >= ly.Acts.Dt.MaxCycStart {
if casp > ly.Inhib.ActAvg.RTThr && LayerStates.Value(int(ly.Index), int(di), int(LayerRT)) <= 0 {
LayerStates.Set(float32(ctx.Cycle), int(ly.Index), int(di), int(LayerRT))
}
if PoolsInt.Value(int(lpi), int(di), int(PoolGated)) > 0 && LayerStates.Value(int(ly.Index), int(di), int(GatedRT)) <= 0 {
LayerStates.Set(float32(ctx.Cycle), int(ly.Index), int(di), int(GatedRT))
}
}
}
// LDTSrcLayAct returns the overall activity level for given source layer
// for purposes of computing ACh salience value.
// Typically the input is a superior colliculus (SC) layer that rapidly
// accommodates after the onset of a stimulus.
// Uses the layer-level pool CaP Cycle Avg as the layer activity measure.
func (ly *LayerParams) LDTSrcLayAct(layIndex int32, di uint32) float32 {
if layIndex < 0 {
return 0
}
oly := GetLayers(uint32(layIndex))
opi := oly.PoolIndex(0)
return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di)
}
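// CyclePostLDTLayer computes the ACh salience value from the source layer
// activities and updates the global GvAChRaw and GvACh values:
// ACh rises instantly but decays gradually via IntDt integration.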
func (ly *LayerParams) CyclePostLDTLayer(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act float32) {
ach := ly.LDT.ACh(ctx, di, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act)
GlobalScalars.Set(ach, int(GvAChRaw), int(di))
if ach > GlobalScalars.Value(int(GvACh), int(di)) { // instant up
GlobalScalars.Set(ach, int(GvACh), int(di))
} else {
GlobalScalars.SetAdd(ly.Acts.Dt.IntDt*(ach-GlobalScalars.Value(int(GvACh), int(di))), int(GvACh), int(di))
}
}
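// CyclePostRWDaLayer computes the Rescorla-Wagner dopamine signal as the
// actual reward minus the RWPred layer prediction (positive minus negative units),
// recording the prediction in GvRewPred and the DA value in GvDA.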
func (ly *LayerParams) CyclePostRWDaLayer(ctx *Context, di uint32) {
pli := uint32(ly.RWDa.RWPredLayIndex)
pred := LayerStates.Value(int(pli), int(di), int(LayerRewPredPos)) - LayerStates.Value(int(pli), int(di), int(LayerRewPredNeg))
GlobalScalars.Set(pred, int(GvRewPred), int(di)) // record
da := float32(0)
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 {
da = GlobalScalars.Value(int(GvRew), int(di)) - pred
}
GlobalScalars.Set(da, int(GvDA), int(di)) // updates global value that will be copied to layers next cycle.
}
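// CyclePostTDPredLayer records the reward prediction (positive minus negative
// units) in GvPrevPred during the plus phase, for use as the prior prediction
// by TDInteg on the next trial.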
func (ly *LayerParams) CyclePostTDPredLayer(ctx *Context, di uint32) {
if ctx.PlusPhase.IsFalse() {
return
}
pred := LayerStates.Value(int(ly.Index), int(di), int(LayerRewPredPos)) - LayerStates.Value(int(ly.Index), int(di), int(LayerRewPredNeg))
GlobalScalars.Set(pred, int(GvPrevPred), int(di))
}
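// CyclePostTDIntegLayer computes the integrated reward prediction value:
// in the plus phase it is the current reward plus the discounted prediction
// from the TDPred layer (stored in LayerRewPredPos), while in the minus phase
// it is the gain-scaled previous prediction (stored in LayerRewPredNeg).
// The result is recorded in GvRewPred.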
func (ly *LayerParams) CyclePostTDIntegLayer(ctx *Context, di uint32) {
rew := float32(0)
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 {
rew = GlobalScalars.Value(int(GvRew), int(di))
}
rpval := float32(0)
if ctx.PlusPhase.IsTrue() {
pli := uint32(ly.TDInteg.TDPredLayIndex)
pred := LayerStates.Value(int(pli), int(di), int(LayerRewPredPos)) - LayerStates.Value(int(pli), int(di), int(LayerRewPredNeg))
rpval = rew + ly.TDInteg.Discount*ly.TDInteg.PredGain*pred
LayerStates.Set(rpval, int(ly.Index), int(di), int(LayerRewPredPos)) // our plus phase = new integrated value
} else {
rpval = ly.TDInteg.PredGain * GlobalScalars.Value(int(GvPrevPred), int(di))
LayerStates.Set(rpval, int(ly.Index), int(di), int(LayerRewPredNeg)) // our minus phase = prior integrated value
}
GlobalScalars.Set(rpval, int(GvRewPred), int(di)) // global value will be copied to layers next cycle
}
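// CyclePostTDDaLayer computes the temporal-difference dopamine signal as the
// difference between the plus-phase and minus-phase integrated reward predictions
// from the TDInteg layer (0 in the minus phase), setting the global GvDA value.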
func (ly *LayerParams) CyclePostTDDaLayer(ctx *Context, di uint32) {
ili := uint32(ly.TDDa.TDIntegLayIndex)
da := LayerStates.Value(int(ili), int(di), int(LayerRewPredPos)) - LayerStates.Value(int(ili), int(di), int(LayerRewPredNeg))
if ctx.PlusPhase.IsFalse() {
da = 0
}
GlobalScalars.Set(da, int(GvDA), int(di)) // updates global value that will be copied to layers next cycle.
}
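// CyclePostCeMLayer records the cycle-level max CaD activity of this layer
// into GvCeMpos or GvCeMneg, depending on its Valence.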
func (ly *LayerParams) CyclePostCeMLayer(ctx *Context, lpi, di uint32) {
casd := PoolAvgMax(AMCaD, AMCycle, Max, lpi, di)
if ly.Learn.NeuroMod.Valence == Positive {
GlobalScalars.Set(casd, int(GvCeMpos), int(di))
} else {
GlobalScalars.Set(casd, int(GvCeMneg), int(di))
}
}
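// CyclePostVTALayer drives the VTA dopamine computation from the current
// ACh level and whether a reward is present.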
func (ly *LayerParams) CyclePostVTALayer(ctx *Context, di uint32) {
ly.VTA.VTADA(ctx, di, GlobalScalars.Value(int(GvACh), int(di)), (GlobalScalars.Value(int(GvHasRew), int(di)) > 0))
}
// CyclePostVSPatchLayer records the pool-level average CaD activity into the
// GvVSPatchD1 or GvVSPatchD2 global vectors, depending on DAMod.
// Note: needs to iterate over sub-pools in layer.
func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi int32) {
casd := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
if ly.Learn.NeuroMod.DAMod == D1Mod {
GlobalVectors.Set(casd, int(GvVSPatchD1), int(uint32(spi-1)), int(di))
} else {
GlobalVectors.Set(casd, int(GvVSPatchD2), int(uint32(spi-1)), int(di))
}
}
//////// Phase timescale
// DecayStateNeuronsAll decays neural activation state by given proportion
// (default decay values are ly.Params.Acts.Decay.Act, Glong, AHP)
// for all data parallel indexes. Does not decay pool or layer state.
// This is used for minus phase of Pulvinar layers to clear state in prep
// for driver plus phase.
func (ly *LayerParams) DecayStateNeuronsAll(ctx *Context, decay, glong, ahp float32) {
nn := ly.Indexes.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
ly.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
}
}
}
// NewStateLayer does NewState at the layer level, called once per layer;
// it iterates over data parallel indexes and pools internally.
func (ly *LayerParams) NewStateLayer(ctx *Context) {
actMinusAvg := float32(0)
actPlusAvg := float32(0)
np := uint32(ly.Indexes.NPools)
for di := uint32(0); di < ctx.NData; di++ {
lpi := ly.PoolIndex(0)
actMinusAvg += PoolAvgMax(AMAct, AMMinus, Avg, lpi, di)
actPlusAvg += PoolAvgMax(AMAct, AMPlus, Avg, lpi, di)
LayerStates.Set(-1.0, int(ly.Index), int(di), int(LayerRT))
LayerStates.Set(-1.0, int(ly.Index), int(di), int(GatedRT))
for spi := uint32(0); spi < np; spi++ {
pi := ly.PoolIndex(spi)
ly.NewStatePool(ctx, pi, di) // also calls DecayState on pool
}
}
// note: long-running averages must be based on aggregate data, drive adaptation
// of Gi layer inhibition.
davg := 1 / float32(ctx.NData)
actMinusAvg *= davg
actPlusAvg *= davg
for di := uint32(0); di < ctx.NData; di++ {
ly.NewStateLayerActAvg(ctx, di, actMinusAvg, actPlusAvg)
}
}
// NewStateLayerActAvg updates ActAvg.ActMAvg and ActPAvg based on current values
// that have been averaged across NData already.
func (ly *LayerParams) NewStateLayerActAvg(ctx *Context, di uint32, actMinusAvg, actPlusAvg float32) {
mavg := LayerStates.Value(int(ly.Index), int(di), int(LayerActMAvg))
pavg := LayerStates.Value(int(ly.Index), int(di), int(LayerActPAvg))
ly.Inhib.ActAvg.AvgFromAct(&mavg, actMinusAvg, ly.Acts.Dt.LongAvgDt)
ly.Inhib.ActAvg.AvgFromAct(&pavg, actPlusAvg, ly.Acts.Dt.LongAvgDt)
LayerStates.Set(mavg, int(ly.Index), int(di), int(LayerActMAvg))
LayerStates.Set(pavg, int(ly.Index), int(di), int(LayerActPAvg))
}
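// NewStatePool does pool-level initialization at the start of a new state:
// sets the Clamped flag for non-additive clamped Input layers, decays the
// pool inhibition state, and clears the PoolGated flag.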
func (ly *LayerParams) NewStatePool(ctx *Context, pi, di uint32) {
PoolsInt.Set(0, int(pi), int(di), int(Clamped))
if ly.Acts.Clamp.Add.IsFalse() && ly.IsInput() {
PoolsInt.Set(1, int(pi), int(di), int(Clamped))
}
PoolInhibDecay(pi, di, ly.Acts.Decay.Act)
PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
}
// NewStateNeuron handles all initialization at start of new input pattern.
// Should already have presented the external input to the network at this point.
func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(Burst)), int(ni), int(di), int(BurstPrv))
Neurons.Set(Neurons.Value(int(ni), int(di), int(CaD)), int(ni), int(di), int(CaDPrev))
Neurons.Set(0.0, int(ni), int(di), int(CaPMax))
Neurons.Set(0.0, int(ni), int(di), int(CaPMaxCa))
ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP)
// Note: synapse-level Ca decay happens in DWt
ly.Acts.KNaNewState(ctx, ni, di)
if ly.IsNuclear() {
ly.NuclearLearnReset(ctx, ni, di)
}
}
// Beta1Neuron does neuron level Beta1 updating.
func (ly *LayerParams) Beta1Neuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Beta1))
}
// Beta2Neuron does neuron level Beta2 updating.
func (ly *LayerParams) Beta2Neuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Beta2))
}
//////// Minus Phase
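// MinusPhasePool does pool-level updating at the end of the minus phase:
// copies cycle-level values to the minus phase, sets the Clamped flag for
// non-additive clamped Target layers, and for the layer-level pool updates
// the long-running AvgMaxGeM / AvgMaxGiM stats via AvgGeM.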
func (ly *LayerParams) MinusPhasePool(ctx *Context, pi uint32) {
for di := uint32(0); di < ctx.NData; di++ {
PoolCycleToMinus(pi, di)
if ly.Acts.Clamp.Add.IsFalse() && ly.IsTarget() {
PoolsInt.Set(1, int(pi), int(di), int(Clamped))
}
}
if PoolIxs.Value(int(pi), int(PoolIsLayer)) == 0 {
return
}
geIntMinusMax := float32(0)
giIntMinusMax := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
geIntMinusMax = math32.Max(geIntMinusMax, PoolAvgMax(AMGeInt, AMMinus, Max, pi, di))
giIntMinusMax = math32.Max(giIntMinusMax, PoolAvgMax(AMGiInt, AMMinus, Max, pi, di))
}
for di := uint32(0); di < ctx.NData; di++ {
ly.AvgGeM(ctx, di, geIntMinusMax, giIntMinusMax)
}
}
// AvgGeM computes the average and max GeInt, GiInt in minus phase
// (AvgMaxGeM, AvgMaxGiM) stats, updated in MinusPhase,
// using values that have already been maxed across NData.
func (ly *LayerParams) AvgGeM(ctx *Context, di uint32, geIntMinusMax, giIntMinusMax float32) {
gem := LayerStates.Value(int(ly.Index), int(di), int(LayerAvgMaxGeM))
gim := LayerStates.Value(int(ly.Index), int(di), int(LayerAvgMaxGiM))
gem += ly.Acts.Dt.LongAvgDt * (geIntMinusMax - gem)
gim += ly.Acts.Dt.LongAvgDt * (giIntMinusMax - gim)
LayerStates.Set(gem, int(ly.Index), int(di), int(LayerAvgMaxGeM))
LayerStates.Set(gim, int(ly.Index), int(di), int(LayerAvgMaxGiM))
}
// MinusPhaseNeuron does neuron level minus-phase updating
func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(ni), int(di), int(ActM))
}
// MinusPhasePost does special algorithm processing at the end of the minus phase.
func (ly *LayerParams) MinusPhasePost(ctx *Context) {
switch ly.Type {
case VSMatrixLayer, DSMatrixLayer:
ly.MatrixGated(ctx) // need gated state for decisions about action processing, so do in minus too
case PulvinarLayer:
ly.DecayStateNeuronsAll(ctx, 1, 1, 0)
default:
}
}
// PlusPhaseStartNeuron does neuron level plus-phase start:
// applies Target inputs as External inputs.
func (ly *LayerParams) PlusPhaseStartNeuron(ctx *Context, ni, di uint32) {
if NeuronHasFlag(NeuronHasTarg, ni, di) { // will be clamped in plus phase
Neurons.Set(Neurons.Value(int(ni), int(di), int(Target)), int(ni), int(di), int(Ext))
NeuronSetFlag(NeuronHasExt, ni, di)
// get fresh update on plus phase output acts
Neurons.Set(-1.0, int(ni), int(di), int(ISI))
Neurons.Set(-1.0, int(ni), int(di), int(ISIAvg))
// reset for plus phase
Neurons.Set(ly.Acts.Init.Act, int(ni), int(di), int(ActInt))
}
}
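// PlusPhaseEndPool copies cycle-level pool values to the plus phase values.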
func (ly *LayerParams) PlusPhaseEndPool(ctx *Context, pi, di uint32) {
PoolCycleToPlus(pi, di)
}
// PlusPhaseEndNeuron does neuron level plus-phase end updating.
func (ly *LayerParams) PlusPhaseEndNeuron(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
lpi := ly.PoolIndex(0)
Neurons.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(ni), int(di), int(ActP))
nrnCaP := Neurons.Value(int(ni), int(di), int(CaP))
nrnCaD := Neurons.Value(int(ni), int(di), int(CaD))
ly.Learn.CaLearn.ETrace(ctx, ni, di, nrnCaD)
da := GlobalScalars.Value(int(GvDA), int(di))
ach := GlobalScalars.Value(int(GvACh), int(di))
mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaD, PoolAvgMax(AMCaD, AMCycle, Max, lpi, di))
modlr := ly.Learn.NeuroMod.LRMod(da, ach)
dlr := float32(1)
hasRew := (GlobalScalars.Value(int(GvHasRew), int(di))) > 0
setRLRate := true
switch ly.Type {
case DSPatchLayer:
if hasRew { // reward time
mlr = 1 // don't use sig deriv
} else {
modlr = 1 // don't use mod
}
case VSPatchLayer:
da = GlobalScalars.Value(int(GvVSPatchPosRPE), int(di)) // our own personal
modlr = ly.Learn.NeuroMod.LRMod(da, ach)
mlr = ly.Learn.RLRate.RLRateSigDeriv(Neurons.Value(int(ni), int(di), int(CaDPrev)), 1) // note: don't have proper max here
case VSMatrixLayer, DSMatrixLayer:
// note: modlr is further modulated by PF in PostPlus
if hasRew { // reward time
mlr = 1 // don't use sig deriv
} else {
modlr = 1 // don't use mod
}
case BLALayer:
dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, Neurons.Value(int(ni), int(di), int(CaDPrev))) // delta on previous trial
if !ly.Learn.NeuroMod.IsBLAExt() && PoolIxs.Value(int(pi), int(PoolNeurSt)) == 0 { // first pool
dlr = 0 // first pool is novelty / curiosity -- no learn
}
default:
dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, nrnCaD)
if !ly.IsTarget() {
setRLRate = ly.Learn.Timing.On.IsFalse() // else computed at time of learning
}
}
if setRLRate {
Neurons.Set(mlr*dlr*modlr, int(ni), int(di), int(RLRate))
}
var tau float32
sahpN := Neurons.Value(int(ni), int(di), int(SahpN))
nrnSahpCa := Neurons.Value(int(ni), int(di), int(SahpCa))
ly.Acts.Sahp.NinfTauFromCa(nrnSahpCa, &sahpN, &tau)
nrnSahpCa = ly.Acts.Sahp.CaInt(nrnSahpCa, nrnCaD)
Neurons.Set(sahpN, int(ni), int(di), int(SahpN))
Neurons.Set(nrnSahpCa, int(ni), int(di), int(SahpCa))
Neurons.Set(ly.Acts.Sahp.GsAHP(sahpN), int(ni), int(di), int(Gsahp))
}
// PlusPhaseEndPost does special algorithm processing at end of plus.
func (ly *LayerParams) PlusPhaseEndPost(ctx *Context) {
ly.PlusPhaseEndActAvg(ctx)
ly.PhaseDiffFromActs(ctx) // GPU syncs down the state before this
np := ly.Indexes.NPools
if ly.Type == PTMaintLayer && ly.CT.OFCposPT.IsTrue() {
for spi := uint32(1); spi < np; spi++ {
for di := uint32(0); di < ctx.NData; di++ {
pi := ly.PoolIndex(spi)
val := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
GlobalVectors.Set(val, int(GvOFCposPTMaint), int(uint32(spi-1)), int(di))
}
}
}
if ly.Acts.Decay.OnRew.IsTrue() {
for di := uint32(0); di < ctx.NData; di++ {
hasRew := (GlobalScalars.Value(int(GvHasRew), int(di)) > 0)
giveUp := (GlobalScalars.Value(int(GvGiveUp), int(di)) > 0)
if hasRew || giveUp {
ly.DecayState(ctx, di, 1, 1, 1)
for spi := uint32(0); spi < np; spi++ {
pi := ly.PoolIndex(spi)
PoolAvgMaxZero(pi, di)
}
}
}
}
if ly.Type == VSMatrixLayer || ly.Type == DSMatrixLayer {
ly.MatrixGated(ctx)
}
}
// PlusPhaseEndActAvg updates ActAvg and DTrgAvg at the plus phase end.
// Note: could be done on GPU but not worth it at this point.
func (ly *LayerParams) PlusPhaseEndActAvg(ctx *Context) {
nn := ly.Indexes.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
dTrgSum := float32(0)
avgSum := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
dTrgSum += ly.LearnTrgAvgErrLRate() * (Neurons.Value(int(ni), int(di), int(CaP)) - Neurons.Value(int(ni), int(di), int(CaD)))
avgSum += ly.Acts.Dt.LongAvgDt * (Neurons.Value(int(ni), int(di), int(ActM)) - NeuronAvgs.Value(int(ni), int(ActAvg)))
}
NeuronAvgs.SetAdd(dTrgSum, int(ni), int(DTrgAvg))
NeuronAvgs.SetAdd(avgSum, int(ni), int(ActAvg))
}
}
//gosl:end
//////// Apply Ext
// InitExt initializes external input state.
// Should be called prior to ApplyExt on all layers receiving Ext input.
func (ly *Layer) InitExt() {
if !ly.Type.IsExt() {
return
}
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ly.MaxData; di++ {
ly.Params.InitExt(ni, di)
Exts.Set(-1, int(ly.Params.Indexes.ExtsSt+lni), int(di)) // missing by default
}
}
}
// ApplyExtAll applies external input in the form of a tensor.Float32 or 64.
// Negative values and NaNs are not valid, and will be interpreted as missing inputs.
// This version applies all NData data parallel inputs at once, with outer dimension
// equal to NData.
// If dimensionality of tensor matches that of layer, and is 2D or 4D,
// then each dimension is iterated separately, so any mismatch preserves
// dimensional structure.
// Otherwise, the flat 1D view of the tensor is used.
// If the layer is a Target or Compare layer type, then it goes in Target
// otherwise it goes in Ext.
// Also sets the Exts values on layer, which are used for the GPU version,
// which requires calling the network ApplyExts() method -- is a no-op for CPU.
func (ly *Layer) ApplyExtAll(ctx *Context, ext tensor.Values) {
gpu.VectorizeFunc(0, int(ctx.NData), func(idx uint32) {
ed := ext.SubSpace(int(idx))
ly.ApplyExt(idx, ed)
})
}
// ApplyExt applies external input in the form of a tensor.Float32 or 64.
// Negative values and NaNs are not valid, and will be interpreted as missing inputs.
// The given data index di is the data parallel index (0 <= di < MaxData):
// must present inputs separately for each separate data parallel set.
// If dimensionality of tensor matches that of layer, and is 2D or 4D,
// then each dimension is iterated separately, so any mismatch preserves
// dimensional structure.
// Otherwise, the flat 1D view of the tensor is used.
// If the layer is a Target or Compare layer type, then it goes in Target
// otherwise it goes in Ext.
// Also sets the Exts values on layer, which are used for the GPU version,
// which requires calling the network ApplyExts() method -- is a no-op for CPU.
func (ly *Layer) ApplyExt(di uint32, ext tensor.Tensor) {
switch {
case ext.NumDims() == 2 && ly.Shape.NumDims() == 4: // special case
ly.ApplyExt2Dto4D(di, ext)
case ext.NumDims() != ly.Shape.NumDims() || !(ext.NumDims() == 2 || ext.NumDims() == 4):
ly.ApplyExt1DTsr(di, ext)
case ext.NumDims() == 2:
ly.ApplyExt2D(di, ext)
case ext.NumDims() == 4:
ly.ApplyExt4D(di, ext)
}
}
// ApplyExtValue applies the given external value to the given neuron
// using clearMask, setMask, and toTarg from ApplyExtFlags.
// Also saves val in Exts for potential use by GPU.
func (ly *Layer) ApplyExtValue(lni, di uint32, val float32, clearMask, setMask NeuronFlags, toTarg bool) {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
return
}
Exts.Set(val, int(ly.Params.Indexes.ExtsSt+lni), int(di))
if val < 0 {
return
}
if toTarg {
Neurons.Set(val, int(ni), int(di), int(Target))
} else {
Neurons.Set(val, int(ni), int(di), int(Ext))
}
NeuronClearFlag(clearMask, ni, di)
NeuronSetFlag(setMask, ni, di)
}
// ApplyExtFlags gets the clear mask and set mask for updating neuron flags
// based on layer type, and whether input should be applied to Target (else Ext)
func (ly *Layer) ApplyExtFlags() (clearMask, setMask NeuronFlags, toTarg bool) {
ly.Params.ApplyExtFlags(&clearMask, &setMask, &toTarg)
return
}
// ApplyExt2D applies 2D tensor external input
func (ly *Layer) ApplyExt2D(di uint32, ext tensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
ymx := min(ext.DimSize(0), ly.Shape.DimSize(0))
xmx := min(ext.DimSize(1), ly.Shape.DimSize(1))
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
val := float32(ext.Float(idx...))
lni := uint32(ly.Shape.IndexTo1D(idx...))
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
}
// ApplyExt2Dto4D applies 2D tensor external input to a 4D layer
func (ly *Layer) ApplyExt2Dto4D(di uint32, ext tensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
lNy, lNx, _, _ := tensor.Projection2DShape(&ly.Shape, false)
ymx := min(ext.DimSize(0), lNy)
xmx := min(ext.DimSize(1), lNx)
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
val := float32(ext.Float(idx...))
lni := uint32(tensor.Projection2DIndex(&ly.Shape, false, y, x))
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
}
// ApplyExt4D applies 4D tensor external input
func (ly *Layer) ApplyExt4D(di uint32, ext tensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
ypmx := min(ext.DimSize(0), ly.Shape.DimSize(0))
xpmx := min(ext.DimSize(1), ly.Shape.DimSize(1))
ynmx := min(ext.DimSize(2), ly.Shape.DimSize(2))
xnmx := min(ext.DimSize(3), ly.Shape.DimSize(3))
for yp := 0; yp < ypmx; yp++ {
for xp := 0; xp < xpmx; xp++ {
for yn := 0; yn < ynmx; yn++ {
for xn := 0; xn < xnmx; xn++ {
idx := []int{yp, xp, yn, xn}
val := float32(ext.Float(idx...))
lni := uint32(ly.Shape.IndexTo1D(idx...))
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
}
}
}
// ApplyExt1DTsr applies external input using 1D flat interface into tensor.
// If the layer is a Target or Compare layer type, then it goes in Target
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1DTsr(di uint32, ext tensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
mx := uint32(min(ext.Len(), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := float32(ext.Float1D(int(lni)))
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
// ApplyExt1D applies external input in the form of a flat 1-dimensional slice of floats
// If the layer is a Target or Compare layer type, then it goes in Target
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1D(di uint32, ext []float64) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
mx := uint32(min(len(ext), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := float32(ext[lni])
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
// ApplyExt1D32 applies external input in the form of a flat 1-dimensional slice of float32s.
// If the layer is a Target or Compare layer type, then it goes in Target
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1D32(di uint32, ext []float32) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
mx := uint32(min(len(ext), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := ext[lni]
ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg)
}
}
// UpdateExtFlags updates the neuron flags for external input based on current
// layer Type field -- call this if the Type has changed since the last
// ApplyExt* method call.
func (ly *Layer) UpdateExtFlags(ctx *Context) {
clearMask, setMask, _ := ly.ApplyExtFlags()
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
NeuronClearFlag(clearMask, ni, di)
NeuronSetFlag(setMask, ni, di)
}
}
}
// TargToExt sets external input Ext from target values Target
// This is done at end of MinusPhase to allow targets to drive activity in plus phase.
// This can be called separately to simulate alpha cycles within theta cycles, for example.
func (ly *Layer) TargToExt(ctx *Context) {
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
if !NeuronHasFlag(NeuronHasTarg, ni, di) { // will be clamped in plus phase
continue
}
Neurons.Set(Neurons.Value(int(ni), int(di), int(Target)), int(ni), int(di), int(Ext))
NeuronSetFlag(NeuronHasExt, ni, di)
Neurons.Set(-1, int(ni), int(di), int(ISI)) // get fresh update on plus phase output acts
Neurons.Set(-1, int(ni), int(di), int(ISIAvg))
}
}
}
// ClearTargExt clears external inputs Ext that were set from target values Target.
// This can be called to simulate alpha cycles within theta cycles, for example.
func (ly *Layer) ClearTargExt(ctx *Context) {
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
if !NeuronHasFlag(NeuronHasTarg, ni, di) { // will be clamped in plus phase
continue
}
Neurons.Set(0, int(ni), int(di), int(Ext))
NeuronClearFlag(NeuronHasExt, ni, di)
Neurons.Set(-1, int(ni), int(di), int(ISI)) // get fresh update on plus phase output acts
Neurons.Set(-1, int(ni), int(di), int(ISIAvg))
}
}
}
// Code generated by "goal build"; DO NOT EDIT.
//line act-net.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/enums"
)
// Cycle runs one cycle of activation updating, equivalent to 1 msec.
// If getNeurons is true, then neuron state is synced back
// from the GPU (for cycle-level display etc). Otherwise, nothing is.
func (nt *Network) Cycle(getNeurons bool) {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
ld := int(nix.NLayers * ctx.NData)
pd := int(nix.NPools * ctx.NData)
RunGatherSpikes(nd)
RunLayerGi(ld)
RunBetweenGi(ld)
RunPoolGi(pd)
RunCycleNeuron(nd)
RunSendSpike(nd)
RunCyclePost(ld)
RunCycleInc(1)
if getNeurons {
RunDoneLayersNeurons()
}
// todo: fix this:
// var ldt, vta *Layer
//
// for _, ly := range nt.Layers {
// if ly.Type == VTALayer {
// vta = ly
// } else if ly.Type == LDTLayer {
// ldt = ly
// } else {
// ly.CyclePost(ctx)
// }
// }
//
// // ordering of these is important
//
// if ldt != nil {
// ldt.CyclePost(ctx)
// }
//
// if vta != nil {
// vta.CyclePost(ctx)
// }
}
// ThetaCycleStart starts a new theta cycle, resetting the ctx.Cycle counter
// and all phase state information in the context.
// The current Context.NData should be set properly prior to calling this
// and subsequent Cycle methods.
func (nt *Network) ThetaCycleStart(mode enums.Enum, testing bool) {
ctx := nt.Context()
ctx.ThetaCycleStart(mode, testing)
ToGPUCtxGlobal()
}
// MinusPhaseStart should be called at the start of a new minus phase,
// handling all initialization prior to applying a new input pattern.
func (nt *Network) MinusPhaseStart() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
ctx.MinusPhaseStart()
ToGPUCtxGlobal()
RunNewStateLayer(int(nix.NLayers))
RunNewStateNeuron(nd)
RunInitGBuffsPath(int(nix.NPaths))
// note: not completed until run cycles
}
// InitExt initializes external input state.
// Call prior to applying external inputs to layers.
func (nt *Network) InitExt() {
// note: important to do this for GPU
// to ensure partial inputs work the same way on CPU and GPU.
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.InitExt()
}
}
// ApplyExts applies external inputs to layers, based on values
// that were set in prior layer-specific ApplyExt calls.
// This does nothing on the CPU, but is critical for the GPU,
// and should be added to all sims where GPU will be used.
func (nt *Network) ApplyExts() {
if !UseGPU {
return
}
ToGPUExts()
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunApplyExtsNeuron(nd)
}
// ClearInputs clears the external input to the network,
// calling InitExt and ApplyExt.
func (nt *Network) ClearInputs() {
nt.InitExt()
nt.ApplyExts()
}
// Beta1 does updating at Beta1 timescale.
func (nt *Network) Beta1() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunBeta1Neuron(nd)
}
// Beta2 does updating at Beta2 timescale.
func (nt *Network) Beta2() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunBeta2Neuron(nd)
}
// MinusPhaseEnd does updating after end of minus phase.
func (nt *Network) MinusPhaseEnd() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunMinusPhasePool(int(nix.NPools))
RunMinusPhaseNeuron(nd)
RunMinusPhasePost(int(nix.NLayers))
RunDoneLayersNeurons() // this is critical for action-taking models to have the minus phase state
}
// PlusPhaseStart does updating at the start of the plus phase:
// applies Target inputs as External inputs.
func (nt *Network) PlusPhaseStart() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunPlusPhaseStartContext(1)
RunPlusPhaseStartNeuron(nd)
}
// PlusPhaseEnd does updating after end of plus phase.
// On GPU this is when we finally sync back Layers and Neurons.
func (nt *Network) PlusPhaseEnd() {
nix := nt.NetIxs()
ctx := nt.Context()
// fmt.Println("plus start:", ctx.Cycle)
nd := int(nix.NNeurons * ctx.NData)
pd := int(nix.NPools * ctx.NData)
RunPlusPhaseEndPool(pd)
RunPlusPhaseEndNeuron(nd)
RunPlusPhaseEndPost(int(nix.NLayers))
RunDoneLayersNeurons()
// fmt.Println("plus post sync:", ctx.Cycle)
}
// TargToExt sets external input Ext from target values Target
// This is done at end of MinusPhase to allow targets to drive activity in plus phase.
// This can be called separately to simulate alpha cycles within theta cycles, for example.
func (nt *Network) TargToExt() {
ctx := nt.Context()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.TargToExt(ctx)
}
}
// ClearTargExt clears external inputs Ext that were set from target values Target.
// This can be called to simulate alpha cycles within theta cycles, for example.
func (nt *Network) ClearTargExt() {
ctx := nt.Context()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.ClearTargExt(ctx)
}
}
// GPUTestWrite writes values to neuron, for testing
func (nt *Network) GPUTestWrite() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
RunGPUTestWrite(nd)
RunDoneLayersNeurons()
}
//gosl:start
//////// Kernels for all parallel CPU / GPU compute are here:
// GatherSpikes is the kernel over Neurons * Data for gathering
// spike inputs sent on the previous cycle.
func GatherSpikes(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].GatherSpikes(ctx, ni, di)
}
// LayerGi is the kernel over Layers * Data for updating Gi inhibition.
func LayerGi(i uint32) { //gosl:kernel
ctx := GetCtx(0)
li := ctx.ItemIndex(i)
if li >= NetworkIxs[0].NLayers {
return
}
di := ctx.DataIndex(i)
Layers[li].LayerGi(ctx, li, di)
}
// BetweenGi is the kernel over Layers * Data for updating Gi
// inhibition between layers.
func BetweenGi(i uint32) { //gosl:kernel
ctx := GetCtx(0)
li := ctx.ItemIndex(i)
if li >= NetworkIxs[0].NLayers {
return
}
di := ctx.DataIndex(i)
Layers[li].BetweenGi(ctx, di)
}
// PoolGi is the kernel over Pools * Data for updating Gi inhibition.
func PoolGi(i uint32) { //gosl:kernel
ctx := GetCtx(0)
pi := ctx.ItemIndex(i)
if pi >= NetworkIxs[0].NPools {
return
}
di := ctx.DataIndex(i)
PoolPoolGi(ctx, pi, di)
}
// CycleNeuron is the kernel over Neurons * Data to do
// one cycle (msec) of updating at the neuron level.
func CycleNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].CycleNeuron(ctx, ni, di)
}
// SendSpike is the kernel over Neurons * Data to
// send spike signal for neurons over threshold.
func SendSpike(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].SendSpike(ctx, ni, di)
}
// CyclePost is the kernel over Layers * Data to
// update state after each Cycle of updating.
func CyclePost(i uint32) { //gosl:kernel
ctx := GetCtx(0)
li := ctx.ItemIndex(i)
if li >= NetworkIxs[0].NLayers {
return
}
di := ctx.DataIndex(i)
Layers[li].CyclePost(ctx, di)
}
// CycleInc is the kernel over 1 call to increment the cycle counter.
func CycleInc(i uint32) { //gosl:kernel read-write:Ctx
if i != 0 {
return
}
ctx := GetCtx(0)
ctx.CycleInc()
}
// ApplyExtsNeuron is the kernel over Neurons * Data to
// apply Ext external input to the neurons receiving inputs.
func ApplyExtsNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].ApplyExtsNeuron(ni, di)
}
// NewStateLayer is the kernel over Layers (not Data)
// which does new state on pools as well.
func NewStateLayer(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].NewStateLayer(ctx)
}
// NewStateNeuron is the kernel over Neurons * Data to
// do new state on neurons (decay).
func NewStateNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].NewStateNeuron(ctx, ni, di)
}
// InitGBuffsPath is the kernel over Paths to
// initialize PathGBuf, PathGSyns.
func InitGBuffsPath(pti uint32) { //gosl:kernel
ctx := GetCtx(0)
if pti >= NetworkIxs[0].NPaths {
return
}
Paths[pti].InitGBuffs(ctx)
}
// Beta1Neuron is the kernel over Neurons * Data to
// do neuron-level updating at Beta1.
func Beta1Neuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].Beta1Neuron(ctx, ni, di)
}
// Beta2Neuron is the kernel over Neurons * Data to
// do neuron-level updating at Beta2.
func Beta2Neuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].Beta2Neuron(ctx, ni, di)
}
//////// Minus Phase
// MinusPhasePool is the kernel over Pools to
// do pool-level updating after end of minus phase.
func MinusPhasePool(pi uint32) { //gosl:kernel
ctx := GetCtx(0)
if pi >= NetworkIxs[0].NPools {
return
}
li := PoolIxs.Value(int(pi), int(PoolLayerIdx))
Layers[li].MinusPhasePool(ctx, pi)
}
// MinusPhaseNeuron is the kernel over Neurons * Data to
// do neuron-level updating after end of minus phase.
func MinusPhaseNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].MinusPhaseNeuron(ctx, ni, di)
}
// MinusPhasePost does special algorithm post processing.
func MinusPhasePost(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].MinusPhasePost(ctx)
}
// PlusPhaseStartContext is the kernel over 1 call to call PlusPhaseStart on context.
func PlusPhaseStartContext(i uint32) { //gosl:kernel read-write:Ctx
if i != 0 {
return
}
ctx := GetCtx(0)
ctx.PlusPhaseStart()
}
// PlusPhaseStartNeuron is the kernel over Neurons * Data to
// do neuron-level updating at start of plus phase.
func PlusPhaseStartNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].PlusPhaseStartNeuron(ctx, ni, di)
}
// PlusPhaseEndPool is the kernel over Pools * Data to
// do pool-level updating after end of plus phase.
func PlusPhaseEndPool(i uint32) { //gosl:kernel
ctx := GetCtx(0)
pi := ctx.ItemIndex(i)
if pi >= NetworkIxs[0].NPools {
return
}
di := ctx.DataIndex(i)
li := PoolIxs.Value(int(pi), int(PoolLayerIdx))
Layers[li].PlusPhaseEndPool(ctx, pi, di)
}
// PlusPhaseEndNeuron is the kernel over Neurons * Data to
// do neuron-level updating after end of plus phase.
func PlusPhaseEndNeuron(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].PlusPhaseEndNeuron(ctx, ni, di)
}
// PlusPhaseEndPost does special algorithm post processing.
func PlusPhaseEndPost(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].PlusPhaseEndPost(ctx)
}
// GPUTestWrite is the kernel over Neurons * Data for testing
// the unique writing of data on GPU.
func GPUTestWrite(i uint32) { //gosl:kernel
ctx := GetCtx(0)
ni := ctx.ItemIndex(i)
if ni >= NetworkIxs[0].NNeurons {
return
}
di := ctx.DataIndex(i)
for vi := Spike; vi < NeuronVarsN; vi++ {
Neurons.Set(float32(ni*1000+uint32(vi)), int(ni), int(di), int(vi))
}
}
//gosl:end
// Code generated by "goal build"; DO NOT EDIT.
//line act-path.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"log"
"sync/atomic"
"cogentcore.org/core/math32"
)
//gosl:start
// PathGTypes represents the conductance (G) effects of a given pathway,
// including excitatory, inhibitory, and modulatory.
type PathGTypes int32 //enums:enum
// The pathway conductance types
const (
// Excitatory pathways drive Ge conductance on receiving neurons,
// which send to GeRaw and GeSyn neuron variables.
ExcitatoryG PathGTypes = iota
// Inhibitory pathways drive Gi inhibitory conductance,
// which send to GiRaw and GiSyn neuron variables.
InhibitoryG
// Modulatory pathways have a multiplicative effect on other inputs,
// which send to GModRaw and GModSyn neuron variables.
ModulatoryG
// Maintenance pathways drive unique set of NMDA channels that support
// strong active maintenance abilities.
// Send to GMaintRaw and GMaintSyn neuron variables.
MaintG
// Context pathways are for inputs to CT layers, which update
// only at the end of the plus phase, and send to CtxtGe.
ContextG
)
//////////////////////////////////////////////////////////////////////////////////////
// SynComParams
// SynComParams are synaptic communication parameters,
// used in the Path parameters. Specifies the type of conductance (G)
// communicated by this pathway (excitatory, inhibitory, modulatory, etc.)
// and the synaptic delay for transmission.
type SynComParams struct {
// type of conductance (G) communicated by this pathway
GType PathGTypes
// additional synaptic delay in msec for inputs arriving at this pathway.
// Must be <= MaxDelay which is set during network building based on MaxDelay
// of any existing Path in the network. Delay = 0 means a spike reaches
// receivers in the next Cycle, which is the minimum time (1 msec).
// Biologically, subtract 1 from biological synaptic delay values to set
// corresponding Delay value.
Delay uint32 `min:"0" default:"2"`
// maximum value of Delay, based on MaxDelay values when the BuildGBuf
// function was called during [Network.Build]. Cannot set it longer than this,
// except by calling BuildGBuf on network after changing MaxDelay to a larger
// value in any pathway in the network.
MaxDelay uint32 `edit:"-"`
// delay length = actual length of the GBuf buffer per neuron = Delay+1; just for speed
DelLen uint32 `display:"-"`
}
func (sc *SynComParams) Defaults() {
sc.Delay = 2
sc.MaxDelay = 2
sc.Update()
}
func (sc *SynComParams) Update() {
if sc.Delay > sc.MaxDelay {
sc.Delay = sc.MaxDelay
}
sc.DelLen = sc.Delay + 1
}
// RingIndex returns the wrap-around ring index for given raw index.
// For writing and reading spikes to GBuf buffer, based on
// Context.CyclesTotal counter.
// RN: 0 1 2 <- recv neuron indexes
// DI: 0 1 2 0 1 2 0 1 2 <- delay indexes
// C0: ^ v <- cycle 0, ring index: ^ = write, v = read
// C1: ^ v <- cycle 1, shift over by 1 -- overwrite last read
// C2: v ^ <- cycle 2: read out value stored on C0 -- index wraps around
func (sc *SynComParams) RingIndex(i uint32) uint32 {
ri := i
if ri >= sc.DelLen {
ri -= sc.DelLen
}
return ri
}
// WriteOff returns offset for writing new spikes into the GBuf buffer,
// based on Context CyclesTotal counter which increments each cycle.
// This is logically the last position in the ring buffer.
func (sc *SynComParams) WriteOff(cycTot int32) uint32 {
return sc.RingIndex(uint32(cycTot)%sc.DelLen + sc.DelLen)
}
// ReadOff returns offset for reading existing spikes from the GBuf buffer,
// based on Context CyclesTotal counter which increments each cycle.
// This is logically the zero position in the ring buffer.
func (sc *SynComParams) ReadOff(cycTot int32) uint32 {
return sc.RingIndex(uint32(cycTot) % sc.DelLen)
}
// ReadIndex returns index for reading existing spikes from the GBuf buffer,
// based on the layer-based recv neuron index, data parallel idx, and the
// ReadOff offset from the CyclesTotal.
func (sc *SynComParams) ReadIndex(rnIndex, di uint32, cycTot int32, nRecvNeurs, maxData uint32) uint32 {
// return rnIndex*sc.DelLen + sc.ReadOff(cycTot)
// delay is outer, neurs are inner -- should be faster?
return (sc.ReadOff(cycTot)*nRecvNeurs+rnIndex)*maxData + di
}
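// exampleGBufIndexing is a small sketch (not used in the algorithm) of the
// ring-buffer indexing above, for the default Delay = 2 (DelLen = 3): RingIndex
// wraps raw indexes >= DelLen back to the start, and ReadIndex lays the buffer
// out with the delay slot outermost, then receiving neurons, then the data
// parallel index innermost. The specific numbers are illustrative only.
func exampleGBufIndexing() (wrapped, readIdx uint32) {
	var sc SynComParams
	sc.Defaults()             // Delay = 2, so DelLen = 3
	wrapped = sc.RingIndex(4) // 4 >= 3, wraps to 1
	// recv neuron 2, data index 0, total cycle 7, 10 recv neurons, maxData = 2:
	// ReadOff(7) = 7 % 3 = 1, so index = (1*10 + 2)*2 + 0 = 24
	readIdx = sc.ReadIndex(2, 0, 7, 10, 2)
	return
}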
// FloatToIntFactor returns the factor used for converting float32
// to int32 in GBuf encoding. Because total G is constrained via
// scaling factors to be around ~1, it is safe to use a factor that
// uses most of the available bits, leaving enough room to prevent
// overflow when adding together the different vals.
// For encoding, bake this into scale factor in SendSpike, and
// cast the result to int32.
func (sc *SynComParams) FloatToIntFactor() float32 {
return float32(uint32(1) << 24) // leaves 7 bits = 128 to cover any extreme cases
// this is sufficient to pass existing tests at std tolerances.
}
// FloatToGBuf converts the given floating point value
// to a large int32 for accumulating in GBuf.
// Note: more efficient to bake factor into scale factor per paths.
func (sc *SynComParams) FloatToGBuf(val float32) int32 {
return int32(val * sc.FloatToIntFactor())
}
// FloatFromGBuf converts the given int32 value produced
// via FloatToGBuf back into a float32 (divides by factor).
// If the value is negative, a panic is triggered indicating
// there was numerical overflow in the aggregation.
// If this occurs, the FloatToIntFactor needs to be decreased.
func (sc *SynComParams) FloatFromGBuf(ival int32) float32 {
//gosl:end
if ival < 0 {
log.Printf("axon.SynComParams: FloatFromGBuf is negative, there was an overflow error\n")
return 1
}
//gosl:start
return float32(ival) / sc.FloatToIntFactor()
}
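// exampleGBufRoundTrip is a small sketch of the float <-> int32 GBuf encoding
// above: values are scaled by 2^24 before being accumulated atomically as
// int32, and divided back out on read. The value used is illustrative only.
func exampleGBufRoundTrip() float32 {
	var sc SynComParams
	sc.Defaults()
	ival := sc.FloatToGBuf(0.5)   // = int32(0.5 * 2^24) = 8388608
	return sc.FloatFromGBuf(ival) // back to 0.5
}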
//////// PathScaleParams
// PathScaleParams are pathway scaling parameters, modulating the overall strength of the pathway
// using both absolute and relative factors.
type PathScaleParams struct {
// relative scaling that shifts balance between different pathways -- this is subject to normalization across all other pathways into receiving neuron, and determines the GScale.Target for adapting scaling
Rel float32 `min:"0"`
// absolute multiplier adjustment factor for the path scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
Abs float32 `default:"1" min:"0"`
pad, pad1 float32
}
func (ws *PathScaleParams) Defaults() {
ws.Rel = 1
ws.Abs = 1
}
func (ws *PathScaleParams) Update() {
}
// SLayActScale computes scaling factor based on sending layer activity level (savg), number of units
// in sending layer (snu), and number of recv connections (ncon).
// Uses a fixed semExtra standard-error-of-the-mean (SEM) extra value of 2
// to add to the average expected number of active connections to receive,
// for purposes of computing scaling factors with partial connectivity.
// For 25% layer activity, binomial SEM = sqrt(p(1-p)) = .43, so 3x ~= 1.3,
// making 2 a reasonable default.
func (ws *PathScaleParams) SLayActScale(savg, snu, ncon float32) float32 {
if ncon < 1 { // path Avg can be < 1 in some cases
ncon = 1
}
semExtra := 2
slayActN := int(math32.Round(savg * snu)) // sending layer actual # active
slayActN = max(slayActN, 1)
var sc float32
if ncon == snu {
sc = 1 / float32(slayActN)
} else {
maxActN := int(math32.Min(ncon, float32(slayActN))) // max number we could get
avgActN := int(math32.Round(savg * ncon)) // recv average actual # active if uniform
avgActN = max(avgActN, 1)
expActN := avgActN + semExtra // expected
expActN = min(expActN, maxActN)
sc = 1 / float32(expActN)
}
return sc
}
// FullScale returns full scaling factor, which is product of Abs * Rel * SLayActScale
func (ws *PathScaleParams) FullScale(savg, snu, ncon float32) float32 {
return ws.Abs * ws.Rel * ws.SLayActScale(savg, snu, ncon)
}
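// examplePathScale is a small sketch of the scaling logic above under assumed
// illustrative numbers: a sending layer of 100 units at 25% average activity.
// With full connectivity (ncon == snu), SLayActScale = 1/25 = 0.04; with
// partial connectivity of 20 connections, the expected number of active inputs
// is round(0.25*20) + 2 = 7, giving 1/7 ~= 0.143. FullScale multiplies these
// by Abs * Rel (both 1 at defaults).
func examplePathScale() (full, partial float32) {
	var ws PathScaleParams
	ws.Defaults()
	full = ws.FullScale(0.25, 100, 100)   // 0.04
	partial = ws.FullScale(0.25, 100, 20) // ~0.143
	return
}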
//////// Indexes
// SynRecvLayerIndex converts the Synapse RecvIndex of recv neuron's index
// in network level global list of all neurons to receiving
// layer-specific index.
func (pt *PathParams) SynRecvLayerIndex(syni uint32) uint32 {
return pt.Indexes.RecvNIndexToLayIndex(SynapseIxs.Value(int(syni), int(SynRecvIndex)))
}
// SynSendLayerIndex converts the Synapse SendIndex of sending neuron's index
// in network level global list of all neurons to sending
// layer-specific index.
func (pt *PathParams) SynSendLayerIndex(syni uint32) uint32 {
return pt.Indexes.SendNIndexToLayIndex(SynapseIxs.Value(int(syni), int(SynSendIndex)))
}
//////// Cycle
// GatherSpikes integrates G*Raw and G*Syn values for given recv neuron
// while integrating the Recv Path-level GSyn integrated values.
func (pt *PathParams) GatherSpikes(ctx *Context, ly *LayerParams, ni, di, lni uint32) {
deli := pt.Com.ReadOff(ctx.CyclesTotal)
npti := pt.Indexes.NPathNeurSt + lni
gRaw := pt.Com.FloatFromGBuf(PathGBuf.Value(int(npti), int(di), int(deli)))
PathGBuf.Set(0, int(npti), int(di), int(deli))
gsyn := PathGSyns.Value(int(npti), int(di))
pt.GatherSpikesGSyn(ctx, ly, ni, di, gRaw, &gsyn)
PathGSyns.Set(gsyn, int(npti), int(di))
}
// GatherSpikesGSyn integrates G*Raw and G*Syn values for given neuron
// from the given Path-level GRaw value, first integrating
// pathway-level GSyn value.
func (pt *PathParams) GatherSpikesGSyn(ctx *Context, ly *LayerParams, ni, di uint32, gRaw float32, gSyn *float32) {
switch pt.Com.GType {
case ExcitatoryG:
*gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw)
Neurons.SetAdd(gRaw, int(ni), int(di), int(GeRaw))
Neurons.SetAdd(*gSyn, int(ni), int(di), int(GeSyn))
case InhibitoryG:
*gSyn = ly.Acts.Dt.GiSynFromRaw(*gSyn, gRaw)
Neurons.SetAdd(gRaw, int(ni), int(di), int(GiRaw))
Neurons.SetAdd(*gSyn, int(ni), int(di), int(GiSyn))
case ModulatoryG:
*gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw)
Neurons.SetAdd(gRaw, int(ni), int(di), int(GModRaw))
Neurons.SetAdd(*gSyn, int(ni), int(di), int(GModSyn))
case MaintG:
*gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw)
Neurons.SetAdd(gRaw, int(ni), int(di), int(GMaintRaw))
// note: Syn happens via NMDA in Act
case ContextG:
Neurons.SetAdd(gRaw, int(ni), int(di), int(CtxtGeRaw))
default:
}
}
// SendSpike sends a spike from the given sending neuron
// into the GBuf buffer on the receiver side. The buffer on the receiver side
// is a ring buffer, which is used for modelling the time delay between
// sending and receiving spikes.
func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) {
sendVal := pt.GScale.Scale * pt.Com.FloatToIntFactor() // pre-bake in conversion to uint factor
if pt.Type == CTCtxtPath {
if uint32(ctx.Cycle) != uint32(ctx.ThetaCycles)-1-pt.Com.DelLen {
return
}
sendVal *= Neurons.Value(int(ni), int(di), int(Burst)) // Burst is regular CaP for all non-SuperLayer neurons
} else {
if Neurons.Value(int(ni), int(di), int(Spike)) == 0 {
return
}
}
recvNeurSt := pt.Indexes.RecvNeurSt
npst := pt.Indexes.NPathNeurSt
cni := pt.Indexes.SendConSt + lni
synst := pt.Indexes.SynapseSt + PathSendCon.Value(int(cni), int(StartOff))
synn := PathSendCon.Value(int(cni), int(Nitems))
for ci := uint32(0); ci < synn; ci++ {
syni := synst + ci
ri := SynapseIxs.Value(int(syni), int(SynRecvIndex))
npti := npst + (ri - recvNeurSt)
deli := pt.Com.WriteOff(ctx.CyclesTotal)
sv := int32(sendVal * Synapses.Value(int(syni), int(Wt)))
atomic.AddInt32(PathGBuf.ValuePtr(int(npti), int(di), int(deli)), sv)
}
}
// InitGBuffs initializes the per-pathway synaptic conductance buffers.
// This is called during InitWeights and InitActs, and is not typically needed
// otherwise, but must be called to completely clear prior activity,
// e.g., for full Glong clearing.
func (pt *PathParams) InitGBuffs(ctx *Context) {
nix := GetNetworkIxs(0)
maxd := nix.MaxData
mdel := nix.MaxDelay + 1
rnn := pt.Indexes.RecvNeurN
npst := pt.Indexes.NPathNeurSt
for ri := uint32(0); ri < rnn; ri++ {
for di := uint32(0); di < maxd; di++ {
for dl := uint32(0); dl < mdel; dl++ {
PathGBuf.Set(0, int(npst+ri), int(di), int(dl))
}
PathGSyns.Set(0.0, int(npst+ri), int(di))
}
}
}
//gosl:end
// Code generated by "goal build"; DO NOT EDIT.
//line act.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"math"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/gosl/slbool"
"github.com/emer/axon/v2/chans"
)
//////// act.go contains the activation params and functions for axon
//gosl:start
//gosl:import "cogentcore.org/core/math32"
//gosl:import "cogentcore.org/core/math32/minmax"
//gosl:import "github.com/emer/axon/v2/chans"
// NeuronHasFlag returns true if the given flag bit(s) are set for the given neuron and data index.
func NeuronHasFlag(flag NeuronFlags, ni, di uint32) bool {
return (NeuronFlags(math.Float32bits(Neurons.Value(int(ni), int(di), int(NeurFlags)))) & flag) > 0 // weird: != 0 does NOT work on GPU
}
func NeuronSetFlag(flag NeuronFlags, ni, di uint32) {
Neurons.Set(math.Float32frombits(math.Float32bits(Neurons.Value(int(ni), int(di), int(NeurFlags)))|uint32(flag)), int(ni), int(di), int(NeurFlags))
}
func NeuronClearFlag(flag NeuronFlags, ni, di uint32) {
Neurons.Set(math.Float32frombits(math.Float32bits(Neurons.Value(int(ni), int(di), int(NeurFlags)))&^uint32(flag)), int(ni), int(di), int(NeurFlags))
}
// NeuronIsOff returns true if the neuron has been turned off (lesioned).
// Only checks the first data item -- all should be consistent.
func NeuronIsOff(ni uint32) bool {
return NeuronHasFlag(NeuronOff, ni, 0)
}
//////// SpikeParams
// SpikeParams contains spiking activation function params.
// Implements a basic thresholded Vm model, and optionally
// the AdEx adaptive exponential function.
type SpikeParams struct {
// Thr is the spiking threshold value Theta (Θ) for firing output activation,
// in mV (millivolts). See also ExpThr for the AdEx implementation,
// in which case this threshold is the V_t parameter for the exponential function.
Thr float32 `default:"-50"`
// VmR is the post-spiking membrane potential to reset to, in mV.
// This produces refractory effect if lower than VmInit.
// -70 is an appropriate biologically based value for AdEx (Brette & Gerstner, 2005)
// parameters. See also RTau.
VmR float32 `default:"-70"`
// Tr is the post-spiking explicit refractory period, in cycles.
// Prevents Vm updating for this number of cycles post firing.
// Vm is reduced in exponential steps over this period according to RTau,
// and is set to VmR exactly at the end of Tr.
Tr int32 `min:"1" default:"3"`
// RTau is the time constant for decaying Vm down to VmR. At end of Tr it is set
// to VmR exactly. This provides a more realistic shape of the post-spiking
// Vm which is only relevant for more realistic channels that key off of Vm.
// Does not otherwise affect standard computation.
RTau float32 `default:"1.6667"`
// Exp turns on the AdEx exponential excitatory current that drives Vm rapidly
// upward for spiking as it gets past its nominal firing threshold (Thr).
// Efficiently captures the Hodgkin Huxley dynamics of Na and K channels
// (Brette & Gerstner, 2005).
Exp slbool.Bool `default:"true"`
// ExpSlope is the slope in mV for extra exponential excitatory current in AdEx.
ExpSlope float32 `default:"2"`
// ExpThr is the membrane potential threshold (mV) for actually triggering
// a spike when using the exponential mechanism. Due to 1 ms time integration,
// this doesn't have much impact as long as it is above nominal spike threshold,
// and inside the VmRange for clipping Vm.
ExpThr float32 `default:"-10"`
// MaxHz is for translating spiking interval (rate) into rate-code activation
// equivalent, as the maximum firing rate associated with a maximum
// activation value of 1.
MaxHz float32 `default:"180" min:"1"`
// ISITau is the time constant for integrating the spiking interval in
// estimating spiking rate.
ISITau float32 `default:"5" min:"1"`
// ISIDt = 1 / ISITau
ISIDt float32 `display:"-"`
// RDt = 1 / RTau
RDt float32 `display:"-"`
pad int32
}
func (sk *SpikeParams) Defaults() {
sk.Thr = -50
sk.VmR = -70
sk.Tr = 3
sk.RTau = 1.6667
sk.Exp.SetBool(true)
sk.ExpSlope = 2
sk.ExpThr = -10
sk.MaxHz = 180
sk.ISITau = 5
sk.Update()
}
func (sk *SpikeParams) Update() {
if sk.Tr <= 0 {
sk.Tr = 1 // hard min
}
sk.ISIDt = 1 / sk.ISITau
sk.RDt = 1 / sk.RTau
}
func (sk *SpikeParams) ShouldDisplay(field string) bool {
switch field {
case "ExpSlope", "ExpThr":
return sk.Exp.IsTrue()
default:
return true
}
}
// ActToISI computes the spiking interval from a given rate-coded activation,
// based on the time increment (.001 = 1 msec default) and Act.Dt.Integ.
func (sk *SpikeParams) ActToISI(act, timeInc, integ float32) float32 {
if act == 0 {
return 0
}
return (1 / (timeInc * integ * act * sk.MaxHz))
}
// ActFromISI computes rate-code activation from estimated spiking interval
func (sk *SpikeParams) ActFromISI(isi, timeInc, integ float32) float32 {
if isi <= 0 {
return 0
}
maxInt := 1.0 / (timeInc * integ * sk.MaxHz) // interval at max hz..
return maxInt / isi // normalized
}
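// exampleRateISIRoundTrip is a small sketch of the rate-code <-> inter-spike
// interval conversions above, using the default MaxHz = 180 with a 1 msec time
// increment and Integ = 1. The activation value is illustrative only.
func exampleRateISIRoundTrip() float32 {
	var sk SpikeParams
	sk.Defaults()
	isi := sk.ActToISI(0.5, 0.001, 1)   // 1/(0.001*1*0.5*180) ~= 11.1 cycles
	return sk.ActFromISI(isi, 0.001, 1) // back to ~0.5
}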
// AvgFromISI returns the updated average spiking ISI, integrating the current isi interval value.
func (sk *SpikeParams) AvgFromISI(avg float32, isi float32) float32 {
av := avg
if av <= 0 {
av = isi
} else if isi < 0.8*av {
av = isi // if significantly less than avg, take that
} else { // integrate on slower
av += sk.ISIDt * (isi - av) // running avg updt
}
return av
}
//////// DendParams
// DendParams are the parameters for updating dendrite-specific dynamics
type DendParams struct {
// GExp is the dendrite-specific strength multiplier of the exponential
// spiking drive on Vm. E.g., .5 makes it half as strong as at the soma.
GExp float32 `default:"0.2,0.5"`
// GR is the dendrite-specific additional conductance of Kdr delayed
// rectifier currents, used to reset membrane potential for dendrite.
// Applied for Tr cycles (ms).
GR float32 `default:"3,6"`
// SSGi is the SST+ somatostatin positive slow spiking inhibition level
// specifically affecting dendritic Vm (VmDend). This is important for countering
// a positive feedback loop from NMDA getting stronger over the course
// of learning. Also typically requires SubMean = 1 for TrgAvgAct and
// learning to fully counter this feedback loop.
SSGi float32 `default:"0,2"`
// HasMod is set automatically based on whether this layer has any recv pathways
// that have a GType conductance type of Modulatory.
// If so, then multiply GeSyn etc by GModSyn.
HasMod slbool.Bool `edit:"-"`
// ModGain is a multiplicative gain factor on the total modulatory input.
// This can also be controlled by the PathScale.Abs factor on
// ModulatoryG inputs, but it is convenient to be able to control
// on the layer as well.
ModGain float32
// ModACh if true, modulatory signal also includes ACh multiplicative factor.
ModACh slbool.Bool
// ModBase is the baseline modulatory level for modulatory effects.
// Net modulation is ModBase + ModGain * GModSyn
ModBase float32
pad int32
}
func (dp *DendParams) Defaults() {
dp.SSGi = 2
dp.GExp = 0.2
dp.GR = 3
dp.ModGain = 1
dp.ModBase = 0
}
func (dp *DendParams) Update() {
}
func (dp *DendParams) ShouldDisplay(field string) bool {
switch field {
case "ModGain", "ModACh", "ModBase":
return dp.HasMod.IsTrue()
default:
return true
}
}
//////// ActInitParams
// ActInitParams are initial values for key network state variables.
// Initialized in InitActs called by InitWeights, and provides target values
// for DecayState.
type ActInitParams struct {
// Vm is the initial membrane potential in mV (millivolts).
// See Erev.L for the resting potential, typically -70.
Vm float32 `default:"-70"`
// Act is the initial activation value. Typically 0.
Act float32 `default:"0"`
// GeBase is the baseline level of excitatory conductance (net input).
// Ge is initialized to this value, and it is added in as a constant
// background level of excitatory input, to capture all the other
// inputs not represented in the model, and intrinsic excitability, etc.
GeBase float32 `default:"0"`
// GiBase is the baseline level of inhibitory conductance (net input).
// Gi is initialized to this value, and it is added in as a constant
// background level of inhibitory input. Captures all the other inputs
// not represented in the model.
GiBase float32 `default:"0"`
// GeVar is the variance (sigma) of gaussian distribution around baseline
// Ge values, per neuron, to establish variability in intrinsic excitability.
// Value never goes < 0.
GeVar float32 `default:"0"`
// GiVar is the variance (sigma) of gaussian distribution around baseline
// Gi values, per neuron, to establish variability in intrinsic excitability.
// Value never goes < 0
GiVar float32 `default:"0"`
pad, pad1 int32
}
func (ai *ActInitParams) Update() {
}
func (ai *ActInitParams) Defaults() {
ai.Vm = -70
ai.Act = 0
ai.GeBase = 0
ai.GiBase = 0
ai.GeVar = 0
ai.GiVar = 0
}
//gosl:end
// note: these are only called in CPU during init.
// GetGeBase returns the baseline Ge value: GeBase + rand(GeVar), clipped to be >= 0
func (ai *ActInitParams) GetGeBase(rnd randx.Rand) float32 {
ge := ai.GeBase
if ai.GeVar > 0 {
ge += float32(float64(ai.GeVar) * rnd.NormFloat64())
if ge < 0 {
ge = 0
}
}
return ge
}
// GetGiBase returns the baseline Gi value: GiBase + rand(GiVar), clipped to be >= 0
func (ai *ActInitParams) GetGiBase(rnd randx.Rand) float32 {
gi := ai.GiBase
if ai.GiVar > 0 {
gi += float32(float64(ai.GiVar) * rnd.NormFloat64())
if gi < 0 {
gi = 0
}
}
return gi
}
//gosl:start
//////// DecayParams
// DecayParams control the decay of activation state in the DecayState function
// called in NewState when a new state is to be processed.
type DecayParams struct {
// Act is the proportion to decay most activation state variables toward initial
// values at start of every ThetaCycle (except those controlled separately below).
// If 1 it is effectively equivalent to full clear, resetting other derived values.
// ISI is reset every AlphaCycle to get a fresh sample of activations
// (doesn't affect direct computation -- only readout).
Act float32 `default:"0,0.2,0.5,1" max:"1" min:"0"`
// Glong is the proportion to decay long-lasting conductances, NMDA and GABA,
// and also the dendritic membrane potential -- when using random stimulus
// order, it is important to decay this significantly to allow a fresh start,
// but set Act to 0 to enable ongoing activity to keep neurons in their
// sensitive regime.
Glong float32 `default:"0,0.6" max:"1" min:"0"`
// AHP is decay of afterhyperpolarization currents, including mAHP, sAHP,
// and KNa, Kir. Has a separate decay parameter because it is often useful to have these
// not decay at all even when other decay is on.
AHP float32 `default:"0" max:"1" min:"0"`
// LearnCa is decay of Ca variables driven by spiking activity used in learning:
// CaSpike* and Ca* variables. These are typically not decayed but may
// need to be in some situations.
LearnCa float32 `default:"0" max:"1" min:"0"`
// OnRew means the layer is decayed at end of ThetaCycle when there is a global reward.
// True by default for PTPred, PTMaint and PFC Super layers.
OnRew slbool.Bool
pad, pad1, pad2 float32
}
func (dp *DecayParams) Update() {
}
func (dp *DecayParams) Defaults() {
dp.Act = 0.2
dp.Glong = 0.6
dp.AHP = 0
dp.LearnCa = 0
}
//////// DtParams
// DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)
type DtParams struct {
// Integ is the overall rate constant for numerical integration, for all equations
// at the neuron level. All time constants are specified in ms millisecond units,
// with one cycle = 1 ms. If you instead want to make one cycle = 2 ms, you can do
// this globally by setting this integ value to 2 (etc).
// However, stability issues will likely arise if you go too high.
// For improved numerical stability, you may even need to reduce this value
// to 0.5 or possibly even lower (typically however this is not necessary).
Integ float32 `default:"1" min:"0"`
// VmC is the membrane potential capacitance in pF (picofarads), which
// determines the rate of Vm updating over time.
VmC float32 `default:"281" min:"1"`
// VmDendC is the effective dendritic membrane capacitance in pF (picofarads),
// which is typically slower than VmC (also reflecting other dendritic dynamics).
VmDendC float32 `default:"500" min:"1"`
// VmSteps are the number of integration steps to take in computing new Vm value.
// This is the one computation that can be most numerically unstable
// so taking multiple steps with proportionally smaller dt is beneficial.
VmSteps int32 `default:"2" min:"1"`
// GeTau is the time constant for decay of excitatory AMPA receptor
// conductance in ms (milliseconds).
GeTau float32 `default:"5" min:"1"`
// GiTau is the time constant for decay of inhibitory GABA-A receptor
// conductance in ms (milliseconds).
GiTau float32 `default:"7" min:"1"`
// IntTau is a time constant for integrating values over timescale of an
// individual input state (e.g., roughly the 200 msec theta cycle),
// used in computing ActInt, GeInt from Ge, and GiInt from GiSyn.
// This is used for scoring performance, not for learning, in cycles,
// which should be milliseconds typically
// (Tau is roughly 2/3 of the way to asymptote).
IntTau float32 `default:"40" min:"1"`
// LongAvgTau is a time constant for integrating slower long-time-scale averages,
// such as ActAvg, Pool.ActsMAvg, ActsPAvg. Computed in NewState
// when a new input state is present (i.e., not msec but in units
// of a theta cycle) (Tau is roughly 2/3 of the way to asymptote).
// Set lower for smaller models.
LongAvgTau float32 `default:"20" min:"1"`
// MaxCycStart is the cycle to start updating the CaPMaxCa, CaPMax values
// within a theta cycle. Early cycles often reflect prior state.
MaxCycStart int32 `default:"10" min:"0"`
// VmDt = Integ / VmC
VmDt float32 `display:"-" json:"-" xml:"-"`
// VmDendDt = Integ / VmDendC
VmDendDt float32 `display:"-" json:"-" xml:"-"`
// DtStep = 1 / VmSteps
DtStep float32 `display:"-" json:"-" xml:"-"`
// GeDt = Integ / GeTau
GeDt float32 `display:"-" json:"-" xml:"-"`
// GiDt = Integ / GiTau
GiDt float32 `display:"-" json:"-" xml:"-"`
// IntDt = Integ / IntTau
IntDt float32 `display:"-" json:"-" xml:"-"`
// LongAvgDt = 1 / LongAvgTau
LongAvgDt float32 `display:"-" json:"-" xml:"-"`
// MaxI is the maximum current I step = VmC * (100 nS nominal max conductance).
MaxI float32 `display:"-" json:"-" xml:"-"`
pad, pad1, pad2 float32
}
func (dp *DtParams) Update() {
if dp.VmSteps < 1 {
dp.VmSteps = 1
}
dp.VmDt = dp.Integ / dp.VmC
dp.VmDendDt = dp.Integ / dp.VmDendC
dp.DtStep = 1 / float32(dp.VmSteps)
dp.GeDt = dp.Integ / dp.GeTau
dp.GiDt = dp.Integ / dp.GiTau
dp.IntDt = dp.Integ / dp.IntTau
dp.LongAvgDt = 1 / dp.LongAvgTau
dp.MaxI = dp.VmC * 100
}
func (dp *DtParams) Defaults() {
dp.Integ = 1
dp.VmC = 281
dp.VmDendC = 500
dp.VmSteps = 2
dp.GeTau = 5
dp.GiTau = 7
dp.IntTau = 40
dp.LongAvgTau = 20
dp.MaxCycStart = 10
dp.Update()
}
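// exampleDtRateConstants is a small sketch showing the per-cycle rate constants
// derived from the default time constants above (with Integ = 1): GeTau = 5
// gives GeDt = 0.2, and IntTau = 40 gives IntDt = 0.025.
func exampleDtRateConstants() (geDt, intDt float32) {
	var dp DtParams
	dp.Defaults() // calls Update, which computes the Dt values
	return dp.GeDt, dp.IntDt
}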
// GeSynFromRaw integrates a synaptic conductance from raw spiking using GeTau
func (dp *DtParams) GeSynFromRaw(geSyn, geRaw float32) float32 {
return geSyn + geRaw - dp.GeDt*geSyn
}
// GeSynFromRawSteady returns the steady-state GeSyn that would result from
// receiving a steady increment of GeRaw every time step = raw * GeTau.
// dSyn = Raw - dt*Syn; solve for dSyn = 0 to get steady state:
// dt*Syn = Raw; Syn = Raw / dt = Raw * Tau
func (dp *DtParams) GeSynFromRawSteady(geRaw float32) float32 {
return geRaw * dp.GeTau
}
// GiSynFromRaw integrates a synaptic conductance from raw spiking using GiTau
func (dp *DtParams) GiSynFromRaw(giSyn, giRaw float32) float32 {
return giSyn + giRaw - dp.GiDt*giSyn
}
// GiSynFromRawSteady returns the steady-state GiSyn that would result from
// receiving a steady increment of GiRaw every time step = raw * GiTau.
// dSyn = Raw - dt*Syn; solve for dSyn = 0 to get steady state:
// dt*Syn = Raw; Syn = Raw / dt = Raw * Tau
func (dp *DtParams) GiSynFromRawSteady(giRaw float32) float32 {
return giRaw * dp.GiTau
}
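// exampleGeSynSteadyState is a small sketch verifying the steady-state relation
// above: repeatedly integrating a constant GeRaw increment with GeSynFromRaw
// converges to GeRaw * GeTau, which GeSynFromRawSteady returns directly.
// With defaults (GeTau = 5), a steady raw input of 0.1 converges to 0.5.
func exampleGeSynSteadyState() (iterated, steady float32) {
	var dp DtParams
	dp.Defaults()
	geSyn := float32(0)
	for i := 0; i < 200; i++ { // enough cycles to effectively reach asymptote
		geSyn = dp.GeSynFromRaw(geSyn, 0.1)
	}
	return geSyn, dp.GeSynFromRawSteady(0.1) // both ~0.5
}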
// AvgVarUpdate updates the average and variance from current value, using LongAvgDt
func (dp *DtParams) AvgVarUpdate(avg, vr *float32, val float32) {
if *avg == 0 { // first time -- set
*avg = val
*vr = 0
} else {
del := val - *avg
incr := dp.LongAvgDt * del
*avg += incr
// following is magic exponentially weighted incremental variance formula
// derived by Finch, 2009: Incremental calculation of weighted mean and variance
if *vr == 0 {
*vr = 2 * (1 - dp.LongAvgDt) * del * incr
} else {
*vr = (1 - dp.LongAvgDt) * (*vr + del*incr)
}
}
}
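// exampleAvgVarUpdate is a small usage sketch of the incremental average and
// variance update above: the first call seeds the average, and subsequent calls
// move it toward new values at the LongAvgDt rate while accumulating an
// exponentially weighted variance estimate.
func exampleAvgVarUpdate(vals []float32) (avg, vr float32) {
	var dp DtParams
	dp.Defaults() // LongAvgTau = 20, so LongAvgDt = 0.05
	for _, v := range vals {
		dp.AvgVarUpdate(&avg, &vr, v)
	}
	return
}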
//////// Noise
// SpikeNoiseParams parameterizes background spiking activity impinging on the neuron,
// simulated using a poisson spiking process.
type SpikeNoiseParams struct {
// On switch to add noise simulating background spiking levels.
On slbool.Bool
// GeHz is the mean frequency of excitatory spikes. Typically 50Hz but multiple
// inputs increase rate. This is a poisson lambda parameter, also the variance.
GeHz float32 `default:"100"`
// Ge is the excitatory conductance per spike. 0.001 has minimal impact,
// 0.01 can be strong, and .15 is needed to influence timing of clamped inputs.
Ge float32 `min:"0"`
// GiHz is the mean frequency of inhibitory spikes. Typically 100Hz fast spiking
// but multiple inputs increase rate. This is a poisson lambda parameter,
// also the variance.
GiHz float32 `default:"200"`
// Gi is the inhibitory conductance per spike. 0.001 has minimal impact,
// 0.01 can be strong, and .15 is needed to influence timing of clamped inputs.
Gi float32 `min:"0"`
// MaintGe adds the Ge noise to GMaintRaw instead of the standard Ge.
// Used for PTMaintLayer, for example.
MaintGe slbool.Bool
// GeExpInt = Exp(-Interval) which is the threshold for GeNoiseP as it is updated.
GeExpInt float32 `display:"-" json:"-" xml:"-"`
// GiExpInt = Exp(-Interval) which is the threshold for GiNoiseP as it is updated.
GiExpInt float32 `display:"-" json:"-" xml:"-"`
}
func (an *SpikeNoiseParams) Update() {
an.GeExpInt = math32.Exp(-1000.0 / an.GeHz)
an.GiExpInt = math32.Exp(-1000.0 / an.GiHz)
}
func (an *SpikeNoiseParams) Defaults() {
an.GeHz = 100
an.Ge = 0.001
an.GiHz = 200
an.Gi = 0.001
an.Update()
}
func (an *SpikeNoiseParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return an.On.IsTrue()
}
}
// PGe updates the GeNoiseP probability by multiplying in a uniform random number [0-1],
// and returns the noise Ge conductance if a spike is triggered.
func (an *SpikeNoiseParams) PGe(ctx *Context, p *float32, ni, di uint32) float32 {
nix := GetNetworkIxs(0)
ndi := di*nix.NNeurons + ni
*p *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActPGe)
if *p <= an.GeExpInt {
*p = 1
return an.Ge
}
return 0
}
// PGi updates the GiNoiseP probability by multiplying in a uniform random number [0-1],
// and returns the noise Gi conductance if a spike is triggered.
func (an *SpikeNoiseParams) PGi(ctx *Context, p *float32, ni, di uint32) float32 {
nix := GetNetworkIxs(0)
ndi := di*nix.NNeurons + ni
*p *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActPGi)
if *p <= an.GiExpInt {
*p = 1
return an.Gi
}
return 0
}
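// exampleSpikeNoiseThreshold is a small sketch of the thresholds used by the
// poisson noise mechanism above: each cycle the survival probability
// (GeNoiseP / GiNoiseP) is multiplied by a fresh uniform random number and a
// spike is triggered when it falls below Exp(-1000/Hz), giving a mean
// inter-spike interval of 1000/Hz msec. For the default GeHz = 100 the
// threshold is Exp(-10) ~= 4.5e-5, i.e. a ~10 msec mean interval.
func exampleSpikeNoiseThreshold() (geThr, giThr float32) {
	var an SpikeNoiseParams
	an.Defaults() // GeHz = 100, GiHz = 200
	return an.GeExpInt, an.GiExpInt
}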
//////// ClampParams
// ClampParams specify how external inputs drive excitatory conductances
// (like a current clamp) -- either adds or overwrites existing conductances.
// Noise is added in either case.
type ClampParams struct {
// Ge is the contribution to Ge(t) for clamped external input.
// Generally use .8 for Target layers, 1.50 for Input layers.
// This is later multiplied by overall gbar_e which converts to nS units.
Ge float32 `default:"0.8,1.5"`
// Add external conductance on top of any existing.
// Generally this is not a good idea for target layers
// (creates a main effect that learning can never match),
// but may be ok for input layers.
Add slbool.Bool `default:"false"`
// ErrThr is the threshold on neuron Act activity to count as active for
// computing the error relative to target in PctErr method.
ErrThr float32 `default:"0.5"`
pad float32
}
func (cp *ClampParams) Update() {
}
func (cp *ClampParams) Defaults() {
cp.Ge = 0.8
cp.ErrThr = 0.5
}
//////// SMaintParams
// SMaintParams for self-maintenance simulating a population of
// NMDA-interconnected spiking neurons
type SMaintParams struct {
// On switch for self maintenance.
On slbool.Bool
// NNeurons is the number of neurons within the self-maintenance pool,
// each of which is assumed to have the same probability of spiking.
NNeurons float32 `default:"10"`
// Ge is the excitatory conductance multiplier for self maintenance synapses.
Ge float32 `default:"0.2"`
// Inhib controls how much of the extra maintenance conductance goes
// to the GeExt, which drives extra proportional inhibition.
Inhib float32 `default:"1"`
// ISI (inter spike interval) range. Min is used as min ISIAvg
// for poisson spike rate expected from the population,
// and above Max, no additional maintenance conductance is added.
ISI minmax.F32 `display:"inline"`
}
func (sm *SMaintParams) Defaults() {
sm.NNeurons = 10
sm.ISI.Set(1, 20)
sm.Ge = 0.2
sm.Inhib = 1
}
func (sm *SMaintParams) Update() {
}
func (sm *SMaintParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return sm.On.IsTrue()
}
}
// ExpInt returns the exponential interval value for determining
// when the next excitatory spike will arrive, based on given ISI
// value for this neuron.
func (sm *SMaintParams) ExpInt(isi float32) float32 {
if isi <= 0 {
return 0
}
return math32.FastExp(-max(isi, sm.ISI.Min) / sm.NNeurons)
}
//////// PopCodeParams
// PopCodeParams provides an encoding of scalar value using population code,
// where a single continuous (scalar) value is encoded as a gaussian bump
// across a population of neurons (1 dimensional).
// It can also modulate rate code and number of neurons active according to the value.
// This is for layers that represent values as in the Rubicon system.
// Both normalized activation values (1 max) and Ge conductance values can be generated.
type PopCodeParams struct {
// On toggles use of popcode encoding of variable(s) that this layer represents.
On slbool.Bool
// Ge multiplier for driving excitatory conductance based on PopCode.
// Multiplies normalized activation values and adds to total Ge(t)
// which is later multiplied by Gbar.E for pA unit scaling.
Ge float32 `default:"0.1"`
// Min is the minimum value representable. For GaussBump, typically include
// extra to allow mean with activity on either side to represent
// the lowest value you want to encode.
Min float32 `default:"-0.1"`
// Max is the maximum value representable. For GaussBump, typically include
// extra to allow mean with activity on either side to represent
// the highest value you want to encode.
Max float32 `default:"1.1"`
// MinAct is an activation multiplier for values at Min end of range,
// where values at Max end have an activation of 1.
// If this is < 1, then there is a rate code proportional
// to the value in addition to the popcode pattern. See also MinSigma, MaxSigma.
MinAct float32 `default:"1,0.5"`
// MinSigma is the sigma parameter of a gaussian specifying the tuning width
// of the coarse-coded units, in normalized 0-1 range, for values at the Min
// end of the range. If MinSigma < MaxSigma then more units are activated
// for Max values vs. Min values, proportionally.
MinSigma float32 `default:"0.1,0.08"`
// MaxSigma is the sigma parameter of a gaussian specifying the tuning width
// of the coarse-coded units, in normalized 0-1 range, for values at the Max
// end of the range. If MinSigma < MaxSigma then more units are activated
// for Max values vs. Min values, proportionally.
MaxSigma float32 `default:"0.1,0.12"`
// Clip ensures that encoded and decoded value remains within specified range.
Clip slbool.Bool
}
func (pc *PopCodeParams) Defaults() {
pc.Ge = 0.1
pc.Min = -0.1
pc.Max = 1.1
pc.MinAct = 1
pc.MinSigma = 0.1
pc.MaxSigma = 0.1
pc.Clip.SetBool(true)
}
func (pc *PopCodeParams) Update() {
}
func (pc *PopCodeParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return pc.On.IsTrue()
}
}
// SetRange sets the min, max and sigma values
func (pc *PopCodeParams) SetRange(min, max, minSigma, maxSigma float32) {
pc.Min = min
pc.Max = max
pc.MinSigma = minSigma
pc.MaxSigma = maxSigma
}
// ClampValue returns the clipped (clamped) value in the min / max range
func (pc *PopCodeParams) ClampValue(val float32) float32 {
clipVal := val
if clipVal < pc.Min {
clipVal = pc.Min
}
if clipVal > pc.Max {
clipVal = pc.Max
}
return clipVal
}
// ProjectParam projects given min / max param value onto val within range
func (pc *PopCodeParams) ProjectParam(minParam, maxParam, clipVal float32) float32 {
normVal := (clipVal - pc.Min) / (pc.Max - pc.Min)
return minParam + normVal*(maxParam-minParam)
}
// EncodeValue returns the encoded activation value for the given scalar value,
// for neuron index i out of n total neurons. n must be 2 or more.
func (pc *PopCodeParams) EncodeValue(i, n uint32, val float32) float32 {
eval := val
clipVal := pc.ClampValue(eval)
if pc.Clip.IsTrue() {
eval = clipVal
}
rng := pc.Max - pc.Min
act := float32(1)
if pc.MinAct < 1 {
act = pc.ProjectParam(pc.MinAct, 1.0, clipVal)
}
sig := pc.MinSigma
if pc.MaxSigma > pc.MinSigma {
sig = pc.ProjectParam(pc.MinSigma, pc.MaxSigma, clipVal)
}
gnrm := 1.0 / (rng * sig)
incr := rng / float32(n-1)
trg := pc.Min + incr*float32(i)
dist := gnrm * (trg - eval)
return act * math32.FastExp(-(dist * dist))
}
// EncodeGe returns Ge value for given value, for neuron index i
// out of n total neurons. n must be 2 or more.
func (pc *PopCodeParams) EncodeGe(i, n uint32, val float32) float32 {
return pc.Ge * pc.EncodeValue(i, n, val)
}
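// examplePopCodeBump is a small sketch of the population encoding above, using
// the default range (-0.1 to 1.1) over an assumed population of 11 units, so
// preferred values are spaced 0.12 apart. Encoding 0.5 puts the peak of the
// gaussian bump on unit 5 (preferred value 0.5), with neighbors activated
// according to the tuning width. The population size is illustrative only.
func examplePopCodeBump(val float32) []float32 {
	var pc PopCodeParams
	pc.Defaults()
	n := uint32(11)
	acts := make([]float32, n)
	for i := uint32(0); i < n; i++ {
		acts[i] = pc.EncodeValue(i, n, val)
	}
	return acts
}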
//////// ActParams
// ActParams contains all the neural activity computation params and functions
// for Axon, at the neuron level. This is included in [LayerParams].
type ActParams struct {
// Spikes are spiking function parameter, including the AdEx spiking function.
Spikes SpikeParams `display:"inline"`
// Dend are dendrite-specific parameters, which more accurately approximate
// the electrical dynamics present in dendrites vs the soma.
Dend DendParams `display:"inline"`
// Init has initial values for key network state variables.
// Initialized in InitActs called by InitWeights, and provides target
// values for DecayState.
Init ActInitParams `display:"inline"`
// Decay is the amount to decay between theta cycles, simulating the passage
// of time and effects of saccades etc. It is especially important for
// environments with random temporal structure (e.g., most standard neural net
// training corpora).
Decay DecayParams `display:"inline"`
// Dt has time and rate constants for temporal derivatives / updating of
// activation state.
Dt DtParams `display:"inline"`
// Gbar has maximal conductances levels for channels, in nS (nanosiemens).
// Most other conductances are computed as time-varying proportions of these
// values (strict 1 max is not enforced and can be exceeded).
Gbar chans.Chans `display:"inline"`
// Erev are reversal / driving potentials for each channel, in mV (millivolts).
// Current is a function of the difference between these driving potentials
// and the membrane potential Vm, and goes to 0 (and reverses sign) as it
// crosses equality.
Erev chans.Chans `display:"inline"`
// Clamp determines how external inputs drive excitatory conductance.
Clamp ClampParams `display:"inline"`
// Noise specifies how, where, when, and how much noise to add.
Noise SpikeNoiseParams `display:"inline"`
// VmRange constrains the range of the Vm membrane potential,
// which helps to prevent numerical instability.
VmRange minmax.F32 `display:"inline"`
// Mahp is the M-type medium time-scale afterhyperpolarization (mAHP) current.
// This is the primary form of adaptation on the time scale of
// multiple sequences of spikes.
Mahp chans.MahpParams `display:"inline"`
// Sahp is the slow time-scale afterhyperpolarization (sAHP) current.
// It integrates CaD at theta cycle intervals and produces a hard cutoff
// on sustained activity for any neuron.
Sahp chans.SahpParams `display:"inline"`
// KNa has the sodium-gated potassium channel adaptation parameters.
// It activates a leak-like current as a function of neural activity
// (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow).
KNa chans.KNaMedSlow `display:"inline"`
// Kir is the potassium (K) inwardly rectifying (ir) current, which
// is similar to GABA-B (which is a GABA modulated Kir channel).
// This channel is off by default but plays a critical role in making medium
// spiny neurons (MSNs) relatively quiet in the striatum.
Kir chans.KirParams `display:"inline"`
// NMDA has channel parameters used in computing the Gnmda conductance
// that is maximal for more depolarized neurons (due to unblocking of
// Mg++ ions), and thus helps keep active neurons active, thereby promoting
// overall neural stability over time. See also Learn.LearnNMDA for
// distinct parameters used for Ca++ influx driving learning, and
// MaintNMDA for specialized NMDA driven by maintenance pathways.
NMDA chans.NMDAParams `display:"inline"`
// MaintNMDA has channel parameters used in computing the Gnmda conductance
// based on pathways of the MaintG conductance type, e.g., in the PT PFC neurons.
// This is typically stronger and longer lasting than standard NMDA.
MaintNMDA chans.NMDAParams `display:"inline"`
// GabaB has GABA-B channel parameters for long-lasting inhibition
// that is inwardly rectified (GIRK coupled) and maximal for more hyperpolarized
// neurons, thus keeping inactive neurons inactive. This is synergistic with
// NMDA for supporting stable activity patterns over the theta cycle.
GabaB chans.GABABParams `display:"inline"`
// VGCC are voltage gated calcium channels, which provide a key additional
// source of Ca for learning and positive-feedback loop upstate for active
// neurons when they are spiking.
VGCC chans.VGCCParams `display:"inline"`
// AK is the A-type potassium (K) channel that is particularly important
// for limiting the runaway excitation from VGCC channels.
AK chans.AKsParams `display:"inline"`
// SKCa is the small-conductance calcium-activated potassium channel produces
// the pausing function as a consequence of rapid bursting. These are not active
// by default but are critical for subthalamic nucleus (STN) neurons.
SKCa chans.SKCaParams `display:"inline"`
// SMaint provides a simplified self-maintenance current for a population of
// NMDA-interconnected spiking neurons.
SMaint SMaintParams `display:"inline"`
// PopCode provides encoding population codes, used to represent a single
// continuous (scalar) value, across a population of units / neurons
// (1 dimensional).
PopCode PopCodeParams `display:"inline"`
}
func (ac *ActParams) Defaults() {
ac.Spikes.Defaults()
ac.Dend.Defaults()
ac.Init.Defaults()
ac.Decay.Defaults()
ac.Dt.Defaults()
ac.Gbar.SetAll(100, 20, 100, 100) // E, L, I, K
ac.Erev.SetAll(0, -70, -90, -90) // E, L, I, K: K = hyperpolarized -90mv
ac.Clamp.Defaults()
ac.Noise.Defaults()
ac.VmRange.Set(-100, 0)
ac.Mahp.Defaults()
ac.Mahp.Gk = 0.05
ac.Sahp.Defaults()
ac.Sahp.Gk = 0.05
ac.Sahp.CaTau = 5
ac.KNa.Defaults()
ac.KNa.On.SetBool(true)
ac.Kir.Defaults()
ac.Kir.Gk = 0
ac.NMDA.Defaults()
ac.NMDA.Ge = 0.006
ac.MaintNMDA.Defaults()
ac.MaintNMDA.Ge = 0.007
ac.MaintNMDA.Tau = 200
ac.GabaB.Defaults()
ac.VGCC.Defaults()
ac.VGCC.Ge = 0.02
ac.VGCC.Ca = 0.25
ac.AK.Defaults()
ac.AK.Gk = 0.1
ac.SKCa.Defaults()
ac.SKCa.Gk = 0
ac.SMaint.Defaults()
ac.PopCode.Defaults()
ac.Update()
}
// Update must be called after any changes to parameters
func (ac *ActParams) Update() {
ac.Spikes.Update()
ac.Dend.Update()
ac.Init.Update()
ac.Decay.Update()
ac.Dt.Update()
ac.Clamp.Update()
ac.Noise.Update()
ac.Mahp.Update()
ac.Sahp.Update()
ac.KNa.Update()
ac.Kir.Update()
ac.NMDA.Update()
ac.MaintNMDA.Update()
ac.GabaB.Update()
ac.VGCC.Update()
ac.AK.Update()
ac.SKCa.Update()
ac.SMaint.Update()
ac.PopCode.Update()
}
//////// Init
// DecayLearnCa decays neuron-level calcium learning and spiking variables
// by given factor. Note: this is generally NOT useful,
// causing variability in these learning factors as a function
// of the decay parameter that then has impacts on learning rates etc.
// See the Act.Decay.LearnCa param controlling this.
func (ac *ActParams) DecayLearnCa(ctx *Context, ni, di uint32, decay float32) {
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GnmdaLrn)), int(ni), int(di), int(GnmdaLrn))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(NmdaCa)), int(ni), int(di), int(NmdaCa))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(VgccCa)), int(ni), int(di), int(VgccCa))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(VgccCaInt)), int(ni), int(di), int(VgccCaInt))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCa)), int(ni), int(di), int(LearnCa))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaM)), int(ni), int(di), int(CaM))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(CaP))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaD)), int(ni), int(di), int(CaD))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaM)), int(ni), int(di), int(LearnCaM))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaP)), int(ni), int(di), int(LearnCaP))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaD)), int(ni), int(di), int(LearnCaD))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(ETrace)), int(ni), int(di), int(ETrace))
Neurons.SetAdd(decay*(1.0-Neurons.Value(int(ni), int(di), int(ETrLearn))), int(ni), int(di), int(ETrLearn))
// recovers
Neurons.SetAdd(decay*(1.0-Neurons.Value(int(ni), int(di), int(SKCaIn))), int(ni), int(di), int(SKCaIn))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(SKCaR)), int(ni), int(di), int(SKCaR))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(SKCaM)), int(ni), int(di), int(SKCaM))
}
// DecayAHP decays after-hyperpolarization variables
// by given factor (typically Decay.AHP)
func (ac *ActParams) DecayAHP(ctx *Context, ni, di uint32, decay float32) {
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(MahpN)), int(ni), int(di), int(MahpN))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(Gmahp)), int(ni), int(di), int(Gmahp))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(SahpCa)), int(ni), int(di), int(SahpCa))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(SahpN)), int(ni), int(di), int(SahpN))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(Gsahp)), int(ni), int(di), int(Gsahp))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GknaMed)), int(ni), int(di), int(GknaMed))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GknaSlow)), int(ni), int(di), int(GknaSlow))
kirMrest := ac.Kir.Mrest
Neurons.SetAdd(decay*(kirMrest-Neurons.Value(int(ni), int(di), int(KirM))), int(ni), int(di), int(KirM))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(Gkir)), int(ni), int(di), int(Gkir))
}
// DecayState decays the activation state toward initial values
// in proportion to given decay parameter. Special case values
// such as Glong and KNa are also decayed with their
// separately parameterized values.
// Called with ac.Decay.Act by Layer during NewState
func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp float32) {
// always reset these -- otherwise get insanely large values that take forever to update
Neurons.Set(-1.0, int(ni), int(di), int(ISIAvg))
Neurons.Set(ac.Init.Act, int(ni), int(di), int(ActInt))
Neurons.Set(0.0, int(ni), int(di), int(Spiked))
if decay > 0 { // no-op for most, but not all..
Neurons.Set(0.0, int(ni), int(di), int(Spike))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(Act))-ac.Init.Act), int(ni), int(di), int(Act))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(ActInt))-ac.Init.Act), int(ni), int(di), int(ActInt))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(GeSyn))-NeuronAvgs.Value(int(ni), int(GeBase))), int(ni), int(di), int(GeSyn))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(Ge))-NeuronAvgs.Value(int(ni), int(GeBase))), int(ni), int(di), int(Ge))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(Gi))-NeuronAvgs.Value(int(ni), int(GiBase))), int(ni), int(di), int(Gi))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(Gk)), int(ni), int(di), int(Gk))
Neurons.SetSub(decay*(Neurons.Value(int(ni), int(di), int(Vm))-ac.Init.Vm), int(ni), int(di), int(Vm))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GeNoise)), int(ni), int(di), int(GeNoise))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GiNoise)), int(ni), int(di), int(GiNoise))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GiSyn)), int(ni), int(di), int(GiSyn))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GeInt)), int(ni), int(di), int(GeInt))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GiInt)), int(ni), int(di), int(GiInt))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(GeIntNorm)), int(ni), int(di), int(GeIntNorm))
}
Neurons.SetSub(glong*(Neurons.Value(int(ni), int(di), int(VmDend))-ac.Init.Vm), int(ni), int(di), int(VmDend))
if ahp > 0 {
ac.DecayAHP(ctx, ni, di, ahp)
}
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GgabaB)), int(ni), int(di), int(GgabaB))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GababM)), int(ni), int(di), int(GababM))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GababX)), int(ni), int(di), int(GababX))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GnmdaSyn)), int(ni), int(di), int(GnmdaSyn))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(Gnmda)), int(ni), int(di), int(Gnmda))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GMaintSyn)), int(ni), int(di), int(GMaintSyn))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(GnmdaMaint)), int(ni), int(di), int(GnmdaMaint))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(Gvgcc)), int(ni), int(di), int(Gvgcc))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(VgccM)), int(ni), int(di), int(VgccM))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(VgccH)), int(ni), int(di), int(VgccH))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(Gak)), int(ni), int(di), int(Gak))
// don't mess with SKCa -- longer time scale
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(Gsk)), int(ni), int(di), int(Gsk))
if ac.Decay.LearnCa > 0 { // learning-based Ca values -- not usual
ac.DecayLearnCa(ctx, ni, di, ac.Decay.LearnCa)
}
Neurons.Set(0.0, int(ni), int(di), int(Inet))
Neurons.Set(0.0, int(ni), int(di), int(GeRaw))
Neurons.Set(0.0, int(ni), int(di), int(GiRaw))
Neurons.Set(0.0, int(ni), int(di), int(GModRaw))
Neurons.Set(0.0, int(ni), int(di), int(GModSyn))
Neurons.Set(0.0, int(ni), int(di), int(GMaintRaw))
Neurons.Set(0.0, int(ni), int(di), int(SSGiDend))
Neurons.Set(0.0, int(ni), int(di), int(GeExt))
Neurons.SetSub(glong*Neurons.Value(int(ni), int(di), int(CtxtGeOrig)), int(ni), int(di), int(CtxtGeOrig))
}
// InitActs initializes activation state in neuron -- called during InitWeights but otherwise not
// automatically called (DecayState is used instead)
func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(Spike))
Neurons.Set(0, int(ni), int(di), int(Spiked))
Neurons.Set(-1, int(ni), int(di), int(ISI))
Neurons.Set(-1, int(ni), int(di), int(ISIAvg))
Neurons.Set(ac.Init.Act, int(ni), int(di), int(Act))
Neurons.Set(ac.Init.Act, int(ni), int(di), int(ActInt))
Neurons.Set(NeuronAvgs.Value(int(ni), int(GeBase)), int(ni), int(di), int(GeSyn))
Neurons.Set(NeuronAvgs.Value(int(ni), int(GeBase)), int(ni), int(di), int(Ge))
Neurons.Set(NeuronAvgs.Value(int(ni), int(GiBase)), int(ni), int(di), int(Gi))
Neurons.Set(0, int(ni), int(di), int(Gk))
Neurons.Set(0, int(ni), int(di), int(Inet))
Neurons.Set(ac.Init.Vm, int(ni), int(di), int(Vm))
Neurons.Set(ac.Init.Vm, int(ni), int(di), int(VmDend))
Neurons.Set(0, int(ni), int(di), int(Target))
Neurons.Set(0, int(ni), int(di), int(Ext))
Neurons.Set(0, int(ni), int(di), int(CaPMaxCa))
Neurons.Set(0, int(ni), int(di), int(CaPMax))
Neurons.Set(1, int(ni), int(di), int(RLRate))
Neurons.Set(1, int(ni), int(di), int(GeNoiseP))
Neurons.Set(0, int(ni), int(di), int(GeNoise))
Neurons.Set(1, int(ni), int(di), int(GiNoiseP))
Neurons.Set(0, int(ni), int(di), int(GiNoise))
Neurons.Set(0, int(ni), int(di), int(GiSyn))
Neurons.Set(1, int(ni), int(di), int(SMaintP))
Neurons.Set(0, int(ni), int(di), int(GeInt))
Neurons.Set(0, int(ni), int(di), int(GeIntNorm))
Neurons.Set(0, int(ni), int(di), int(GiInt))
Neurons.Set(0, int(ni), int(di), int(MahpN))
Neurons.Set(0, int(ni), int(di), int(Gmahp))
Neurons.Set(0, int(ni), int(di), int(SahpCa))
Neurons.Set(0, int(ni), int(di), int(SahpN))
Neurons.Set(0, int(ni), int(di), int(Gsahp))
Neurons.Set(0, int(ni), int(di), int(GknaMed))
Neurons.Set(0, int(ni), int(di), int(GknaSlow))
Neurons.Set(ac.Kir.Mrest, int(ni), int(di), int(KirM))
Neurons.Set(0, int(ni), int(di), int(Gkir))
Neurons.Set(0, int(ni), int(di), int(GnmdaSyn))
Neurons.Set(0, int(ni), int(di), int(Gnmda))
Neurons.Set(0, int(ni), int(di), int(GnmdaMaint))
Neurons.Set(0, int(ni), int(di), int(GnmdaLrn))
Neurons.Set(0, int(ni), int(di), int(NmdaCa))
Neurons.Set(0, int(ni), int(di), int(GgabaB))
Neurons.Set(0, int(ni), int(di), int(GababM))
Neurons.Set(0, int(ni), int(di), int(GababX))
Neurons.Set(0, int(ni), int(di), int(Gvgcc))
Neurons.Set(0, int(ni), int(di), int(VgccM))
Neurons.Set(0, int(ni), int(di), int(VgccH))
Neurons.Set(0, int(ni), int(di), int(Gak))
Neurons.Set(0, int(ni), int(di), int(VgccCaInt))
Neurons.Set(1, int(ni), int(di), int(SKCaIn))
Neurons.Set(0, int(ni), int(di), int(SKCaR))
Neurons.Set(0, int(ni), int(di), int(SKCaM))
Neurons.Set(0, int(ni), int(di), int(Gsk))
Neurons.Set(0, int(ni), int(di), int(GeExt))
Neurons.Set(0, int(ni), int(di), int(GeRaw))
Neurons.Set(0, int(ni), int(di), int(GiRaw))
Neurons.Set(0, int(ni), int(di), int(GModRaw))
Neurons.Set(0, int(ni), int(di), int(GModSyn))
Neurons.Set(0, int(ni), int(di), int(GMaintRaw))
Neurons.Set(0, int(ni), int(di), int(GMaintSyn))
Neurons.Set(0, int(ni), int(di), int(SSGiDend))
Neurons.Set(0, int(ni), int(di), int(Burst))
Neurons.Set(0, int(ni), int(di), int(BurstPrv))
Neurons.Set(0, int(ni), int(di), int(CtxtGe))
Neurons.Set(0, int(ni), int(di), int(CtxtGeRaw))
Neurons.Set(0, int(ni), int(di), int(CtxtGeOrig))
mx := NetworkIxs[0].NCaBins
for i := range mx {
Neurons.Set(0.0, int(ni), int(di), int(CaBins+NeuronVars(i)))
}
ac.InitLongActs(ctx, ni, di)
}
// InitLongActs initializes longer time-scale activation states in neuron
// (CaDPrev, Beta1, Beta2, ActM, ActP)
// Called from InitActs, which is called from InitWeights,
// but otherwise not automatically called
// (DecayState is used instead)
func (ac *ActParams) InitLongActs(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(CaDPrev))
Neurons.Set(0, int(ni), int(di), int(Beta1))
Neurons.Set(0, int(ni), int(di), int(Beta2))
Neurons.Set(0, int(ni), int(di), int(ActM))
Neurons.Set(0, int(ni), int(di), int(ActP))
Neurons.Set(0, int(ni), int(di), int(ETrace))
Neurons.Set(1, int(ni), int(di), int(ETrLearn))
}
//////// Cycle
// NMDAFromRaw updates all the NMDA variables from
// total Ge (GeRaw + Ext) and current Vm, Spiking
func (ac *ActParams) NMDAFromRaw(ctx *Context, ni, di uint32, geTot float32) {
if ac.NMDA.Ge == 0 {
return
}
geT := max(geTot, 0.0)
Neurons.Set(ac.NMDA.NMDASyn(Neurons.Value(int(ni), int(di), int(GnmdaSyn)), geT), int(ni), int(di), int(GnmdaSyn))
Neurons.Set(ac.NMDA.Gnmda(Neurons.Value(int(ni), int(di), int(GnmdaSyn)), Neurons.Value(int(ni), int(di), int(VmDend))), int(ni), int(di), int(Gnmda))
// note: nrn.NmdaCa computed via Learn.LearnNMDA in learn.go, CaM method
}
// MaintNMDAFromRaw updates all the Maint NMDA variables from
// GModRaw and current Vm, Spiking
func (ac *ActParams) MaintNMDAFromRaw(ctx *Context, ni, di uint32) {
if ac.MaintNMDA.Ge == 0 {
return
}
if ac.SMaint.On.IsTrue() {
ac.SMaintFromISI(ctx, ni, di)
}
Neurons.Set(ac.MaintNMDA.NMDASyn(Neurons.Value(int(ni), int(di), int(GMaintSyn)), Neurons.Value(int(ni), int(di), int(GMaintRaw))), int(ni), int(di), int(GMaintSyn))
Neurons.Set(ac.MaintNMDA.Gnmda(Neurons.Value(int(ni), int(di), int(GMaintSyn)), Neurons.Value(int(ni), int(di), int(VmDend))), int(ni), int(di), int(GnmdaMaint))
}
// SMaintFromISI updates the SMaint self-maintenance current into GMaintRaw
func (ac *ActParams) SMaintFromISI(ctx *Context, ni, di uint32) {
nix := GetNetworkIxs(0)
isi := Neurons.Value(int(ni), int(di), int(ISIAvg))
if isi < ac.SMaint.ISI.Min || isi > ac.SMaint.ISI.Max {
return
}
ndi := di*nix.NNeurons + ni
smp := Neurons.Value(int(ni), int(di), int(SMaintP))
smp *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActSMaintP)
trg := ac.SMaint.ExpInt(isi)
if smp <= trg {
smp = 1
Neurons.SetAdd(ac.SMaint.Ge, int(ni), int(di), int(GMaintRaw))
}
Neurons.Set(smp, int(ni), int(di), int(SMaintP))
}
// GvgccFromVm updates all the VGCC voltage-gated calcium channel variables
// from VmDend
func (ac *ActParams) GvgccFromVm(ctx *Context, ni, di uint32) {
if ac.VGCC.Ge == 0 {
return
}
v := Neurons.Value(int(ni), int(di), int(VmDend))
Neurons.Set(ac.VGCC.Gvgcc(v, Neurons.Value(int(ni), int(di), int(VgccM)), Neurons.Value(int(ni), int(di), int(VgccH))), int(ni), int(di), int(Gvgcc))
dm := ac.VGCC.DeltaMFromV(v, Neurons.Value(int(ni), int(di), int(VgccM)))
dh := ac.VGCC.DeltaHFromV(v, Neurons.Value(int(ni), int(di), int(VgccH)))
Neurons.SetAdd(dm, int(ni), int(di), int(VgccM))
Neurons.SetAdd(dh, int(ni), int(di), int(VgccH))
// note: may be overwritten!
Neurons.Set(ac.VGCC.CaFromG(v, Neurons.Value(int(ni), int(di), int(Gvgcc)), Neurons.Value(int(ni), int(di), int(VgccCa))), int(ni), int(di), int(VgccCa))
}
// GkFromVm updates all the Gk-based conductances: Mahp, KNa, Gak
func (ac *ActParams) GkFromVm(ctx *Context, ni, di uint32) {
vm := Neurons.Value(int(ni), int(di), int(Vm))
vmd := Neurons.Value(int(ni), int(di), int(VmDend))
mahpN := Neurons.Value(int(ni), int(di), int(MahpN))
gmahp := ac.Mahp.GmAHP(vm, &mahpN)
Neurons.Set(gmahp, int(ni), int(di), int(Gmahp))
Neurons.Set(mahpN, int(ni), int(di), int(MahpN))
gsahp := Neurons.Value(int(ni), int(di), int(Gsahp))
gak := ac.AK.Gak(vmd)
Neurons.Set(gak, int(ni), int(di), int(Gak))
nkirM := Neurons.Value(int(ni), int(di), int(KirM))
gkir := ac.Kir.Gkir(vm, nkirM)
Neurons.Set(gkir, int(ni), int(di), int(Gkir))
nkirM += ac.Kir.DM(vm, nkirM)
Neurons.Set(nkirM, int(ni), int(di), int(KirM))
gktot := gmahp + gsahp + gak + gkir
if ac.KNa.On.IsTrue() {
gknaMed := Neurons.Value(int(ni), int(di), int(GknaMed))
gknaSlow := Neurons.Value(int(ni), int(di), int(GknaSlow))
ac.KNa.GcFromSpike(&gknaMed, &gknaSlow, Neurons.Value(int(ni), int(di), int(Spike)) > .5)
Neurons.Set(gknaMed, int(ni), int(di), int(GknaMed))
Neurons.Set(gknaSlow, int(ni), int(di), int(GknaSlow))
gktot += gknaMed + gknaSlow
}
Neurons.Set(gktot, int(ni), int(di), int(Gk))
}
// KNaNewState does TrialSlow version of KNa during NewState if option is set
func (ac *ActParams) KNaNewState(ctx *Context, ni, di uint32) {
if ac.KNa.On.IsTrue() && ac.KNa.TrialSlow.IsTrue() {
Neurons.SetAdd(ac.KNa.Slow.Gk*Neurons.Value(int(ni), int(di), int(CaDPrev)), int(ni), int(di), int(GknaSlow))
}
}
// GSkCaFromCa updates the SKCa channel if used
func (ac *ActParams) GSkCaFromCa(ctx *Context, ni, di uint32) {
if ac.SKCa.Gk == 0 {
return
}
skcar := Neurons.Value(int(ni), int(di), int(SKCaR))
skcain := Neurons.Value(int(ni), int(di), int(SKCaIn))
Neurons.Set(ac.SKCa.MFromCa(skcar, Neurons.Value(int(ni), int(di), int(SKCaM))), int(ni), int(di), int(SKCaM))
ac.SKCa.CaInRFromSpike(Neurons.Value(int(ni), int(di), int(Spike)), Neurons.Value(int(ni), int(di), int(CaD)), &skcain, &skcar)
Neurons.Set(skcar, int(ni), int(di), int(SKCaR))
Neurons.Set(skcain, int(ni), int(di), int(SKCaIn))
Neurons.Set(ac.SKCa.Gk*Neurons.Value(int(ni), int(di), int(SKCaM)), int(ni), int(di), int(Gsk))
Neurons.SetAdd(Neurons.Value(int(ni), int(di), int(Gsk)), int(ni), int(di), int(Gk))
}
// GeFromSyn integrates Ge excitatory conductance from GeSyn.
// geExt is extra conductance to add to the final Ge value
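// When Clamp.Add is false and the neuron has external input (NeuronHasExt),
// Ge is hard-clamped to Ext * Clamp.Ge and geExt is ignored; when Clamp.Add
// is true, the clamp input is added on top of the synaptic conductance.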
func (ac *ActParams) GeFromSyn(ctx *Context, ni, di uint32, geSyn, geExt float32) {
Neurons.Set(0.0, int(ni), int(di), int(GeExt))
geS := geSyn
geE := geExt
if ac.Clamp.Add.IsTrue() && NeuronHasFlag(NeuronHasExt, ni, di) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(Ext))*ac.Clamp.Ge, int(ni), int(di), int(GeExt))
geS += Neurons.Value(int(ni), int(di), int(GeExt))
}
if ac.Clamp.Add.IsFalse() && NeuronHasFlag(NeuronHasExt, ni, di) {
geS = Neurons.Value(int(ni), int(di), int(Ext)) * ac.Clamp.Ge
Neurons.Set(geS, int(ni), int(di), int(GeExt))
geE = 0 // no extra in this case
}
Neurons.Set(geS+geE, int(ni), int(di), int(Ge))
if Neurons.Value(int(ni), int(di), int(Ge)) < 0.0 {
Neurons.Set(0.0, int(ni), int(di), int(Ge))
}
ac.AddGeNoise(ctx, ni, di)
}
// AddGeNoise updates nrn.GeNoise if active
func (ac *ActParams) AddGeNoise(ctx *Context, ni, di uint32) {
if ac.Noise.On.IsFalse() || ac.Noise.Ge == 0 {
return
}
p := Neurons.Value(int(ni), int(di), int(GeNoiseP))
ge := ac.Noise.PGe(ctx, &p, ni, di)
Neurons.Set(p, int(ni), int(di), int(GeNoiseP))
Neurons.Set(ac.Dt.GeSynFromRaw(Neurons.Value(int(ni), int(di), int(GeNoise)), ge), int(ni), int(di), int(GeNoise))
Neurons.SetAdd(Neurons.Value(int(ni), int(di), int(GeNoise)), int(ni), int(di), int(Ge))
}
// AddGiNoise updates nrn.GiNoise if active
func (ac *ActParams) AddGiNoise(ctx *Context, ni, di uint32) {
if ac.Noise.On.IsFalse() || ac.Noise.Gi == 0 {
return
}
p := Neurons.Value(int(ni), int(di), int(GiNoiseP))
gi := ac.Noise.PGi(ctx, &p, ni, di)
Neurons.Set(p, int(ni), int(di), int(GiNoiseP))
Neurons.Set(ac.Dt.GiSynFromRaw(Neurons.Value(int(ni), int(di), int(GiNoise)), gi), int(ni), int(di), int(GiNoise))
}
// GiFromSyn integrates GiSyn inhibitory synaptic conductance from GiRaw value
// (can add other terms to giSyn prior to calling this)
func (ac *ActParams) GiFromSyn(ctx *Context, ni, di uint32, giSyn float32) float32 {
ac.AddGiNoise(ctx, ni, di)
if giSyn < 0 { // negative inhib G doesn't make any sense
return 0
}
return giSyn
}
// InetFromG computes net current from conductances and Vm
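// Each conductance drives Vm toward its reversal potential:
// Inet = ge*(Erev.E - Vm) + gl*Gbar.L*(Erev.L - Vm) + gi*(Erev.I - Vm) + gk*(Erev.K - Vm),
// clamped to +/- Dt.MaxI to keep the integration numerically stable.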
func (ac *ActParams) InetFromG(vm, ge, gl, gi, gk float32) float32 {
inet := ge*(ac.Erev.E-vm) + gl*ac.Gbar.L*(ac.Erev.L-vm) + gi*(ac.Erev.I-vm) + gk*(ac.Erev.K-vm)
if inet > ac.Dt.MaxI {
inet = ac.Dt.MaxI
} else if inet < -ac.Dt.MaxI {
inet = -ac.Dt.MaxI
}
return inet
}
// VmFromInet computes new Vm value from inet, clamping range
func (ac *ActParams) VmFromInet(vm, dt, inet float32) float32 {
return ac.VmRange.ClampValue(vm + dt*inet)
}
// VmInteg integrates Vm over VmSteps to obtain a more stable value
// Returns the new Vm and inet values.
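// It takes Dt.VmSteps explicit Euler sub-steps, each using the smaller
// effective step dt * Dt.DtStep, recomputing Inet from the updated Vm at
// each sub-step.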
func (ac *ActParams) VmInteg(vm, dt, ge, gl, gi, gk float32, nvm, inet *float32) {
dtEff := dt * ac.Dt.DtStep
*nvm = vm
for i := int32(0); i < ac.Dt.VmSteps; i++ {
*inet = ac.InetFromG(*nvm, ge, gl, gi, gk)
*nvm = ac.VmFromInet(*nvm, dtEff, *inet)
}
}
// VmFromG computes membrane potential Vm from conductances Ge, Gi, and Gk.
func (ac *ActParams) VmFromG(ctx *Context, ni, di uint32) {
updtVm := true
// note: nrn.ISI has NOT yet been updated at this point: 0 right after spike, etc
// so it takes a full 3 time steps after spiking for the Tr refractory period
isi := Neurons.Value(int(ni), int(di), int(ISI))
if ac.Spikes.Tr > 0 && isi >= 0 && isi < float32(ac.Spikes.Tr) {
updtVm = false // don't update the spiking vm during refract
}
ge := Neurons.Value(int(ni), int(di), int(Ge)) * ac.Gbar.E
gi := Neurons.Value(int(ni), int(di), int(Gi)) * ac.Gbar.I
gk := Neurons.Value(int(ni), int(di), int(Gk)) * ac.Gbar.K
var nvm, inet, expi float32
if updtVm {
ac.VmInteg(Neurons.Value(int(ni), int(di), int(Vm)), ac.Dt.VmDt, ge, 1, gi, gk, &nvm, &inet)
if updtVm && ac.Spikes.Exp.IsTrue() { // add spike current if relevant
var exVm float32
exVm = 0.5 * (nvm + Neurons.Value(int(ni), int(di), int(Vm))) // midpoint for this
expi = ac.Gbar.L * ac.Spikes.ExpSlope *
math32.FastExp((exVm-ac.Spikes.Thr)/ac.Spikes.ExpSlope)
if expi > ac.Dt.MaxI {
expi = ac.Dt.MaxI
}
inet += expi
nvm = ac.VmFromInet(nvm, ac.Dt.VmDt, expi)
}
Neurons.Set(nvm, int(ni), int(di), int(Vm))
Neurons.Set(inet, int(ni), int(di), int(Inet))
} else { // decay back to VmR
var dvm float32
if int32(isi) == ac.Spikes.Tr-1 {
dvm = ac.Spikes.VmR - Neurons.Value(int(ni), int(di), int(Vm))
} else {
dvm = ac.Spikes.RDt * (ac.Spikes.VmR - Neurons.Value(int(ni), int(di), int(Vm)))
}
Neurons.SetAdd(dvm, int(ni), int(di), int(Vm))
Neurons.Set(dvm*ac.Dt.VmC, int(ni), int(di), int(Inet))
}
glEff := float32(1)
if !updtVm {
glEff += ac.Dend.GR
}
var giEff float32
giEff = gi + ac.Gbar.I*Neurons.Value(int(ni), int(di), int(SSGiDend))
ac.VmInteg(Neurons.Value(int(ni), int(di), int(VmDend)), ac.Dt.VmDendDt, ge, glEff, giEff, gk, &nvm, &inet)
if updtVm {
nvm = ac.VmFromInet(nvm, ac.Dt.VmDendDt, ac.Dend.GExp*expi)
}
Neurons.Set(nvm, int(ni), int(di), int(VmDend))
}
// SpikeFromVmVars computes Spike from Vm and ISI-based activation, using pointers to variables
func (ac *ActParams) SpikeFromVmVars(nrnISI, nrnISIAvg, nrnSpike, nrnSpiked, nrnAct *float32, nrnVm float32) {
var thr float32
if ac.Spikes.Exp.IsTrue() {
thr = ac.Spikes.ExpThr
} else {
thr = ac.Spikes.Thr
}
if nrnVm >= thr {
*nrnSpike = 1
if *nrnISIAvg == -1 {
*nrnISIAvg = -2
} else if *nrnISI > 0 { // must have spiked to update
*nrnISIAvg = ac.Spikes.AvgFromISI(*nrnISIAvg, *nrnISI+1)
}
*nrnISI = 0
} else {
*nrnSpike = 0
if *nrnISI >= 0 {
*nrnISI += 1
if *nrnISI < 10 {
*nrnSpiked = 1
} else {
*nrnSpiked = 0
}
if *nrnISI > 200 { // keep from growing infinitely large
// used to do this arbitrarily in DecayState but that
// caused issues with missing refractory periods
*nrnISI = -1
}
} else {
*nrnSpiked = 0
}
if *nrnISIAvg >= 0 && *nrnISI > 0 && *nrnISI > 1.2**nrnISIAvg {
*nrnISIAvg = ac.Spikes.AvgFromISI(*nrnISIAvg, *nrnISI)
}
}
nwAct := ac.Spikes.ActFromISI(*nrnISIAvg, .001, ac.Dt.Integ)
if nwAct > 1 {
nwAct = 1
}
nwAct = *nrnAct + 100*ac.Dt.VmDt*(nwAct-*nrnAct) // 100 restores to prior behavior
*nrnAct = nwAct
}
// SpikeFromVm computes Spike from Vm and ISI-based activation
func (ac *ActParams) SpikeFromVm(ctx *Context, ni, di uint32) {
nrnISI := Neurons.Value(int(ni), int(di), int(ISI))
nrnISIAvg := Neurons.Value(int(ni), int(di), int(ISIAvg))
nrnSpike := Neurons.Value(int(ni), int(di), int(Spike))
nrnSpiked := Neurons.Value(int(ni), int(di), int(Spiked))
nrnAct := Neurons.Value(int(ni), int(di), int(Act))
nrnVm := Neurons.Value(int(ni), int(di), int(Vm))
ac.SpikeFromVmVars(&nrnISI, &nrnISIAvg, &nrnSpike, &nrnSpiked, &nrnAct, nrnVm)
Neurons.Set(nrnISI, int(ni), int(di), int(ISI))
Neurons.Set(nrnISIAvg, int(ni), int(di), int(ISIAvg))
Neurons.Set(nrnSpike, int(ni), int(di), int(Spike))
Neurons.Set(nrnSpiked, int(ni), int(di), int(Spiked))
Neurons.Set(nrnAct, int(ni), int(di), int(Act))
}
//gosl:end
// Decode decodes value from a pattern of activation
// as the activation-weighted average of the units' preferred
// tuning values.
// Must have 2 or more values in the acts pattern.
func (pc *PopCodeParams) Decode(acts []float32) float32 {
n := len(acts)
if n < 2 {
return 0
}
rng := pc.Max - pc.Min
incr := rng / float32(n-1)
avg := float32(0)
sum := float32(0)
for i, act := range acts {
if act < 0.1 {
act = 0
}
trg := pc.Min + incr*float32(i)
avg += trg * act
sum += act
}
sum = math32.Max(sum, 0.2)
avg /= sum
return avg
}
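// Illustrative sketch, not part of the generated code: decoding a simple
// population-coded pattern. The example function name and the Min/Max range
// are hypothetical; Decode only uses the Min and Max fields.
func examplePopCodeDecode() float32 {
pc := &PopCodeParams{Min: -1, Max: 1}
acts := []float32{0, 0.2, 1.0, 0.2, 0} // peak on the middle unit
return pc.Decode(acts) // symmetric pattern decodes to 0, the center of -1..1
}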
// Uncertainty returns the uncertainty of the given distribution of
// activities relative to a perfect code for the given value.
// Uncertainty is the root-mean-square deviation between the
// pop code encoding of val and the max-normalized activities.
func (pc *PopCodeParams) Uncertainty(val float32, acts []float32) float32 {
n := len(acts)
if n < 2 {
return 0
}
mx := float32(0)
for _, act := range acts {
if act > mx {
mx = act
}
}
if mx == 0 {
mx = 1
}
vr := float32(0)
for i, act := range acts {
trg := pc.EncodeValue(uint32(i), uint32(n), val)
vi := trg - (act / mx)
vr += vi * vi
}
vr /= float32(n)
return math32.Sqrt(vr)
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/paths"
)
// BLANovelPath connects all other pools to the first, Novelty, pool in a BLA layer.
// This allows the known US representations to specifically inhibit the novelty pool.
type BLANovelPath struct {
}
func NewBLANovelPath() *BLANovelPath {
return &BLANovelPath{}
}
func (ot *BLANovelPath) Name() string {
return "BLANovelPath"
}
func (ot *BLANovelPath) Connect(send, recv *tensor.Shape, same bool) (sendn, recvn *tensor.Int32, cons *tensor.Bool) {
sendn, recvn, cons = paths.NewTensors(send, recv)
sNtot := send.Len()
// rNtot := recv.Len()
sNp := send.DimSize(0) * send.DimSize(1)
sNu := send.DimSize(2) * send.DimSize(3)
rNu := recv.DimSize(2) * recv.DimSize(3)
rnv := recvn.Values
snv := sendn.Values
npl := sNp
rpi := 0
for spi := 1; spi < npl; spi++ {
for rui := 0; rui < rNu; rui++ {
ri := rpi*rNu + rui
for sui := 0; sui < sNu; sui++ {
si := spi*sNu + sui
off := ri*sNtot + si
cons.Values.Set(true, off)
rnv[ri] = int32(sNu * (npl - 1))
snv[si] = int32(rNu)
}
}
}
return
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/enums"
"cogentcore.org/lab/gosl/slbool"
"cogentcore.org/lab/gosl/slrand"
"cogentcore.org/lab/gosl/sltype"
)
// ToGPUCtx copies Context var to the GPU
func ToGPUCtx() {
ToGPU(CtxVar)
}
//gosl:start
// CaBinCycles is the number of cycles per CaBin for integrating
// calcium-based activity values ([CaSyn]) that are used for computing
// a synaptic-level (pre * post) credit assignment factor for learning.
// This is a constant because other pre-computed factors depend on it.
const CaBinCycles = 10
// Context contains all of the global context state info
// that is shared across every step of the computation.
// It is passed around to all relevant computational functions,
// and is updated on the CPU and synced to the GPU after every cycle.
// It contains timing, Testing vs. Training mode, random number context, etc.
// There is one canonical instance, stored on the network as Ctx; always get it from
// the network.Context() method.
type Context struct { //types:add -setters
// number of data parallel items to process currently.
NData uint32 `min:"1"`
// current running mode, using sim-defined enum, e.g., Train, Test, etc.
Mode int32
// Testing is true if the model is being run in a testing mode,
// so no weight changes or other associated computations should be done.
// This flag should only affect learning-related behavior.
Testing slbool.Bool `edit:"-"`
// MinusPhase is true if this is the minus phase, when a stimulus is present
// and learning is occurring. Could also be in a non-learning phase when
// no stimulus is present. This affects accumulation of CaBins values only.
MinusPhase slbool.Bool
// PlusPhase is true if this is the plus phase, when the outcome / bursting
// is occurring, driving positive learning; else minus or non-learning phase.
PlusPhase slbool.Bool
// Cycle within current phase, minus or plus.
PhaseCycle int32
// Cycle within Trial: number of iterations of activation updating (settling)
// on the current state. This is reset at NewState.
Cycle int32
// ThetaCycles is the length of the theta cycle (i.e., Trial),
// in terms of 1 msec Cycles. Some network update steps depend on doing something
// at the end of the theta cycle (e.g., CTCtxtPath).
// Should be ISICycles + MinusCycles + PlusCycles
ThetaCycles int32 `default:"200"`
// ISICycles is the number of inter-stimulus-interval cycles,
// which happen prior to the minus phase (i.e., after the last plus phase).
ISICycles int32
// MinusCycles is the number of cycles in the minus phase. Typically 150,
// but may be set longer if ThetaCycles is above default of 200.
MinusCycles int32 `default:"150"`
// PlusCycles is the number of cycles in the plus phase. Typically 50,
// but may be set longer if ThetaCycles is above default of 200.
PlusCycles int32 `default:"50"`
// CyclesTotal is the accumulated cycle count, which increments continuously
// from whenever it was last reset. Typically this is the number of milliseconds
// in simulation time.
CyclesTotal int32
// Time is the accumulated amount of time the network has been running,
// in simulation-time (not real world time), in seconds.
Time float32
// TrialsTotal is the total trial count, which increments continuously in NewState
// _only in Train mode_ from whenever it was last reset. Can be used for synchronizing
// weight updates across nodes.
TrialsTotal int32
// TimePerCycle is the amount of Time to increment per cycle.
TimePerCycle float32 `default:"0.001"`
// SlowInterval is how frequently in Trials to perform slow adaptive processes
// such as synaptic scaling, associated in the brain with sleep,
// via the SlowAdapt method. This should be long enough for meaningful changes
// to accumulate. 100 is default but could easily be longer in larger models.
// Because SlowCounter is incremented by NData, high NData cases (e.g. 16) likely need to
// increase this value, e.g., 400 seems to produce overall consistent results in various models.
SlowInterval int32 `default:"100"`
// SlowCounter increments for each training trial, to trigger SlowAdapt at SlowInterval.
// This is incremented by NData to maintain consistency across different values of NData.
SlowCounter int32 `edit:"-"`
// AdaptGiInterval is how frequently in Trials to perform inhibition adaptation,
// which needs to be even slower than the SlowInterval.
AdaptGiInterval int32 `default:"1000"`
// AdaptGiCounter increments for each training trial, to trigger AdaptGi at AdaptGiInterval.
// This is incremented by NData to maintain consistency across different values of NData.
AdaptGiCounter int32 `edit:"-"`
pad int32
// RandCounter is the random counter, incremented by maximum number of
// possible random numbers generated per cycle, regardless of how
// many are actually used. This is shared across all layers so must
// encompass all possible param settings.
RandCounter slrand.Counter
}
// Defaults sets default values
func (ctx *Context) Defaults() {
ctx.NData = 1
ctx.TimePerCycle = 0.001
ctx.ISICycles = 0
ctx.MinusCycles = 150
ctx.PlusCycles = 50
ctx.SlowInterval = 100
ctx.AdaptGiInterval = 1000
ctx.Update()
}
func (ctx *Context) Update() {
ctx.ThetaCycles = ctx.ISICycles + ctx.MinusCycles + ctx.PlusCycles
}
// ItemIndex returns the main item index from an overall index over NItems * NData.
// (items = layers, neurons, synapses)
func (ctx *Context) ItemIndex(idx uint32) uint32 {
return idx / ctx.NData
}
// DataIndex returns the data index from an overall index over NItems * NData.
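// For example, with NData = 4, an overall index of 10 maps to ItemIndex 10/4 = 2
// and DataIndex 10%4 = 2.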
func (ctx *Context) DataIndex(idx uint32) uint32 {
return idx % ctx.NData
}
// CycleInc increments at the cycle level. This is the one time when
// Context is used on GPU in read-write mode, vs. read-only.
//
//gosl:pointer-receiver
func (ctx *Context) CycleInc() {
ctx.PhaseCycle++
ctx.Cycle++
ctx.CyclesTotal++
ctx.Time += ctx.TimePerCycle
// ctx.RandCounter.Add(uint32(RandFunIndexN)):
ctx.RandCounter.Counter = sltype.Uint64Add32(ctx.RandCounter.Counter, uint32(RandFunIndexN))
// note: cannot call writing methods on sub-fields, so have to do it manually.
}
// SlowInc increments the Slow and AdaptGi counters and returns true if it is
// time to perform SlowAdapt or AdaptGi functions.
func (ctx *Context) SlowInc() (slow bool, adaptgi bool) {
ctx.SlowCounter += int32(ctx.NData)
ctx.AdaptGiCounter += int32(ctx.NData)
if ctx.SlowCounter >= ctx.SlowInterval {
slow = true
ctx.SlowCounter = 0
}
if ctx.AdaptGiCounter >= ctx.AdaptGiInterval {
adaptgi = true
ctx.AdaptGiCounter = 0
}
return
}
// MinusPhaseStart resets PhaseCycle = 0 and sets the minus phase to true,
// and plus phase to false.
func (ctx *Context) MinusPhaseStart() {
ctx.PhaseCycle = 0
ctx.MinusPhase.SetBool(true)
ctx.PlusPhase.SetBool(false)
}
// PlusPhaseStart resets PhaseCycle = 0 and sets the plus phase to true,
// and minus phase to false.
func (ctx *Context) PlusPhaseStart() {
ctx.PhaseCycle = 0
ctx.MinusPhase.SetBool(false)
ctx.PlusPhase.SetBool(true)
}
// NCaBins returns 2 * ThetaCycles / CaBinCycles: stored in NetworkIxs.NCaBins.
func (ctx *Context) NCaBins() int32 {
return 2 * (ctx.ThetaCycles / CaBinCycles)
}
// NCaWeights returns (MinusCycles + PlusCycles) / CaBinCycles:
// number of weights set for SynCa weighted computation of SynCaP, SynCaD.
// Weights are stored in [GlobalScalars]
func (ctx *Context) NCaWeights() int32 {
return (ctx.MinusCycles + ctx.PlusCycles) / CaBinCycles
}
// CaBinForCycle returns the [CaBins] bin number for given CyclesTotal
// cycle index. Two ThetaCycles worth of data are stored at a CaBinCycles
// resolution, allowing learning to use any subset of data within that window.
func CaBinForCycle(cycle int32) int32 {
return (cycle / CaBinCycles) % NetworkIxs[0].NCaBins
}
// CaBinIncrement writes given increment to the [CaBins] for given absolute cycle
// (CyclesTotal), initializing with value if it is the first one, and adding otherwise.
// Given value is divided by CaBinCycles to keep it normalized as an average across the
// CaBinCycles window.
func CaBinIncrement(incr float32, cycle int32, ni, di uint32) {
bin := CaBinForCycle(cycle)
incn := incr / float32(CaBinCycles)
if (cycle % CaBinCycles) == 0 {
Neurons.Set(incn, int(ni), int(di), int(CaBins+NeuronVars(bin)))
} else {
Neurons.SetAdd(incn, int(ni), int(di), int(CaBins+NeuronVars(bin)))
}
}
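// For example, with the default ThetaCycles = 200 and CaBinCycles = 10,
// NCaBins = 2 * (200 / 10) = 40 bins spanning two theta cycles, and
// CyclesTotal = 215 falls in bin (215 / 10) % 40 = 21. Each bin ends up
// holding the average of the values written across its 10 cycles, because
// each increment is divided by CaBinCycles.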
//gosl:end
// ThetaCycleStart resets counters at start of new theta cycle of processing.
// Pass the evaluation mode associated with this theta cycle and testing bool.
// Resets the Minus and Plus phase states, and sets Cycle = 0.
func (ctx *Context) ThetaCycleStart(mode enums.Enum, testing bool) {
ctx.MinusPhase.SetBool(false)
ctx.PlusPhase.SetBool(false)
ctx.Cycle = 0
ctx.Mode = int32(mode.Int64())
ctx.Testing.SetBool(testing)
if !testing {
ctx.TrialsTotal++
}
}
// Reset resets the counters all back to zero
func (ctx *Context) Reset() {
ctx.MinusPhase.SetBool(false)
ctx.PlusPhase.SetBool(false)
ctx.PhaseCycle = 0
ctx.Cycle = 0
ctx.CyclesTotal = 0
ctx.Time = 0
ctx.TrialsTotal = 0
ctx.SlowCounter = 0
ctx.Testing.SetBool(false)
if ctx.TimePerCycle == 0 {
ctx.Defaults()
}
ctx.RandCounter.Reset()
GlobalsReset()
}
// NewContext returns a new Time struct with default parameters
func NewContext() *Context {
ctx := &Context{}
ctx.Defaults()
return ctx
}
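// Illustrative sketch, not part of the generated code: how the SlowAdapt
// trigger depends on NData and SlowInterval. The example function name is
// hypothetical.
func exampleContextSlowInc() int {
ctx := NewContext()
ctx.NData = 4 // counters advance by NData per trial
n := 0
for {
n++
slow, _ := ctx.SlowInc()
if slow {
return n // 25 trials with NData = 4 and the default SlowInterval = 100
}
}
}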
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/math32"
"cogentcore.org/lab/gosl/slbool"
)
//gosl:start
// BurstParams determine how the 5IB Burst activation is computed from
// CaP integrated spiking values in Super layers -- thresholded.
type BurstParams struct {
// Relative component of threshold on superficial activation value,
// below which it does not drive Burst (and above which, Burst = CaP).
// This is the distance between the average and maximum activation values
// within layer (e.g., 0 = average, 1 = max). Overall effective threshold
// is MAX of relative and absolute thresholds.
ThrRel float32 `max:"1" default:"0.1"`
// Absolute component of threshold on superficial activation value,
// below which it does not drive Burst (and above which, Burst = CaP).
// Overall effective threshold is MAX of relative and absolute thresholds.
ThrAbs float32 `min:"0" max:"1" default:"0.1"`
pad, pad1 float32
}
func (bp *BurstParams) Update() {
}
func (bp *BurstParams) Defaults() {
bp.ThrRel = 0.1
bp.ThrAbs = 0.1
}
// ThrFromAvgMax returns threshold from average and maximum values
func (bp *BurstParams) ThrFromAvgMax(avg, mx float32) float32 {
thr := avg + bp.ThrRel*(mx-avg)
thr = math32.Max(thr, bp.ThrAbs)
return thr
}
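// For example, with the default ThrRel = ThrAbs = 0.1, avg = 0.2 and mx = 0.8
// give thr = 0.2 + 0.1*(0.8-0.2) = 0.26, which exceeds ThrAbs, so 0.26 is used.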
// CTParams control the CT corticothalamic neuron special behavior
type CTParams struct {
// GeGain is the gain factor for context excitatory input, which is
// constant as compared to the spiking input from other pathways, so it
// must be downscaled accordingly. This can make a difference
// and may need to be scaled up or down.
GeGain float32 `default:"0.05,0.1,1,2"`
// DecayTau is the decay time constant for context Ge input.
// if > 0, decays over time so intrinsic circuit dynamics have to take over.
// For single-step copy-based cases, set to 0, while longer-time-scale
// dynamics should use ~50 or more.
DecayTau float32 `default:"0,50,70"`
// OFCposPT is set for the OFCposPT PTMaintLayer, which sets the
// GvOFCposPTMaint global variable.
OFCposPT slbool.Bool
// 1 / tau
DecayDt float32 `display:"-" json:"-" xml:"-"`
}
func (cp *CTParams) Update() {
if cp.DecayTau > 0 {
cp.DecayDt = 1 / cp.DecayTau
} else {
cp.DecayDt = 0
}
}
func (cp *CTParams) Defaults() {
cp.GeGain = 1
cp.DecayTau = 50
cp.Update()
}
// PulvinarParams provides parameters for how the plus-phase (outcome)
// state of Pulvinar thalamic relay cell neurons is computed from
// the corresponding driver neuron Burst activation (or CaP if not Super)
type PulvinarParams struct {
// DriveScale is the multiplier on driver input strength,
// which multiplies CaP from driver layer to produce Ge excitatory
// input to the Pulvinar unit.
DriveScale float32 `default:"0.1" min:"0.0"`
// FullDriveAct is the level of Max driver layer CaP at which the drivers
// fully drive the burst phase activation. If there is weaker driver input,
// then (Max/FullDriveAct) proportion of the non-driver inputs remain and
// this critically prevents the network from learning to turn activation
// off, which is difficult and severely degrades learning.
FullDriveAct float32 `default:"0.6" min:"0.01"`
// DriveLayIndex is the index of the layer that generates the driving activity
// into this one, set via the SetBuildConfig(DriveLayName) setting.
DriveLayIndex int32 `edit:"-"`
pad float32
}
func (tp *PulvinarParams) Update() {
}
func (tp *PulvinarParams) Defaults() {
tp.DriveScale = 0.1
tp.FullDriveAct = 0.6
}
// DriveGe returns effective excitatory conductance
// to use for given driver input Burst activation
func (tp *PulvinarParams) DriveGe(act float32) float32 {
return tp.DriveScale * act
}
// NonDrivePct returns the multiplier proportion of the non-driver based Ge to
// keep around, based on FullDriveAct and the max activity in driver layer.
func (tp *PulvinarParams) NonDrivePct(drvMax float32) float32 {
return 1.0 - math32.Min(1.0, drvMax/tp.FullDriveAct)
}
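// For example, with the default FullDriveAct = 0.6, a driver max CaP of 0.3
// yields NonDrivePct = 1 - min(1, 0.3/0.6) = 0.5, so half of the non-driver Ge
// is retained; at drvMax >= 0.6 the non-driver input is fully suppressed.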
// PulvinarDriver gets the driver input excitation params for Pulvinar layer.
func (ly *LayerParams) PulvinarDriver(ctx *Context, lni, di uint32, drvGe, nonDrivePct *float32) {
dli := uint32(ly.Pulvinar.DriveLayIndex)
dly := GetLayers(dli)
dpi := dly.PoolIndex(0)
drvMax := PoolAvgMax(AMCaP, AMCycle, Max, dpi, di)
*nonDrivePct = ly.Pulvinar.NonDrivePct(drvMax) // how much non-driver to keep
burst := Neurons.Value(int(dly.Indexes.NeurSt+lni), int(di), int(Burst))
*drvGe = ly.Pulvinar.DriveGe(burst)
}
//gosl:end
// note: Defaults not called on GPU
func (ly *LayerParams) CTDefaults() {
ly.Acts.Decay.Act = 0 // deep doesn't decay!
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.AHP = 0
ly.Acts.Dend.SSGi = 2 // 2 > 0 for sure
ly.Inhib.Layer.Gi = 2.2 // higher inhib for more NMDA, recurrents.
ly.Inhib.Pool.Gi = 2.2
// these are for longer temporal integration:
// ly.Acts.NMDA.Ge = 0.003
// ly.Acts.NMDA.Tau = 300
// ly.Acts.GABAB.Gbar = 0.008
}
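// DecayForNCycles sets the CT DecayTau in proportion to the given number of
// cycles, relative to the default of 50 for 200 cycles, and calls Update.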
func (cp *CTParams) DecayForNCycles(ncycles int) {
cp.DecayTau = 50 * (float32(ncycles) / float32(200))
cp.Update()
}
// CTDefaultParamsFast sets fast time-integration parameters for CTLayer.
// This is what works best in the deep_move 1 trial history case,
// vs Medium and Long
func (ly *Layer) CTDefaultParamsFast() {
ly.AddDefaultParams(func(ly *LayerParams) {
ly.CT.GeGain = 1
ly.CT.DecayTau = 0
ly.Inhib.Layer.Gi = 2.0
ly.Inhib.Pool.Gi = 2.0
ly.Acts.GabaB.Gk = 0.006
ly.Acts.NMDA.Ge = 0.004
ly.Acts.NMDA.Tau = 100
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.Sahp.Gk = 1.0
})
}
// CTDefaultParamsMedium sets medium time-integration parameters for CTLayer.
// This is what works best in the FSA test case, compared to Fast (deep_move)
// and Long (deep_music) time integration.
func (ly *Layer) CTDefaultParamsMedium() {
ly.AddDefaultParams(func(ly *LayerParams) {
ly.CT.GeGain = 2
ly.Inhib.Layer.Gi = 2.2
ly.Inhib.Pool.Gi = 2.2
ly.Acts.GabaB.Gk = 0.009
ly.Acts.NMDA.Ge = 0.008
ly.Acts.NMDA.Tau = 200
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.Sahp.Gk = 1.0
})
}
// CTDefaultParamsLong sets long time-integration parameters for CTLayer.
// This is what works best in the deep_music test case integrating over
// long time windows, compared to Medium and Fast.
func (ly *Layer) CTDefaultParamsLong() {
ly.AddDefaultParams(func(ly *LayerParams) {
ly.CT.GeGain = 1.0
ly.Inhib.Layer.Gi = 2.8
ly.Inhib.Pool.Gi = 2.8
ly.Acts.GabaB.Gk = 0.01
ly.Acts.NMDA.Ge = 0.01
ly.Acts.NMDA.Tau = 300
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.Dend.SSGi = 2 // 2 > 0 for sure
ly.Acts.Sahp.Gk = 1.0
})
}
func (lly *Layer) PTMaintDefaults() {
ly := lly.Params
ly.Acts.Decay.Act = 0 // deep doesn't decay!
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.AHP = 0
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.Sahp.Gk = 0.01 // not much pressure -- long maint
ly.Acts.GabaB.Gk = 0.01 // needed for cons, good for smaint
ly.Acts.Dend.ModGain = 1.5
// ly.Inhib.ActAvg.Nominal = 0.1 // normal
if lly.Is4D() {
ly.Inhib.ActAvg.Nominal = 0.02
}
ly.Inhib.Layer.Gi = 2.4
ly.Inhib.Pool.Gi = 2.4
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
ly.Learn.NeuroMod.AChDisInhib = 0
for _, pj := range lly.RecvPaths {
slay := pj.Send
if slay.Type == BGThalLayer {
pj.Params.Com.GType = ModulatoryG
}
}
}
func (ly *LayerParams) PTPredDefaults() {
ly.Acts.Decay.Act = 0.12 // keep it dynamically changing
ly.Acts.Decay.Glong = 0.6
ly.Acts.Decay.AHP = 0
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.Sahp.Gk = 0.1 // more
ly.Acts.KNa.Slow.Gk = 0.2 // todo: more?
ly.Inhib.Layer.Gi = 0.8
ly.Inhib.Pool.Gi = 0.8
ly.CT.GeGain = 0.05
ly.CT.DecayTau = 50
// regular:
// ly.Acts.GabaB.Gk = 0.006
// ly.Acts.NMDA.Ge = 0.004
// ly.Acts.NMDA.Tau = 100
}
// called in Defaults for Pulvinar layer type
func (ly *LayerParams) PulvinarDefaults() {
ly.Acts.Decay.Act = 0
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.AHP = 0
ly.Learn.RLRate.SigmoidMin = 1.0 // 1.0 generally better but worth trying 0.05 too
}
// PulvinarPostBuild does post-Build config of Pulvinar based on BuildConfig options
func (ly *Layer) PulvinarPostBuild() {
ly.Params.Pulvinar.DriveLayIndex = ly.BuildConfigFindLayer("DriveLayName", true)
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"strings"
"github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/paths"
)
// AddSuperLayer2D adds a Super Layer of given size, with given name.
func (net *Network) AddSuperLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, SuperLayer, nNeurY, nNeurX)
return ly
}
// AddSuperLayer4D adds a Super Layer of given size, with given name.
func (net *Network) AddSuperLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, SuperLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
return ly
}
// AddCTLayer2D adds a CT Layer of given size, with given name.
func (net *Network) AddCTLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, CTLayer, nNeurY, nNeurX)
return ly
}
// AddCTLayer4D adds a CT Layer of given size, with given name.
func (net *Network) AddCTLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, CTLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
return ly
}
// AddPulvLayer2D adds a Pulvinar Layer of given size, with given name.
func (net *Network) AddPulvLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, PulvinarLayer, nNeurY, nNeurX)
return ly
}
// AddPulvLayer4D adds a Pulvinar Layer of given size, with given name.
func (net *Network) AddPulvLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, PulvinarLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
return ly
}
// AddSuperCT2D adds a superficial (SuperLayer) and corresponding CT (CT suffix) layer
// with CTCtxtPath pathway from Super to CT using given pathway pattern,
// and NO Pulvinar layer.
// CT is placed Behind Super.
func (net *Network) AddSuperCT2D(name, pathClass string, shapeY, shapeX int, space float32, pat paths.Pattern) (super, ct *Layer) {
super = net.AddSuperLayer2D(name, shapeY, shapeX)
ct = net.AddCTLayer2D(name+"CT", shapeY, shapeX)
ct.PlaceBehind(super, space)
net.ConnectSuperToCT(super, ct, pat, pathClass)
super.AddClass(name)
ct.AddClass(name)
return
}
// AddSuperCT4D adds a superficial (SuperLayer) and corresponding CT (CT suffix) layer
// with CTCtxtPath pathway from Super to CT using given pathway pattern,
// and NO Pulvinar layer.
// CT is placed Behind Super.
func (net *Network) AddSuperCT4D(name, pathClass string, nPoolsY, nPoolsX, nNeurY, nNeurX int, space float32, pat paths.Pattern) (super, ct *Layer) {
super = net.AddSuperLayer4D(name, nPoolsY, nPoolsX, nNeurY, nNeurX)
ct = net.AddCTLayer4D(name+"CT", nPoolsY, nPoolsX, nNeurY, nNeurX)
ct.PlaceBehind(super, space)
net.ConnectSuperToCT(super, ct, pat, pathClass)
super.AddClass(name)
ct.AddClass(name)
return
}
// AddPulvForSuper adds a Pulvinar for given superficial layer (SuperLayer)
// with a P suffix. The Pulv.Driver is set to Super, as is the Class on Pulv.
// The Pulv layer needs other CT connections from higher up to predict this layer.
// Pulvinar is positioned behind the CT layer.
func (net *Network) AddPulvForSuper(super *Layer, space float32) *Layer {
name := super.Name
shp := super.Shape
var plv *Layer
if shp.NumDims() == 2 {
plv = net.AddPulvLayer2D(name+"P", shp.DimSize(0), shp.DimSize(1))
} else {
plv = net.AddPulvLayer4D(name+"P", shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
}
plv.SetBuildConfig("DriveLayName", name)
plv.Pos.SetBehind(name+"CT", space)
plv.AddClass(name)
return plv
}
// AddPulvForLayer adds a Pulvinar for given Layer (typically an Input type layer)
// with a P suffix. The Pulv.Driver is set to given Layer.
// The Pulv layer needs other CT connections from higher up to predict this layer.
// Pulvinar is positioned behind the given Layer.
func (net *Network) AddPulvForLayer(lay *Layer, space float32) *Layer {
name := lay.Name
shp := lay.Shape
var plv *Layer
if shp.NumDims() == 2 {
plv = net.AddPulvLayer2D(name+"P", shp.DimSize(0), shp.DimSize(1))
} else {
plv = net.AddPulvLayer4D(name+"P", shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
}
plv.SetBuildConfig("DriveLayName", name)
plv.PlaceBehind(lay, space)
return plv
}
// ConnectToPulv adds the following pathways:
// layers | class | path type | path pat
// ------------+------------+-------------+----------
// ct ->pulv | "CTToPulv" | ForwardPath | toPulvPat
// pulv->super | "FromPulv" | BackPath | fmPulvPat
// pulv->ct | "FromPulv" | BackPath | fmPulvPat
//
// Typically pulv is a different shape than super and ct, so use Full or appropriate
// topological pattern. Adds optional pathClass name as a suffix.
func (net *Network) ConnectToPulv(super, ct, pulv *Layer, toPulvPat, fmPulvPat paths.Pattern, pathClass string) (toPulv, toSuper, toCT *Path) {
pathClass = params.AddClass(pathClass)
toPulv = net.ConnectLayers(ct, pulv, toPulvPat, ForwardPath)
toPulv.AddClass("CTToPulv", pathClass)
toSuper = net.ConnectLayers(pulv, super, fmPulvPat, BackPath)
toSuper.AddClass("FromPulv", pathClass)
toCT = net.ConnectLayers(pulv, ct, fmPulvPat, BackPath)
toCT.AddClass("FromPulv", pathClass)
return
}
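// Illustrative sketch, not part of the generated code: wiring a minimal
// Super / CT / Pulvinar predictive loop. The function and layer names are
// hypothetical; sizes and spacing are arbitrary.
func exampleConnectToPulv(net *Network) {
one2one := paths.NewOneToOne()
full := paths.NewFull()
super, ct := net.AddSuperCT2D("V2", "V2Path", 10, 10, 2, one2one)
pulv := net.AddPulvForSuper(super, 2)
net.ConnectToPulv(super, ct, pulv, full, full, "V2Path")
}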
// ConnectCtxtToCT adds a CTCtxtPath from given sending layer to a CT layer
func (net *Network) ConnectCtxtToCT(send, recv *Layer, pat paths.Pattern) *Path {
return net.ConnectLayers(send, recv, pat, CTCtxtPath)
}
// ConnectCTSelf adds a Self (Lateral) CTCtxtPath pathway within a CT layer,
// in addition to a regular lateral pathway, which supports active maintenance.
// The CTCtxtPath has a Class label of CTSelfCtxt, and the regular one is CTSelfMaint
// with optional class added.
func (net *Network) ConnectCTSelf(ly *Layer, pat paths.Pattern, pathClass string) (ctxt, maint *Path) {
pathClass = params.AddClass(pathClass)
ctxt = net.ConnectLayers(ly, ly, pat, CTCtxtPath)
ctxt.AddClass("CTSelfCtxt", pathClass)
maint = net.LateralConnectLayer(ly, pat)
maint.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 0.5 // normalized separately
pt.Com.GType = MaintG
})
maint.AddClass("CTSelfMaint", pathClass)
return
}
// ConnectSuperToCT adds a CTCtxtPath from given sending Super layer to a CT layer
// This automatically sets the FromSuper flag to engage proper defaults,
// Uses given pathway pattern -- e.g., Full, OneToOne, or PoolOneToOne
func (net *Network) ConnectSuperToCT(send, recv *Layer, pat paths.Pattern, pathClass string) *Path {
pathClass = params.AddClass(pathClass)
pt := net.ConnectLayers(send, recv, pat, CTCtxtPath)
pt.AddClass("CTFromSuper", pathClass)
return pt
}
// AddInputPulv2D adds an Input and a Pulvinar Layer of given size, with given name.
// The Input layer is set as the Driver of the Pulvinar Layer.
// Both layers have AddClass(name) called to allow shared params.
func (net *Network) AddInputPulv2D(name string, nNeurY, nNeurX int, space float32) (*Layer, *Layer) {
in := net.AddLayer2D(name, InputLayer, nNeurY, nNeurX)
pulv := net.AddPulvLayer2D(name+"P", nNeurY, nNeurX)
pulv.SetBuildConfig("DriveLayName", name)
in.AddClass(name)
pulv.AddClass(name)
pulv.PlaceBehind(in, space)
return in, pulv
}
// AddInputPulv4D adds an Input and a Pulvinar Layer of given size, with given name.
// The Input layer is set as the Driver of the Pulvinar Layer.
// Both layers have AddClass(name) called to allow shared params.
func (net *Network) AddInputPulv4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int, space float32) (*Layer, *Layer) {
in := net.AddLayer4D(name, InputLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
pulv := net.AddPulvLayer4D(name+"P", nPoolsY, nPoolsX, nNeurY, nNeurX)
pulv.SetBuildConfig("DriveLayName", name)
in.AddClass(name)
pulv.AddClass(name)
pulv.PlaceBehind(in, space)
return in, pulv
}
//////////////////////////////////////////////////////////////////
// PTMaintLayer
// AddPTMaintLayer2D adds a PTMaintLayer of given size, with given name.
func (net *Network) AddPTMaintLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, PTMaintLayer, nNeurY, nNeurX)
return ly
}
// AddPTMaintLayer4D adds a PTMaintLayer of given size, with given name.
func (net *Network) AddPTMaintLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, PTMaintLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
return ly
}
// ConnectPTMaintSelf adds a Self (Lateral) pathway within a PTMaintLayer,
// which supports active maintenance, with a class of PTSelfMaint
func (net *Network) ConnectPTMaintSelf(ly *Layer, pat paths.Pattern, pathClass string) *Path {
pathClass = params.AddClass(pathClass, "PFCPath")
pt := net.LateralConnectLayer(ly, pat)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Com.GType = MaintG
pt.PathScale.Rel = 1 // use abs to manipulate
pt.PathScale.Abs = 4 // strong..
pt.Learn.LRate.Base = 0.0001 // slower > faster
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.5 // high variance so not just spreading out over time
})
pt.AddClass("PTSelfMaint", pathClass)
return pt
}
// AddPTMaintThalForSuper adds a PTMaint pyramidal tract active maintenance layer
// and a BG gated Thalamus layer for given superficial layer (SuperLayer)
// and associated CT, with given thal suffix (e.g., MD, VM).
// PT and Thal have AddClass(super.Name) called to allow shared params.
// Pathways are made with given classes: SuperToPT, PTSelfMaint, PTtoThal, ThalToPT,
// with optional extra class.
// if selfMaint is true, the SMaint self-maintenance mechanism is used
// instead of lateral connections.
// The PT and BGThal layers are positioned behind the CT layer.
func (net *Network) AddPTMaintThalForSuper(super, ct *Layer, thalSuffix, pathClass string, superToPT, ptSelf, ptThal paths.Pattern, selfMaint bool, space float32) (ptMaint, thal *Layer) {
pathClass = params.AddClass(pathClass, "PFCPath")
name := super.Name
shp := super.Shape
is4D := false
ptExtra := 1 // extra size for pt layers
if shp.NumDims() == 2 {
ptMaint = net.AddPTMaintLayer2D(name+"PT", shp.DimSize(0)*ptExtra, shp.DimSize(1)*ptExtra)
thal = net.AddBGThalLayer2D(name+thalSuffix, shp.DimSize(0), shp.DimSize(1))
} else {
is4D = true
ptMaint = net.AddPTMaintLayer4D(name+"PT", shp.DimSize(0), shp.DimSize(1), shp.DimSize(2)*ptExtra, shp.DimSize(3)*ptExtra)
thal = net.AddBGThalLayer4D(name+thalSuffix, shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
}
ptMaint.AddClass(name)
thal.AddClass(name)
if selfMaint {
ptMaint.AddDefaultParams(func(ly *LayerParams) {
ly.Acts.SMaint.On.SetBool(true)
ly.Acts.GabaB.Gk = 0.015
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Pool.Gi = 0.5
})
if is4D {
ptMaint.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Pool.On.SetBool(true)
})
}
}
pthal, thalpt := net.BidirConnectLayers(ptMaint, thal, ptThal)
pthal.AddClass("PTtoThal", pathClass)
thalpt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Rel = 1.0
pt.Com.GType = ModulatoryG // modulatory -- control with extra ModGain factor
pt.Learn.Learn.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
thalpt.AddClass("ThalToPT", pathClass)
// if is4D {
// fmThalInhib := func(pt *PathParams) {
// pt.PathScale.Rel = 1.0
// pt.PathScale.Abs = 1.0
// pt.Learn.Learn.SetBool(false)
// pt.SWts.Adapt.On.SetBool(false)
// pt.SWts.Init.SPct = 0
// pt.SWts.Init.Mean = 0.8
// pt.SWts.Init.Var = 0.0
// }
// note: holding off on these for now -- thal modulation should handle..
// ti := net.ConnectLayers(thal, pt, full, InhibPath)
// ti.DefaultParams = fmThalInhib
// ti.AddClass("ThalToPFCInhib")
// ti = net.ConnectLayers(thal, ct, full, InhibPath)
// ti.DefaultParams = fmThalInhib
// ti.AddClass("ThalToPFCInhib")
sthal := net.ConnectLayers(super, thal, superToPT, ForwardPath) // shortcuts
sthal.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Rel = 1.0
pt.PathScale.Abs = 4.0 // key param for driving gating -- if too strong, premature gating
pt.Learn.Learn.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8 // typically 1to1
pt.SWts.Init.Var = 0.0
})
sthal.AddClass("SuperToThal", pathClass)
pt := net.ConnectLayers(super, ptMaint, superToPT, ForwardPath)
pt.AddDefaultParams(func(pt *PathParams) {
// one-to-one from super -- just use fixed nonlearning path so can control behavior easily
pt.PathScale.Rel = 1 // irrelevant -- only normal path
pt.PathScale.Abs = 0.5 // BGThal modulates this so strength doesn't cause wrong CS gating
pt.Learn.Learn.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
pt.AddClass("SuperToPT", pathClass)
if !selfMaint {
net.ConnectPTMaintSelf(ptMaint, ptSelf, pathClass)
}
if ct != nil {
ptMaint.PlaceBehind(ct, space)
} else {
ptMaint.PlaceBehind(super, space)
}
ptMaint.Pos.Scale = float32(1) / float32(ptExtra)
thal.PlaceBehind(ptMaint, space)
return
}
//////////////////////////////////////////////////////////////////
// PTPredLayer
// AddPTPredLayer2D adds a PTPredLayer of given size, with given name.
func (net *Network) AddPTPredLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, PTPredLayer, nNeurY, nNeurX)
return ly
}
// AddPTPredLayer4D adds a PTPredLayer of given size, with given name.
func (net *Network) AddPTPredLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, PTPredLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
return ly
}
// ConnectPTPredSelf adds a Self (Lateral) pathway within a PTPredLayer,
// which supports active maintenance, with a class of PTSelfMaint
func (net *Network) ConnectPTPredSelf(ly *Layer, pat paths.Pattern) *Path {
return net.LateralConnectLayer(ly, pat).AddClass("PTSelfMaint")
}
// ConnectPTToPulv connects PT, PTPred with given Pulv:
// PT -> Pulv is class PTToPulv; PT does NOT receive back from Pulv
// PTPred -> Pulv is class PTPredToPulv,
// From Pulv = type = Back, class = FromPulv
// toPulvPat is the paths.Pattern PT -> Pulv and fmPulvPat is Pulv -> PTPred
// Typically Pulv is a different shape than PTPred, so use Full or appropriate
// topological pattern. adds optional class name to pathway.
func (net *Network) ConnectPTToPulv(ptMaint, ptPred, pulv *Layer, toPulvPat, fmPulvPat paths.Pattern, pathClass string) (ptToPulv, ptPredToPulv, toPTPred *Path) {
pathClass = params.AddClass(pathClass, "PFCPath")
ptToPulv = net.ConnectLayers(ptMaint, pulv, toPulvPat, ForwardPath)
ptToPulv.AddClass("PTToPulv", pathClass)
ptPredToPulv = net.ConnectLayers(ptPred, pulv, toPulvPat, ForwardPath)
ptPredToPulv.AddClass("PTPredToPulv", pathClass)
toPTPred = net.ConnectLayers(pulv, ptPred, fmPulvPat, BackPath)
toPTPred.AddClass("FromPulv", pathClass)
return
}
// ConnectPTpToPulv connects PTPred with given Pulv:
// PTPred -> Pulv is class PTPredToPulv,
// From Pulv = type = Back, class = FromPulv
// toPulvPat is the paths.Pattern PTPred -> Pulv and fmPulvPat is Pulv -> PTPred
// Typically Pulv is a different shape than PTPred, so use Full or appropriate
// topological pattern. adds optional class name to pathway.
func (net *Network) ConnectPTpToPulv(ptPred, pulv *Layer, toPulvPat, fmPulvPat paths.Pattern, pathClass string) (ptToPulv, ptPredToPulv, toPTPred *Path) {
pathClass = params.AddClass(pathClass, "PFCPath")
ptPredToPulv = net.ConnectLayers(ptPred, pulv, toPulvPat, ForwardPath)
ptPredToPulv.AddClass("PTPredToPulv", pathClass)
toPTPred = net.ConnectLayers(pulv, ptPred, fmPulvPat, BackPath)
toPTPred.AddClass("FromPulv", pathClass)
return
}
// AddPTPredLayer adds a PTPred pyramidal tract prediction layer
// for given PTMaint layer and associated CT.
// Sets AddClass(super.Name) to allow shared params.
// Pathways are made with given classes: PTtoPred, CTtoPred
// The PTPred layer is positioned behind the PT layer.
func (net *Network) AddPTPredLayer(ptMaint, ct *Layer, ptToPredPath, ctToPredPath paths.Pattern, pathClass string, space float32) (ptPred *Layer) {
pathClass = params.AddClass(pathClass, "PFCPath")
name := strings.TrimSuffix(ptMaint.Name, "PT")
// shp := ptMaint.Shape
shp := ct.Shape
if shp.NumDims() == 2 {
ptPred = net.AddPTPredLayer2D(name+"PTp", shp.DimSize(0), shp.DimSize(1))
} else {
ptPred = net.AddPTPredLayer4D(name+"PTp", shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
}
ptPred.AddClass(name)
ptPred.PlaceBehind(ptMaint, space)
pt := net.ConnectCtxtToCT(ptMaint, ptPred, ptToPredPath)
pt.AddClass("PTtoPred", pathClass)
pt = net.ConnectLayers(ct, ptPred, ctToPredPath, ForwardPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Rel = 1 // 1 > 0.5
pt.PathScale.Abs = 2.0 // 2?
})
pt.AddClass("CTtoPred", pathClass)
// note: ptpred does not connect to thalamus -- it is only active on trial *after* thal gating
return
}
// AddPFC4D adds a "full stack" of 4D PFC layers:
// * AddSuperCT4D (Super and CT)
// * AddPTMaintThal (PTMaint, BGThal)
// * AddPTPredLayer (PTPred)
// with given name prefix, which is also set as the Class for all layers & paths (+"Path"),
// and suffix for the BGThal layer (e.g., "MD" or "VM" etc for different thalamic nuclei).
// Sets PFCLayer as additional class for all cortical layers.
// OneToOne and PoolOneToOne connectivity is used between layers.
// decayOnRew determines the Act.Decay.OnRew setting (true for OFC, ACC type layers).
// if selfMaint is true, the SMaint self-maintenance mechanism is used
// instead of lateral connections.
// CT layer uses the Medium timescale params.
// use, e.g., pfcCT.AddDefaultParams(func (ly *LayerParams) {ly.Inhib.Layer.Gi = 2.8} )
// to change default params.
func (net *Network) AddPFC4D(name, thalSuffix string, nPoolsY, nPoolsX, nNeurY, nNeurX int, decayOnRew, selfMaint bool, space float32) (pfc, pfcCT, pfcPT, pfcPTp, pfcThal *Layer) {
p1to1 := paths.NewPoolOneToOne()
// p1to1rnd := paths.NewPoolUniformRand()
// p1to1rnd.PCon = 0.5
one2one := paths.NewOneToOne()
pathClass := name + "Path"
layClass := "PFCLayer"
pfc, pfcCT = net.AddSuperCT4D(name, pathClass, nPoolsY, nPoolsX, nNeurY, nNeurX, space, one2one)
pfcCT.AddClass(name)
pfc.AddClass(layClass)
pfcCT.AddClass(layClass)
// paths are: super->PT, PT self
pfcPT, pfcThal = net.AddPTMaintThalForSuper(pfc, pfcCT, thalSuffix, pathClass, one2one, p1to1, one2one, selfMaint, space)
pfcPTp = net.AddPTPredLayer(pfcPT, pfcCT, p1to1, p1to1, pathClass, space)
pfcPTp.AddClass(name)
pfcPT.AddClass(layClass)
pfcPTp.AddClass(layClass)
pfcThal.PlaceBehind(pfcPTp, space)
net.ConnectLayers(pfcPT, pfcCT, p1to1, ForwardPath).AddClass(pathClass)
pfcParams := func(ly *LayerParams) {
ly.Acts.Decay.Act = 0
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.OnRew.SetBool(decayOnRew)
ly.Inhib.ActAvg.Nominal = 0.025
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 2.2
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 0.8
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
}
pfc.AddDefaultParams(pfcParams)
pfcCT.CTDefaultParamsMedium()
pfcCT.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.025
ly.Inhib.Layer.Gi = 4 // 4? 2.8 orig
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 1.2
ly.Acts.Decay.OnRew.SetBool(decayOnRew)
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
})
// pfcPT.AddDefaultParams(pfcParams)
// pfcPT.AddDefaultParams(func(ly *LayerParams) {
// ly.Inhib.ActAvg.Nominal = 0.05 // more active
// ly.Inhib.Layer.Gi = 2.4 // 2.4 orig
// ly.Inhib.Pool.Gi = 2.4
// ly.Learn.NeuroMod.AChDisInhib = 0 // maybe better -- test further
// })
pfcPTp.AddDefaultParams(pfcParams)
pfcPTp.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Layer.Gi = 1.2 // 0.8 orig
ly.Inhib.Pool.Gi = 0.8
})
pfcThal.AddDefaultParams(pfcParams)
pfcThal.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Layer.Gi = 2.0 // 1.1 orig
ly.Inhib.Pool.Gi = 0.6
})
return
}
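// Illustrative usage sketch (not part of the generated code): adding a full
// 4D PFC stack to a network. The constructor, layer name prefix, thalamic
// suffix, and sizes below are assumptions chosen for illustration only.
//
//	net := NewNetwork("PFCDemo") // assumed network constructor
//	pfc, pfcCT, pfcPT, pfcPTp, pfcThal := net.AddPFC4D(
//		"OFC", "MD", // name prefix and BGThal suffix
//		2, 2, 4, 4, // nPoolsY, nPoolsX, nNeurY, nNeurX
//		true,  // decayOnRew: decay on reward, as for OFC / ACC
//		false, // selfMaint: false = lateral maintenance connections
//		2)     // space between layers in the display
//	_, _, _, _, _ = pfc, pfcCT, pfcPT, pfcPTp, pfcThal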
// AddPFC2D adds a "full stack" of 2D PFC layers:
// * AddSuperCT2D (Super and CT)
// * AddPTMaintThal (PTMaint, BGThal)
// * AddPTPredLayer (PTPred)
// with given name prefix, which is also set as the Class for all layers & paths (+"Path"),
// and suffix for the BGThal layer (e.g., "MD" or "VM" etc for different thalamic nuclei).
// Sets PFCLayer as additional class for all cortical layers.
// OneToOne and Full connectivity is used between layers.
// decayOnRew determines the Act.Decay.OnRew setting (true for OFC- and ACC-type layers).
// if selfMaint is true, the SMaint self-maintenance mechanism is used
// instead of lateral connections.
// CT layer uses the Medium timescale params.
func (net *Network) AddPFC2D(name, thalSuffix string, nNeurY, nNeurX int, decayOnRew, selfMaint bool, space float32) (pfc, pfcCT, pfcPT, pfcPTp, pfcThal *Layer) {
one2one := paths.NewOneToOne()
full := paths.NewFull()
// rnd := paths.NewUniformRand()
// rnd.PCon = 0.5
pathClass := name + "Path"
layClass := "PFCLayer"
pfc, pfcCT = net.AddSuperCT2D(name, pathClass, nNeurY, nNeurX, space, one2one)
pfcCT.AddClass(name)
pfc.AddClass(layClass)
pfcCT.AddClass(layClass)
// paths are: super->PT, PT self
pfcPT, pfcThal = net.AddPTMaintThalForSuper(pfc, pfcCT, thalSuffix, pathClass, one2one, full, one2one, selfMaint, space)
pfcPTp = net.AddPTPredLayer(pfcPT, pfcCT, full, full, pathClass, space)
pfcPTp.AddClass(name)
pfcPT.AddClass(layClass)
pfcPTp.AddClass(layClass)
pfcThal.PlaceBehind(pfcPTp, space)
net.ConnectLayers(pfcPT, pfcCT, full, ForwardPath).AddClass(pathClass)
pfcParams := func(ly *LayerParams) {
ly.Acts.Decay.Act = 0
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.OnRew.SetBool(decayOnRew)
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.9
ly.Inhib.Pool.On.SetBool(false)
}
pfc.AddDefaultParams(pfcParams)
pfcCT.CTDefaultParamsMedium()
pfcCT.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.4
ly.Inhib.Pool.On.SetBool(false)
ly.Acts.Decay.OnRew.SetBool(decayOnRew)
})
// pfcPT.AddDefaultParams(pfcParams)
// pfcPT.AddDefaultParams(func(ly *LayerParams) {
// ly.Inhib.ActAvg.Nominal = 0.3 // more active
// ly.Inhib.Layer.Gi = 2.4 // 2.4 orig
// ly.Inhib.Pool.Gi = 2.4
// ly.Learn.NeuroMod.AChDisInhib = 0 // maybe better -- test further
// })
pfcPTp.AddDefaultParams(pfcParams)
pfcPTp.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.Gi = 0.8
})
pfcThal.AddDefaultParams(pfcParams)
pfcThal.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Layer.Gi = 0.6
})
return
}
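// Illustrative usage sketch (not part of the generated code): the 2D variant
// takes the same arguments minus the pool sizes; the name and sizes here are
// hypothetical.
//
//	pfc, pfcCT, pfcPT, pfcPTp, pfcThal := net.AddPFC2D("ACC", "MD", 10, 10, true, false, 2)
//	_, _, _, _, _ = pfc, pfcCT, pfcPT, pfcPTp, pfcThal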
// ConnectToPFC connects given predictively learned input to all
// relevant PFC layers:
// lay -> pfc (skipped if lay == nil)
// layP -> pfc, layP <-> pfcCT
// pfcPTp <-> layP
// if pfcPT != nil: pfcPT <-> layP
// The PFCPath class name is used for pathways if pathClass is empty.
func (net *Network) ConnectToPFC(lay, layP, pfc, pfcCT, pfcPT, pfcPTp *Layer, pat paths.Pattern, pathClass string) {
if pathClass == "" {
pathClass = "PFCPath"
}
if lay != nil {
net.ConnectLayers(lay, pfc, pat, ForwardPath).AddClass(pathClass)
pt := net.ConnectLayers(lay, pfcPTp, pat, ForwardPath) // ptp needs more input
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 4
})
pt.AddClass("ToPTp ", pathClass)
}
net.ConnectToPulv(pfc, pfcCT, layP, pat, pat, pathClass)
if pfcPT == nil {
net.ConnectPTpToPulv(pfcPTp, layP, pat, pat, pathClass)
} else {
net.ConnectPTToPulv(pfcPT, pfcPTp, layP, pat, pat, pathClass)
}
}
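// Illustrative usage sketch (not part of the generated code): connecting a
// predictively learned input layer and its Pulvinar prediction layer to a
// PFC stack; cs and csP are hypothetical layers standing in for such a pair.
//
//	pfc, pfcCT, pfcPT, pfcPTp, _ := net.AddPFC2D("OFC", "MD", 10, 10, true, false, 2)
//	net.ConnectToPFC(cs, csP, pfc, pfcCT, pfcPT, pfcPTp, paths.NewFull(), "")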
// ConnectToPFCBack connects given predictively learned input to all
// relevant PFC layers:
// lay -> pfc using a BackPath -- weaker
// layP -> pfc, layP <-> pfcCT
// pfcPT <-> layP, pfcPTp <-> layP
func (net *Network) ConnectToPFCBack(lay, layP, pfc, pfcCT, pfcPT, pfcPTp *Layer, pat paths.Pattern, pathClass string) {
if pathClass == "" {
pathClass = "PFCPath"
}
tp := net.ConnectLayers(lay, pfc, pat, BackPath)
tp.AddClass(pathClass)
net.ConnectToPulv(pfc, pfcCT, layP, pat, pat, pathClass)
net.ConnectPTToPulv(pfcPT, pfcPTp, layP, pat, pat, pathClass)
pt := net.ConnectLayers(lay, pfcPTp, pat, ForwardPath) // ptp needs more input
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 4
})
pt.AddClass("ToPTp ", pathClass)
}
// ConnectToPFCBidir connects given predictively learned input to all
// relevant PFC layers, using bidirectional connections to super layers.
// lay <-> pfc bidirectional
// layP -> pfc, layP <-> pfcCT
// pfcPT <-> layP, pfcPTp <-> layP
func (net *Network) ConnectToPFCBidir(lay, layP, pfc, pfcCT, pfcPT, pfcPTp *Layer, pat paths.Pattern, pathClass string) (ff, fb *Path) {
if pathClass == "" {
pathClass = "PFCPath"
}
ff, fb = net.BidirConnectLayers(lay, pfc, pat)
ff.AddClass(pathClass)
fb.AddClass(pathClass)
net.ConnectToPulv(pfc, pfcCT, layP, pat, pat, pathClass)
net.ConnectPTToPulv(pfcPT, pfcPTp, layP, pat, pat, pathClass)
pt := net.ConnectLayers(lay, pfcPTp, pat, ForwardPath) // ptp needs more input
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 4
})
pt.AddClass("ToPTp ", pathClass)
return
}
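// Illustrative usage sketch (not part of the generated code): the Back and
// Bidir variants differ only in how the input connects to the super layer;
// m1 and m1P are hypothetical input and Pulvinar layers.
//
//	// weaker, top-down style input to the super layer:
//	net.ConnectToPFCBack(m1, m1P, pfc, pfcCT, pfcPT, pfcPTp, paths.NewFull(), "")
//	// or bidirectional connectivity with the super layer:
//	ff, fb := net.ConnectToPFCBidir(m1, m1P, pfc, pfcCT, pfcPT, pfcPTp, paths.NewFull(), "")
//	_, _ = ff, fb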
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
func (pj *PathParams) CTCtxtPathDefaults() {
pj.Learn.DWt.LearnThr = 0
}
// Code generated by "core generate -add-types -gosl"; DO NOT EDIT.
package axon
import (
"cogentcore.org/core/enums"
)
var _PathGTypesValues = []PathGTypes{0, 1, 2, 3, 4}
// PathGTypesN is the highest valid value for type PathGTypes, plus one.
//
//gosl:start
const PathGTypesN PathGTypes = 5
//gosl:end
var _PathGTypesValueMap = map[string]PathGTypes{`ExcitatoryG`: 0, `InhibitoryG`: 1, `ModulatoryG`: 2, `MaintG`: 3, `ContextG`: 4}
var _PathGTypesDescMap = map[PathGTypes]string{0: `Excitatory pathways drive Ge conductance on receiving neurons, which send to GeRaw and GeSyn neuron variables.`, 1: `Inhibitory pathways drive Gi inhibitory conductance, which send to GiRaw and GiSyn neuron variables.`, 2: `Modulatory pathways have a multiplicative effect on other inputs, which send to GModRaw and GModSyn neuron variables.`, 3: `Maintenance pathways drive unique set of NMDA channels that support strong active maintenance abilities. Send to GMaintRaw and GMaintSyn neuron variables.`, 4: `Context pathways are for inputs to CT layers, which update only at the end of the plus phase, and send to CtxtGe.`}
var _PathGTypesMap = map[PathGTypes]string{0: `ExcitatoryG`, 1: `InhibitoryG`, 2: `ModulatoryG`, 3: `MaintG`, 4: `ContextG`}
// String returns the string representation of this PathGTypes value.
func (i PathGTypes) String() string { return enums.String(i, _PathGTypesMap) }
// SetString sets the PathGTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *PathGTypes) SetString(s string) error {
return enums.SetString(i, s, _PathGTypesValueMap, "PathGTypes")
}
// Int64 returns the PathGTypes value as an int64.
func (i PathGTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the PathGTypes value from an int64.
func (i *PathGTypes) SetInt64(in int64) { *i = PathGTypes(in) }
// Desc returns the description of the PathGTypes value.
func (i PathGTypes) Desc() string { return enums.Desc(i, _PathGTypesDescMap) }
// PathGTypesValues returns all possible values for the type PathGTypes.
func PathGTypesValues() []PathGTypes { return _PathGTypesValues }
// Values returns all possible values for the type PathGTypes.
func (i PathGTypes) Values() []enums.Enum { return enums.Values(_PathGTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i PathGTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *PathGTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "PathGTypes")
}
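// Illustrative usage sketch (not part of the generated code): all of the
// generated enums in this file implement the cogentcore enums interface, so
// values round-trip through their string names in the same way.
//
//	var gt PathGTypes
//	if err := gt.SetString("ModulatoryG"); err == nil {
//		_ = gt.String() // "ModulatoryG"
//	}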
var _GlobalScalarVarsValues = []GlobalScalarVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57}
// GlobalScalarVarsN is the highest valid value for type GlobalScalarVars, plus one.
//
//gosl:start
const GlobalScalarVarsN GlobalScalarVars = 58
//gosl:end
var _GlobalScalarVarsValueMap = map[string]GlobalScalarVars{`GvRew`: 0, `GvHasRew`: 1, `GvRewPred`: 2, `GvPrevPred`: 3, `GvHadRew`: 4, `GvDA`: 5, `GvDAtonic`: 6, `GvACh`: 7, `GvNE`: 8, `GvSer`: 9, `GvAChRaw`: 10, `GvGoalMaint`: 11, `GvVSMatrixJustGated`: 12, `GvVSMatrixHasGated`: 13, `GvCuriosityPoolGated`: 14, `GvTime`: 15, `GvEffort`: 16, `GvUrgencyRaw`: 17, `GvUrgency`: 18, `GvHasPosUS`: 19, `GvHadPosUS`: 20, `GvNegUSOutcome`: 21, `GvHadNegUSOutcome`: 22, `GvPVposSum`: 23, `GvPVpos`: 24, `GvPVnegSum`: 25, `GvPVneg`: 26, `GvPVposEst`: 27, `GvPVposVar`: 28, `GvPVnegEst`: 29, `GvPVnegVar`: 30, `GvGoalDistEst`: 31, `GvGoalDistPrev`: 32, `GvProgressRate`: 33, `GvGiveUpUtility`: 34, `GvContUtility`: 35, `GvGiveUpTiming`: 36, `GvContTiming`: 37, `GvGiveUpProgress`: 38, `GvContProgress`: 39, `GvGiveUpSum`: 40, `GvContSum`: 41, `GvGiveUpProb`: 42, `GvGiveUp`: 43, `GvGaveUp`: 44, `GvVSPatchPos`: 45, `GvVSPatchPosThr`: 46, `GvVSPatchPosRPE`: 47, `GvVSPatchPosSum`: 48, `GvVSPatchPosPrev`: 49, `GvVSPatchPosVar`: 50, `GvLHbDip`: 51, `GvLHbBurst`: 52, `GvLHbPVDA`: 53, `GvCeMpos`: 54, `GvCeMneg`: 55, `GvVtaDA`: 56, `GvCaBinWts`: 57}
var _GlobalScalarVarsDescMap = map[GlobalScalarVars]string{0: `Rew is the external reward value. Must also set HasRew flag when Rew is set, otherwise it is ignored. This is computed by the Rubicon algorithm from US inputs set by Net.Rubicon methods, and can be directly set in simpler RL cases.`, 1: `HasRew must be set to true (1) when an external reward / US input is present, otherwise Rew is ignored. This is also set when Rubicon BOA model gives up. This drives ACh release in the Rubicon model.`, 2: `RewPred is the reward prediction, computed by a special reward prediction layer, e.g., the VSPatch layer in the Rubicon algorithm.`, 3: `PrevPred is previous time step reward prediction, e.g., for TDPredLayer`, 4: `HadRew is HasRew state from the previous trial, copied from HasRew in NewState. Used for updating Effort, Urgency at start of new trial.`, 5: `DA is phasic dopamine that drives learning moreso than performance, representing reward prediction error, signaled as phasic increases or decreases in activity relative to a tonic baseline, which is represented by a value of 0. Released by the VTA (ventral tegmental area), or SNc (substantia nigra pars compacta).`, 6: `DAtonic is tonic dopamine, which has modulatory instead of learning effects. Increases can drive greater propensity to engage in activities by biasing Go vs No pathways in the basal ganglia, for example as a function of Urgency.`, 7: `ACh is acetylcholine, activated by salient events, particularly at the onset of a reward / punishment outcome (US), or onset of a conditioned stimulus (CS). Driven by BLA -> PPtg that detects changes in BLA activity, via LDTLayer type.`, 8: `NE is norepinepherine -- not yet in use`, 9: `Ser is serotonin -- not yet in use`, 10: `AChRaw is raw ACh value used in updating global ACh value by LDTLayer.`, 11: `GoalMaint is the normalized (0-1) goal maintenance activity, set in ApplyRubicon function at start of trial. Drives top-down inhibition of LDT layer / ACh activity.`, 12: `VSMatrixJustGated is VSMatrix just gated (to engage goal maintenance in PFC areas), set at end of plus phase. This excludes any gating happening at time of US.`, 13: `VSMatrixHasGated is VSMatrix has gated since the last time HasRew was set (US outcome received or expected one failed to be received).`, 14: `CuriosityPoolGated is true if VSMatrixJustGated and the first pool representing the curiosity / novelty drive gated. This can change the giving up Effort.Max parameter.`, 15: `Time is the raw time counter, incrementing upward during goal engaged window. This is also copied directly into NegUS[0] which tracks time, but we maintain a separate effort value to make it clearer.`, 16: `Effort is the raw effort counter, incrementing upward for each effort step during goal engaged window. This is also copied directly into NegUS[1] which tracks effort, but we maintain a separate effort value to make it clearer.`, 17: `UrgencyRaw is the raw effort for urgency, incrementing upward from effort increments per step when _not_ goal engaged.`, 18: `Urgency is the overall urgency activity level (normalized 0-1), computed from logistic function of GvUrgencyRaw. 
This drives DAtonic activity to increasingly bias Go firing.`, 19: `HasPosUS indicates has positive US on this trial, drives goal accomplishment logic and gating.`, 20: `HadPosUS is state from the previous trial (copied from HasPosUS in NewState).`, 21: `NegUSOutcome indicates that a phasic negative US stimulus was experienced, driving phasic ACh, VSMatrix gating to reset current goal engaged plan (if any), and phasic dopamine based on the outcome.`, 22: `HadNegUSOutcome is state from the previous trial (copied from NegUSOutcome in NewState)`, 23: `PVposSum is the total weighted positive valence primary value = sum of Weight * USpos * Drive`, 24: `PVpos is the normalized positive valence primary value = (1 - 1/(1+PVposGain * PVposSum))`, 25: `PVnegSum is the total weighted negative valence primary values including costs = sum of Weight * Cost + Weight * USneg`, 26: `PVpos is the normalized negative valence primary values, including costs = (1 - 1/(1+PVnegGain * PVnegSum))`, 27: `PVposEst is the estimated PVpos final outcome value decoded from the network PVposFinal layer`, 28: `PVposVar is the estimated variance or uncertainty in the PVpos final outcome value decoded from the network PVposFinal layer.`, 29: `PVnegEst is the estimated PVneg final outcome value decoded from the network PVnegFinal layer.`, 30: `PVnegVar is the estimated variance or uncertainty in the PVneg final outcome value decoded from the network PVnegFinal layer.`, 31: `GoalDistEst is the estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 32: `GoalDistPrev is the previous estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 33: `ProgressRate is the negative time average change in GoalDistEst, i.e., positive values indicate continued approach to the goal, while negative values represent moving away from the goal.`, 34: `GiveUpUtility is total GiveUp weight as a function of Cost.`, 35: `ContUtility is total Continue weight as a function of expected positive outcome PVposEst.`, 36: `GiveUpTiming is total GiveUp weight as a function of VSPatchPosSum * (1 - VSPatchPosVar).`, 37: `ContTiming is total Continue weight as a function of (1 - VSPatchPosSum) * VSPatchPosVar.`, 38: `GiveUpProgress is total GiveUp weight as a function of ProgressRate.`, 39: `ContProgress is total Continue weight as a function of ProgressRate.`, 40: `GiveUpSum is total GiveUp weight: Utility + Timing + Progress.`, 41: `ContSum is total Continue weight: Utility + Timing + Progress.`, 42: `GiveUpProb is the probability of giving up: 1 / (1 + (GvContSum / GvGiveUpSum))`, 43: `GiveUp is true if a reset was triggered probabilistically based on GiveUpProb.`, 44: `GaveUp is copy of GiveUp from previous trial.`, 45: `VSPatchPos is the net shunting input from VSPatch (PosD1, named PVi in original Rubicon) computed as the Max of US-specific VSPatch saved values, subtracting D1 - D2. This is also stored as GvRewPred.`, 46: `VSPatchPosThr is a thresholded version of GvVSPatchPos, applying Rubicon.LHb.VSPatchNonRewThr threshold for non-reward trials. This is the version used for computing DA.`, 47: `VSPatchPosRPE is the reward prediction error for the VSPatchPos reward prediction without any thresholding applied, and only for PV events. 
This is used to train the VSPatch, assuming a local feedback circuit that does not have the effective thresholding used for the broadcast critic signal that trains the rest of the network.`, 48: `VSPatchPosSum is the sum of VSPatchPos over goal engaged trials, representing the integrated prediction that the US is going to occur`, 49: `VSPatchPosPrev is the previous trial VSPatchPosSum`, 50: `VSPatchPosVar is the integrated temporal variance of VSPatchPos over goal engaged trials, which determines when the VSPatchPosSum has stabilized`, 51: `computed LHb activity level that drives dipping / pausing of DA firing, when VSPatch pos prediction > actual PV reward drive or PVneg > PVpos`, 52: `LHbBurst is computed LHb activity level that drives bursts of DA firing, when actual PV reward drive > VSPatch pos prediction`, 53: `LHbPVDA is GvLHbBurst - GvLHbDip -- the LHb contribution to DA, reflecting PV and VSPatch (PVi), but not the CS (LV) contributions`, 54: `CeMpos is positive valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAposAcqD1 - BLAposExtD2|_+ positively rectified. CeM sets Raw directly. Note that a positive US onset even with no active Drive will be reflected here, enabling learning about unexpected outcomes.`, 55: `CeMneg is negative valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAnegAcqD2 - BLAnegExtD1|_+ positively rectified. CeM sets Raw directly`, 56: `VtaDA is overall dopamine value reflecting all of the different inputs.`, 57: `CaBinWts are NCaBins starting here, of weights for integrating binned spikes to compute synaptic calcium values that drive the trace factor in learning. These are only stored for the first parallel data index di = 0.`}
var _GlobalScalarVarsMap = map[GlobalScalarVars]string{0: `GvRew`, 1: `GvHasRew`, 2: `GvRewPred`, 3: `GvPrevPred`, 4: `GvHadRew`, 5: `GvDA`, 6: `GvDAtonic`, 7: `GvACh`, 8: `GvNE`, 9: `GvSer`, 10: `GvAChRaw`, 11: `GvGoalMaint`, 12: `GvVSMatrixJustGated`, 13: `GvVSMatrixHasGated`, 14: `GvCuriosityPoolGated`, 15: `GvTime`, 16: `GvEffort`, 17: `GvUrgencyRaw`, 18: `GvUrgency`, 19: `GvHasPosUS`, 20: `GvHadPosUS`, 21: `GvNegUSOutcome`, 22: `GvHadNegUSOutcome`, 23: `GvPVposSum`, 24: `GvPVpos`, 25: `GvPVnegSum`, 26: `GvPVneg`, 27: `GvPVposEst`, 28: `GvPVposVar`, 29: `GvPVnegEst`, 30: `GvPVnegVar`, 31: `GvGoalDistEst`, 32: `GvGoalDistPrev`, 33: `GvProgressRate`, 34: `GvGiveUpUtility`, 35: `GvContUtility`, 36: `GvGiveUpTiming`, 37: `GvContTiming`, 38: `GvGiveUpProgress`, 39: `GvContProgress`, 40: `GvGiveUpSum`, 41: `GvContSum`, 42: `GvGiveUpProb`, 43: `GvGiveUp`, 44: `GvGaveUp`, 45: `GvVSPatchPos`, 46: `GvVSPatchPosThr`, 47: `GvVSPatchPosRPE`, 48: `GvVSPatchPosSum`, 49: `GvVSPatchPosPrev`, 50: `GvVSPatchPosVar`, 51: `GvLHbDip`, 52: `GvLHbBurst`, 53: `GvLHbPVDA`, 54: `GvCeMpos`, 55: `GvCeMneg`, 56: `GvVtaDA`, 57: `GvCaBinWts`}
// String returns the string representation of this GlobalScalarVars value.
func (i GlobalScalarVars) String() string { return enums.String(i, _GlobalScalarVarsMap) }
// SetString sets the GlobalScalarVars value from its string representation,
// and returns an error if the string is invalid.
func (i *GlobalScalarVars) SetString(s string) error {
return enums.SetString(i, s, _GlobalScalarVarsValueMap, "GlobalScalarVars")
}
// Int64 returns the GlobalScalarVars value as an int64.
func (i GlobalScalarVars) Int64() int64 { return int64(i) }
// SetInt64 sets the GlobalScalarVars value from an int64.
func (i *GlobalScalarVars) SetInt64(in int64) { *i = GlobalScalarVars(in) }
// Desc returns the description of the GlobalScalarVars value.
func (i GlobalScalarVars) Desc() string { return enums.Desc(i, _GlobalScalarVarsDescMap) }
// GlobalScalarVarsValues returns all possible values for the type GlobalScalarVars.
func GlobalScalarVarsValues() []GlobalScalarVars { return _GlobalScalarVarsValues }
// Values returns all possible values for the type GlobalScalarVars.
func (i GlobalScalarVars) Values() []enums.Enum { return enums.Values(_GlobalScalarVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i GlobalScalarVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *GlobalScalarVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "GlobalScalarVars")
}
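// Illustrative usage sketch (not part of the generated code): these values
// index the global scalar state by variable and data-parallel index, following
// the Value accessor pattern used for other global state tensors in this
// package; the GlobalScalars accessor shown here is an assumption based on
// that pattern.
//
//	di := uint32(0)
//	da := GlobalScalars.Value(int(GvDA), int(di)) // phasic dopamine for data index di
//	_ = da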
var _GlobalVectorVarsValues = []GlobalVectorVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
// GlobalVectorVarsN is the highest valid value for type GlobalVectorVars, plus one.
//
//gosl:start
const GlobalVectorVarsN GlobalVectorVars = 10
//gosl:end
var _GlobalVectorVarsValueMap = map[string]GlobalVectorVars{`GvCost`: 0, `GvCostRaw`: 1, `GvUSneg`: 2, `GvUSnegRaw`: 3, `GvDrives`: 4, `GvUSpos`: 5, `GvVSPatchD1`: 6, `GvVSPatchD2`: 7, `GvOFCposPTMaint`: 8, `GvVSMatrixPoolGated`: 9}
var _GlobalVectorVarsDescMap = map[GlobalVectorVars]string{0: `Cost are Time, Effort, etc costs, as normalized version of corresponding raw. NCosts of them`, 1: `CostRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 2: `USneg are negative valence US outcomes, normalized version of raw. NNegUSs of them`, 3: `USnegRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 4: `Drives are current drive state, updated with optional homeostatic exponential return to baseline values.`, 5: `USpos are current positive-valence drive-satisfying input(s) (unconditioned stimuli = US)`, 6: `VSPatch is current reward predicting VSPatch (PosD1) values.`, 7: `VSPatch is current reward predicting VSPatch (PosD2) values.`, 8: `OFCposPTMaint is activity level of given OFCposPT maintenance pool used in anticipating potential USpos outcome value.`, 9: `VSMatrixPoolGated indicates whether given VSMatrix pool gated. This is reset after last goal accomplished -- records gating since then.`}
var _GlobalVectorVarsMap = map[GlobalVectorVars]string{0: `GvCost`, 1: `GvCostRaw`, 2: `GvUSneg`, 3: `GvUSnegRaw`, 4: `GvDrives`, 5: `GvUSpos`, 6: `GvVSPatchD1`, 7: `GvVSPatchD2`, 8: `GvOFCposPTMaint`, 9: `GvVSMatrixPoolGated`}
// String returns the string representation of this GlobalVectorVars value.
func (i GlobalVectorVars) String() string { return enums.String(i, _GlobalVectorVarsMap) }
// SetString sets the GlobalVectorVars value from its string representation,
// and returns an error if the string is invalid.
func (i *GlobalVectorVars) SetString(s string) error {
return enums.SetString(i, s, _GlobalVectorVarsValueMap, "GlobalVectorVars")
}
// Int64 returns the GlobalVectorVars value as an int64.
func (i GlobalVectorVars) Int64() int64 { return int64(i) }
// SetInt64 sets the GlobalVectorVars value from an int64.
func (i *GlobalVectorVars) SetInt64(in int64) { *i = GlobalVectorVars(in) }
// Desc returns the description of the GlobalVectorVars value.
func (i GlobalVectorVars) Desc() string { return enums.Desc(i, _GlobalVectorVarsDescMap) }
// GlobalVectorVarsValues returns all possible values for the type GlobalVectorVars.
func GlobalVectorVarsValues() []GlobalVectorVars { return _GlobalVectorVarsValues }
// Values returns all possible values for the type GlobalVectorVars.
func (i GlobalVectorVars) Values() []enums.Enum { return enums.Values(_GlobalVectorVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i GlobalVectorVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *GlobalVectorVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "GlobalVectorVars")
}
var _GPUVarsValues = []GPUVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}
// GPUVarsN is the highest valid value for type GPUVars, plus one.
//
//gosl:start
const GPUVarsN GPUVars = 23
//gosl:end
var _GPUVarsValueMap = map[string]GPUVars{`LayersVar`: 0, `PathsVar`: 1, `NetworkIxsVar`: 2, `PoolIxsVar`: 3, `NeuronIxsVar`: 4, `SynapseIxsVar`: 5, `PathSendConVar`: 6, `RecvPathIxsVar`: 7, `PathRecvConVar`: 8, `RecvSynIxsVar`: 9, `CtxVar`: 10, `NeuronsVar`: 11, `NeuronAvgsVar`: 12, `LayerStatesVar`: 13, `GlobalScalarsVar`: 14, `GlobalVectorsVar`: 15, `ExtsVar`: 16, `PoolsVar`: 17, `PoolsIntVar`: 18, `PathGBufVar`: 19, `PathGSynsVar`: 20, `SynapsesVar`: 21, `SynapseTracesVar`: 22}
var _GPUVarsDescMap = map[GPUVars]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``, 6: ``, 7: ``, 8: ``, 9: ``, 10: ``, 11: ``, 12: ``, 13: ``, 14: ``, 15: ``, 16: ``, 17: ``, 18: ``, 19: ``, 20: ``, 21: ``, 22: ``}
var _GPUVarsMap = map[GPUVars]string{0: `LayersVar`, 1: `PathsVar`, 2: `NetworkIxsVar`, 3: `PoolIxsVar`, 4: `NeuronIxsVar`, 5: `SynapseIxsVar`, 6: `PathSendConVar`, 7: `RecvPathIxsVar`, 8: `PathRecvConVar`, 9: `RecvSynIxsVar`, 10: `CtxVar`, 11: `NeuronsVar`, 12: `NeuronAvgsVar`, 13: `LayerStatesVar`, 14: `GlobalScalarsVar`, 15: `GlobalVectorsVar`, 16: `ExtsVar`, 17: `PoolsVar`, 18: `PoolsIntVar`, 19: `PathGBufVar`, 20: `PathGSynsVar`, 21: `SynapsesVar`, 22: `SynapseTracesVar`}
// String returns the string representation of this GPUVars value.
func (i GPUVars) String() string { return enums.String(i, _GPUVarsMap) }
// SetString sets the GPUVars value from its string representation,
// and returns an error if the string is invalid.
func (i *GPUVars) SetString(s string) error {
return enums.SetString(i, s, _GPUVarsValueMap, "GPUVars")
}
// Int64 returns the GPUVars value as an int64.
func (i GPUVars) Int64() int64 { return int64(i) }
// SetInt64 sets the GPUVars value from an int64.
func (i *GPUVars) SetInt64(in int64) { *i = GPUVars(in) }
// Desc returns the description of the GPUVars value.
func (i GPUVars) Desc() string { return enums.Desc(i, _GPUVarsDescMap) }
// GPUVarsValues returns all possible values for the type GPUVars.
func GPUVarsValues() []GPUVars { return _GPUVarsValues }
// Values returns all possible values for the type GPUVars.
func (i GPUVars) Values() []enums.Enum { return enums.Values(_GPUVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i GPUVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *GPUVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "GPUVars") }
var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35}
// LayerTypesN is the highest valid value for type LayerTypes, plus one.
//
//gosl:start
const LayerTypesN LayerTypes = 36
//gosl:end
var _LayerTypesValueMap = map[string]LayerTypes{`SuperLayer`: 0, `InputLayer`: 1, `TargetLayer`: 2, `CompareLayer`: 3, `CTLayer`: 4, `PulvinarLayer`: 5, `TRNLayer`: 6, `PTMaintLayer`: 7, `PTPredLayer`: 8, `DSMatrixLayer`: 9, `VSMatrixLayer`: 10, `DSPatchLayer`: 11, `STNLayer`: 12, `GPLayer`: 13, `BGThalLayer`: 14, `VSGatedLayer`: 15, `IOLayer`: 16, `CNeLayer`: 17, `CNiIOLayer`: 18, `CNiUpLayer`: 19, `BLALayer`: 20, `CeMLayer`: 21, `VSPatchLayer`: 22, `LHbLayer`: 23, `DrivesLayer`: 24, `UrgencyLayer`: 25, `USLayer`: 26, `PVLayer`: 27, `LDTLayer`: 28, `VTALayer`: 29, `RewLayer`: 30, `RWPredLayer`: 31, `RWDaLayer`: 32, `TDPredLayer`: 33, `TDIntegLayer`: 34, `TDDaLayer`: 35}
var _LayerTypesDescMap = map[LayerTypes]string{0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in LayerTypes.`, 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`, 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`, 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`, 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`, 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT pathways, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`, 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`, 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`, 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`, 9: `DSMatrixLayer represents the matrisome spiny projection neurons (SPNs, MSNs) that are the main Go / No gating units in BG, and are modulated by phasic dopamine: D1 = Go, D2 = No. These are for dorsal striatum, which interact with matrisomes and receive PF (parafasciculus) feedback signals.`, 10: `VSMatrixLayer represents the matrisome spiny projection neurons (SPNs, MSNs) that are the main Go / No gating units in BG, and are modulated by phasic dopamine: D1 = Go, D2 = No. 
These are for ventral striatum, which drive goal-selection gating signals through the MD thalamus, and activate instinctive behaviors based on learned inputs projecting to various output pathways.`, 11: `DSPatchLayer represents the dorsolateral striosomal spiny neurons that modulate the activity of SNc dopamine to a given Pool.`, 12: `STNLayer represents subthalamic nucleus neurons, with two subtypes: STNp are more strongly driven and get over bursting threshold, driving strong, rapid activation of the KCa channels, causing a long pause in firing, which creates a window during which GPe dynamics resolve Go vs. No balance. STNs are more weakly driven and thus more slowly activate KCa, resulting in a longer period of activation, during which the GPi is inhibited to prevent premature gating based only MtxGo inhibition -- gating only occurs when GPePr signal has had a chance to integrate its MtxNo inputs.`, 13: `GPLayer represents a globus pallidus layer in the BG, including: GPePr, GPeAk (arkypallidal), and GPi / SNr. Has intrinsic activity.`, 14: `BGThalLayer represents a BG gated thalamic layer, which receives BG gating in the form of an inhibitory pathway from GPi. Located mainly in the Ventral thalamus: VA / VM / VL, and also parts of MD mediodorsal thalamus.`, 15: `VSGated represents explicit coding of VS gating status: JustGated and HasGated (since last US or failed predicted US), For visualization and / or motor action signaling.`, 16: `IOLayer represents a cerebellum inferior olive (IO) layer, which drive learning in associated cerebellar nuclei and Purkinje cells. Receives paired input from the CNiIOLayer inhibitory prediction neurons and specific sensory channels that are being predicted, and a modulatory input from the efferent copy of motor action to initiate it. GaP = integrated GeSyn, GaM = integrated GiSyn, GaD = offset GiSyn, TimeDiff = GaP - GaD, TimePeak = 1 if error spike.`, 17: `CNeLayer represents the cerebellar nuclei excitatory neurons, which have slow learning to maintain a target average firing rate.`, 18: `CNiIOLayer represents the cerebellar nuclei inhibitory prediction neurons, which learn to predict the activity of a specific sensory input, and inhibit it in the corresponding CNeUpLayer`, 19: `CNiUpLayer represents the cerebellar nuclei inhibitory upgoing output neurons, which learn from IOLayer error signals to predict specific sensory inputs based on motor commands, thereby cancelling the effects of self-generated motor commands.`, 20: `BLALayer represents a basolateral amygdala layer which learns to associate arbitrary stimuli (CSs) with behaviorally salient outcomes (USs)`, 21: `CeMLayer represents a central nucleus of the amygdala layer.`, 22: `VSPatchLayer represents a ventral striatum patch layer, which learns to represent the expected amount of dopamine reward and projects both directly with shunting inhibition to the VTA and indirectly via the LHb / RMTg to cancel phasic dopamine firing to expected rewards (i.e., reward prediction error).`, 23: `LHbLayer represents the lateral habenula, which drives dipping in the VTA. It tracks the Global LHb values for visualization purposes -- updated by VTALayer.`, 24: `DrivesLayer represents the Drives in .Rubicon framework. It tracks the Global Drives values for visualization and predictive learning purposes.`, 25: `UrgencyLayer represents the Urgency factor in Rubicon framework. 
It tracks the Global Urgency.Urge value for visualization and predictive learning purposes.`, 26: `USLayer represents a US unconditioned stimulus layer (USpos or USneg). It tracks the Global USpos or USneg, for visualization and predictive learning purposes. Actual US inputs are set in Rubicon.`, 27: `PVLayer represents a PV primary value layer (PVpos or PVneg) representing the total primary value as a function of US inputs, drives, and effort. It tracks the Global VTA.PVpos, PVneg values for visualization and predictive learning purposes.`, 28: `LDTLayer represents the laterodorsal tegmentum layer, which is the primary limbic ACh (acetylcholine) driver to other ACh: BG cholinergic interneurons (CIN) and nucleus basalis ACh areas. The phasic ACh release signals reward salient inputs from CS, US and US omission, and it drives widespread disinhibition of BG gating and VTA DA firing. It receives excitation from superior colliculus which computes a temporal derivative (stimulus specific adaptation, SSA) of sensory inputs, and inhibitory input from OFC, ACC driving suppression of distracting inputs during goal-engaged states.`, 29: `VTALayer represents the ventral tegmental area, which releases dopamine. It computes final DA value from Rubicon-computed LHb PVDA (primary value DA), updated at start of each trial from updated US, Effort, etc state, and cycle-by-cycle LV learned value state reflecting CS inputs, in the Amygdala (CeM). Its activity reflects this DA level, which is effectively broadcast via Global state values to all layers.`, 30: `RewLayer represents positive (first unit) or negative (second unit) reward values, showing spiking rates for each, and Act always represents the signed value.`, 31: `RWPredLayer computes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). Activity is computed as linear function of excitatory conductance. The first unit in the layer represents positive reward, second negative. Use with RWPath which does simple delta-rule learning on minus-plus.`, 32: `RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). It computes difference between r(t) and RWPred values. r(t) is accessed directly from a Rew layer -- if no external input then no DA is computed -- critical for effective use of RW only for PV cases. RWPred prediction is also accessed directly from Rew layer to avoid any issues.`, 33: `TDPredLayer is the temporal differences reward prediction layer. It represents estimated value V(t) in the minus phase, and computes estimated V(t+1) based on its learned weights in plus phase, using the TDPredPath pathway type for DA modulated learning. The first unit in the layer represents positive reward, second negative.`, 34: `TDIntegLayer is the temporal differences reward integration layer. It represents estimated value V(t) from prior time step in the minus phase, and estimated discount * V(t+1) + r(t) in the plus phase. It gets Rew, PrevPred from Context.NeuroMod, and Special LayerValues from TDPredLayer. The first unit in the layer represents positive reward, second negative.`, 35: `TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. These are retrieved from Special LayerValues.`}
var _LayerTypesMap = map[LayerTypes]string{0: `SuperLayer`, 1: `InputLayer`, 2: `TargetLayer`, 3: `CompareLayer`, 4: `CTLayer`, 5: `PulvinarLayer`, 6: `TRNLayer`, 7: `PTMaintLayer`, 8: `PTPredLayer`, 9: `DSMatrixLayer`, 10: `VSMatrixLayer`, 11: `DSPatchLayer`, 12: `STNLayer`, 13: `GPLayer`, 14: `BGThalLayer`, 15: `VSGatedLayer`, 16: `IOLayer`, 17: `CNeLayer`, 18: `CNiIOLayer`, 19: `CNiUpLayer`, 20: `BLALayer`, 21: `CeMLayer`, 22: `VSPatchLayer`, 23: `LHbLayer`, 24: `DrivesLayer`, 25: `UrgencyLayer`, 26: `USLayer`, 27: `PVLayer`, 28: `LDTLayer`, 29: `VTALayer`, 30: `RewLayer`, 31: `RWPredLayer`, 32: `RWDaLayer`, 33: `TDPredLayer`, 34: `TDIntegLayer`, 35: `TDDaLayer`}
// String returns the string representation of this LayerTypes value.
func (i LayerTypes) String() string { return enums.String(i, _LayerTypesMap) }
// SetString sets the LayerTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *LayerTypes) SetString(s string) error {
return enums.SetString(i, s, _LayerTypesValueMap, "LayerTypes")
}
// Int64 returns the LayerTypes value as an int64.
func (i LayerTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the LayerTypes value from an int64.
func (i *LayerTypes) SetInt64(in int64) { *i = LayerTypes(in) }
// Desc returns the description of the LayerTypes value.
func (i LayerTypes) Desc() string { return enums.Desc(i, _LayerTypesDescMap) }
// LayerTypesValues returns all possible values for the type LayerTypes.
func LayerTypesValues() []LayerTypes { return _LayerTypesValues }
// Values returns all possible values for the type LayerTypes.
func (i LayerTypes) Values() []enums.Enum { return enums.Values(_LayerTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i LayerTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *LayerTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "LayerTypes")
}
var _LayerVarsValues = []LayerVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
// LayerVarsN is the highest valid value for type LayerVars, plus one.
//
//gosl:start
const LayerVarsN LayerVars = 12
//gosl:end
var _LayerVarsValueMap = map[string]LayerVars{`LayerActMAvg`: 0, `LayerActPAvg`: 1, `LayerAvgMaxGeM`: 2, `LayerAvgMaxGiM`: 3, `LayerGiMult`: 4, `LayerPhaseDiff`: 5, `LayerPhaseDiffAvg`: 6, `LayerPhaseDiffVar`: 7, `LayerRT`: 8, `GatedRT`: 9, `LayerRewPredPos`: 10, `LayerRewPredNeg`: 11}
var _LayerVarsDescMap = map[LayerVars]string{0: `LayerActMAvg is the running-average minus-phase activity integrated at Dt.LongAvgTau, used for adapting inhibition relative to target level.`, 1: `LayerActPAvg is the running-average plus-phase activity integrated at Dt.LongAvgTau.`, 2: `LayerAvgMaxGeM is the running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau.`, 3: `LayerAvgMaxGiM is the running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau.`, 4: `LayerGiMult is a multiplier on layer-level inhibition, which can be adapted to maintain target activity level.`, 5: `LayerPhaseDiff is the phase-wise difference in the activity state between the minus [ActM] and plus [ActP] phases, measured using 1 minus the correlation (centered cosine aka normalized dot product). 0 = no difference, 2 = maximum difference. Computed by PhaseDiffFromActs in the PlusPhase.`, 6: `LayerPhaseDiffAvg is the running average of [LayerPhaseDiff] over time, integrated at Dt.LongAvgTau.`, 7: `LayerPhaseDiffVar is the running variance of [LayerPhaseDiff], integrated at Dt.LongAvgTau.`, 8: `LayerRT is the reaction time for this layer in cycles, which is -1 until the Max CaP level (after MaxCycStart) exceeds the Inhib.ActAvg.RTThr threshold.`, 9: `GatedRT is the reaction time for this layer in cycles, which is -1 until the Layer-level [PoolGated] is true.`, 10: `LayerRewPredPos is the positive-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer]. For [TDIntegLayer], this is the plus phase current integrated reward prediction.`, 11: `LayerRewPredNeg is the negative-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer] For [TDIntegLayer], this is the minus phase previous integrated reward prediction.`}
var _LayerVarsMap = map[LayerVars]string{0: `LayerActMAvg`, 1: `LayerActPAvg`, 2: `LayerAvgMaxGeM`, 3: `LayerAvgMaxGiM`, 4: `LayerGiMult`, 5: `LayerPhaseDiff`, 6: `LayerPhaseDiffAvg`, 7: `LayerPhaseDiffVar`, 8: `LayerRT`, 9: `GatedRT`, 10: `LayerRewPredPos`, 11: `LayerRewPredNeg`}
// String returns the string representation of this LayerVars value.
func (i LayerVars) String() string { return enums.String(i, _LayerVarsMap) }
// SetString sets the LayerVars value from its string representation,
// and returns an error if the string is invalid.
func (i *LayerVars) SetString(s string) error {
return enums.SetString(i, s, _LayerVarsValueMap, "LayerVars")
}
// Int64 returns the LayerVars value as an int64.
func (i LayerVars) Int64() int64 { return int64(i) }
// SetInt64 sets the LayerVars value from an int64.
func (i *LayerVars) SetInt64(in int64) { *i = LayerVars(in) }
// Desc returns the description of the LayerVars value.
func (i LayerVars) Desc() string { return enums.Desc(i, _LayerVarsDescMap) }
// LayerVarsValues returns all possible values for the type LayerVars.
func LayerVarsValues() []LayerVars { return _LayerVarsValues }
// Values returns all possible values for the type LayerVars.
func (i LayerVars) Values() []enums.Enum { return enums.Values(_LayerVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i LayerVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *LayerVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "LayerVars")
}
var _ViewTimesValues = []ViewTimes{0, 1, 2, 3, 4, 5, 6}
// ViewTimesN is the highest valid value for type ViewTimes, plus one.
//
//gosl:start
const ViewTimesN ViewTimes = 7
//gosl:end
var _ViewTimesValueMap = map[string]ViewTimes{`Cycle`: 0, `FastSpike`: 1, `Gamma`: 2, `Beta`: 3, `Alpha`: 4, `Phase`: 5, `Theta`: 6}
var _ViewTimesDescMap = map[ViewTimes]string{0: `Cycle is an update of neuron state, equivalent to 1 msec of real time.`, 1: `FastSpike is 10 cycles (msec) or 100 Hz. This is the fastest spiking time generally observed in the neocortex.`, 2: `Gamma is 25 cycles (msec) or 40 Hz. Neocortical activity often exhibits synchrony peaks in this range.`, 3: `Beta is 50 cycles (msec) or 20 Hz (two Gammas). Gating in the basal ganglia and associated updating in prefrontal cortex occurs at this frequency.`, 4: `Alpha is 100 cycles (msec) or 10 Hz (two Betas). Posterior neocortex exhibits synchrony peaks in this range, corresponding to the intrinsic bursting frequency of layer 5 IB neurons, and corticothalamic loop resonance.`, 5: `Phase is the Minus or Plus phase, where plus phase is bursting / outcome that drives positive learning relative to prediction in minus phase. Minus phase is at 150 cycles (msec).`, 6: `Theta is 200 cycles (msec) or 5 Hz (two Alphas), i.e., a Trial. This is the modal duration of a saccade, the update frequency of medial temporal lobe episodic memory, and the minimal predictive learning cycle (perceive on Alpha 1, predict on 2).`}
var _ViewTimesMap = map[ViewTimes]string{0: `Cycle`, 1: `FastSpike`, 2: `Gamma`, 3: `Beta`, 4: `Alpha`, 5: `Phase`, 6: `Theta`}
// String returns the string representation of this ViewTimes value.
func (i ViewTimes) String() string { return enums.String(i, _ViewTimesMap) }
// SetString sets the ViewTimes value from its string representation,
// and returns an error if the string is invalid.
func (i *ViewTimes) SetString(s string) error {
return enums.SetString(i, s, _ViewTimesValueMap, "ViewTimes")
}
// Int64 returns the ViewTimes value as an int64.
func (i ViewTimes) Int64() int64 { return int64(i) }
// SetInt64 sets the ViewTimes value from an int64.
func (i *ViewTimes) SetInt64(in int64) { *i = ViewTimes(in) }
// Desc returns the description of the ViewTimes value.
func (i ViewTimes) Desc() string { return enums.Desc(i, _ViewTimesDescMap) }
// ViewTimesValues returns all possible values for the type ViewTimes.
func ViewTimesValues() []ViewTimes { return _ViewTimesValues }
// Values returns all possible values for the type ViewTimes.
func (i ViewTimes) Values() []enums.Enum { return enums.Values(_ViewTimesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i ViewTimes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *ViewTimes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "ViewTimes")
}
var _DAModTypesValues = []DAModTypes{0, 1, 2, 3}
// DAModTypesN is the highest valid value for type DAModTypes, plus one.
//
//gosl:start
const DAModTypesN DAModTypes = 4
//gosl:end
var _DAModTypesValueMap = map[string]DAModTypes{`NoDAMod`: 0, `D1Mod`: 1, `D2Mod`: 2, `D1AbsMod`: 3}
var _DAModTypesDescMap = map[DAModTypes]string{0: `NoDAMod means there is no effect of dopamine on neural activity`, 1: `D1Mod is for neurons that primarily express dopamine D1 receptors, which are excitatory from DA bursts, inhibitory from dips. Cortical neurons can generally use this type, while subcortical populations are more diverse in having both D1 and D2 subtypes.`, 2: `D2Mod is for neurons that primarily express dopamine D2 receptors, which are excitatory from DA dips, inhibitory from bursts.`, 3: `D1AbsMod is like D1Mod, except the absolute value of DA is used instead of the signed value. There are a subset of DA neurons that send increased DA for both negative and positive outcomes, targeting frontal neurons.`}
var _DAModTypesMap = map[DAModTypes]string{0: `NoDAMod`, 1: `D1Mod`, 2: `D2Mod`, 3: `D1AbsMod`}
// String returns the string representation of this DAModTypes value.
func (i DAModTypes) String() string { return enums.String(i, _DAModTypesMap) }
// SetString sets the DAModTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *DAModTypes) SetString(s string) error {
return enums.SetString(i, s, _DAModTypesValueMap, "DAModTypes")
}
// Int64 returns the DAModTypes value as an int64.
func (i DAModTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the DAModTypes value from an int64.
func (i *DAModTypes) SetInt64(in int64) { *i = DAModTypes(in) }
// Desc returns the description of the DAModTypes value.
func (i DAModTypes) Desc() string { return enums.Desc(i, _DAModTypesDescMap) }
// DAModTypesValues returns all possible values for the type DAModTypes.
func DAModTypesValues() []DAModTypes { return _DAModTypesValues }
// Values returns all possible values for the type DAModTypes.
func (i DAModTypes) Values() []enums.Enum { return enums.Values(_DAModTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i DAModTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *DAModTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "DAModTypes")
}
var _ValenceTypesValues = []ValenceTypes{0, 1, 2}
// ValenceTypesN is the highest valid value for type ValenceTypes, plus one.
//
//gosl:start
const ValenceTypesN ValenceTypes = 3
//gosl:end
var _ValenceTypesValueMap = map[string]ValenceTypes{`Positive`: 0, `Negative`: 1, `Cost`: 2}
var _ValenceTypesDescMap = map[ValenceTypes]string{0: `Positive valence codes for outcomes aligned with drives / goals.`, 1: `Negative valence codes for harmful or aversive outcomes.`, 2: `Cost codes for continuous ongoing cost factors such as Time and Effort`}
var _ValenceTypesMap = map[ValenceTypes]string{0: `Positive`, 1: `Negative`, 2: `Cost`}
// String returns the string representation of this ValenceTypes value.
func (i ValenceTypes) String() string { return enums.String(i, _ValenceTypesMap) }
// SetString sets the ValenceTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *ValenceTypes) SetString(s string) error {
return enums.SetString(i, s, _ValenceTypesValueMap, "ValenceTypes")
}
// Int64 returns the ValenceTypes value as an int64.
func (i ValenceTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the ValenceTypes value from an int64.
func (i *ValenceTypes) SetInt64(in int64) { *i = ValenceTypes(in) }
// Desc returns the description of the ValenceTypes value.
func (i ValenceTypes) Desc() string { return enums.Desc(i, _ValenceTypesDescMap) }
// ValenceTypesValues returns all possible values for the type ValenceTypes.
func ValenceTypesValues() []ValenceTypes { return _ValenceTypesValues }
// Values returns all possible values for the type ValenceTypes.
func (i ValenceTypes) Values() []enums.Enum { return enums.Values(_ValenceTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i ValenceTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *ValenceTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "ValenceTypes")
}
var _NeuronFlagsValues = []NeuronFlags{1, 2, 4, 8}
// NeuronFlagsN is the highest valid value for type NeuronFlags, plus one.
//
//gosl:start
const NeuronFlagsN NeuronFlags = 9
//gosl:end
var _NeuronFlagsValueMap = map[string]NeuronFlags{`NeuronOff`: 1, `NeuronHasExt`: 2, `NeuronHasTarg`: 4, `NeuronHasCmpr`: 8}
var _NeuronFlagsDescMap = map[NeuronFlags]string{1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned).`, 2: `NeuronHasExt means the neuron has external input in its Ext field.`, 4: `NeuronHasTarg means the neuron has external target input in its Target field.`, 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field. Used for computing comparison statistics but does not drive neural activity ever.`}
var _NeuronFlagsMap = map[NeuronFlags]string{1: `NeuronOff`, 2: `NeuronHasExt`, 4: `NeuronHasTarg`, 8: `NeuronHasCmpr`}
// String returns the string representation of this NeuronFlags value.
func (i NeuronFlags) String() string { return enums.String(i, _NeuronFlagsMap) }
// SetString sets the NeuronFlags value from its string representation,
// and returns an error if the string is invalid.
func (i *NeuronFlags) SetString(s string) error {
return enums.SetString(i, s, _NeuronFlagsValueMap, "NeuronFlags")
}
// Int64 returns the NeuronFlags value as an int64.
func (i NeuronFlags) Int64() int64 { return int64(i) }
// SetInt64 sets the NeuronFlags value from an int64.
func (i *NeuronFlags) SetInt64(in int64) { *i = NeuronFlags(in) }
// Desc returns the description of the NeuronFlags value.
func (i NeuronFlags) Desc() string { return enums.Desc(i, _NeuronFlagsDescMap) }
// NeuronFlagsValues returns all possible values for the type NeuronFlags.
func NeuronFlagsValues() []NeuronFlags { return _NeuronFlagsValues }
// Values returns all possible values for the type NeuronFlags.
func (i NeuronFlags) Values() []enums.Enum { return enums.Values(_NeuronFlagsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i NeuronFlags) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *NeuronFlags) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "NeuronFlags")
}
var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92}
// NeuronVarsN is the highest valid value for type NeuronVars, plus one.
//
//gosl:start
const NeuronVarsN NeuronVars = 93
//gosl:end
var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaM`: 14, `CaP`: 15, `CaD`: 16, `CaDPrev`: 17, `CaSyn`: 18, `LearnCa`: 19, `LearnCaM`: 20, `LearnCaP`: 21, `LearnCaD`: 22, `CaDiff`: 23, `LearnDiff`: 24, `GaM`: 25, `GaP`: 26, `GaD`: 27, `TimeDiff`: 28, `TimePeak`: 29, `TimeCycle`: 30, `LearnNow`: 31, `RLRate`: 32, `ETrace`: 33, `ETrLearn`: 34, `GnmdaSyn`: 35, `Gnmda`: 36, `GnmdaLrn`: 37, `GnmdaMaint`: 38, `NmdaCa`: 39, `Gvgcc`: 40, `VgccM`: 41, `VgccH`: 42, `VgccCa`: 43, `VgccCaInt`: 44, `Burst`: 45, `BurstPrv`: 46, `CtxtGe`: 47, `CtxtGeRaw`: 48, `CtxtGeOrig`: 49, `GgabaB`: 50, `GababM`: 51, `GababX`: 52, `Gak`: 53, `SSGiDend`: 54, `GknaMed`: 55, `GknaSlow`: 56, `Gkir`: 57, `KirM`: 58, `Gsk`: 59, `SKCaIn`: 60, `SKCaR`: 61, `SKCaM`: 62, `Gmahp`: 63, `MahpN`: 64, `Gsahp`: 65, `SahpCa`: 66, `SahpN`: 67, `ActM`: 68, `ActP`: 69, `Beta1`: 70, `Beta2`: 71, `CaPMax`: 72, `CaPMaxCa`: 73, `GeNoise`: 74, `GeNoiseP`: 75, `GiNoise`: 76, `GiNoiseP`: 77, `GeExt`: 78, `GeRaw`: 79, `GeSyn`: 80, `GiRaw`: 81, `GiSyn`: 82, `GeInt`: 83, `GeIntNorm`: 84, `GiInt`: 85, `GModRaw`: 86, `GModSyn`: 87, `SMaintP`: 88, `GMaintRaw`: 89, `GMaintSyn`: 90, `NeurFlags`: 91, `CaBins`: 92}
var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1).`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise. Useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations: just for stats / display.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state. This is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing some performance-level statistics (based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA). Does *not* include the Gbar.E factor.`, 5: `Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input to the neuron. Does *not* include the Gbar.I factor.`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects. Does *not* include the Gbar.K factor.`, 7: `Inet is net current produced by all channels, which drives update of Vm.`, 8: `Vm is the membrane potential at the cell body, which integrates Inet current over time, and drives spiking at the axon initial segment of the neuron.`, 9: `VmDend is the dendritic membrane potential, which has a slower time constant than Vm and is not subject to the VmR reset after spiking.`, 10: `ISI is the current inter-spike-interval, which counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is the average inter-spike-interval, i.e., the average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is the external input: drives activation of unit from outside influences (e.g., sensory input).`, 13: `Target is the target value: drives learning to produce this activation value.`, 14: `CaM is the spike-driven calcium trace at the neuron level, which then drives longer time-integrated variables: [CaP] and [CaD]. These variables are used for statistics and display to capture spiking activity at different timescales. They fluctuate more than [Act] and [ActInt], but are closer to the biological variables driving learning. CaM is the exponential integration of SpikeG * Spike using the MTau time constant (typically 5), and simulates a calmodulin (CaM) like signal, at an abstract level.`, 15: `CaP is the continuous cascaded integration of [CaM] using the PTau time constant (typically 40), representing a neuron-level, purely spiking version of the plus, LTP direction of weight change in the Kinase learning rule, dependent on CaMKII. 
This is not used for learning (see [LearnCaP]), but instead for statistics as a representation of recent activity.`, 16: `CaD is the continuous cascaded integration [CaP] using the DTau time constant (typically 40), representing a neuron-level, purely spiking version of the minus, LTD direction of weight change in the Kinase learning rule, dependent on DAPK1. This is not used for learning (see [LearnCaD]), but instead for statistics as a representation of trial-level activity.`, 17: `CaDPrev is the final [CaD] activation state at the end of previous theta cycle. This is used for specialized learning mechanisms that operate on delayed sending activations.`, 18: `CaSyn is the neuron-level integration of spike-driven calcium, used to approximate synaptic calcium influx as a product of sender and receiver neuron CaSyn values, which are integrated separately because it is computationally much more efficient. CaSyn enters into a Sender * Receiver product at each synapse to give the effective credit assignment factor for learning. This value is driven directly by spikes, with an exponential integration time constant of 30 msec (default), which captures the coincidence window for pre*post firing on NMDA receptor opening. The neuron [CaBins] values record the temporal trajectory of CaSyn over the course of the theta cycle window, and then the pre*post product is integrated over these bins at the synaptic level.`, 19: `LearnCa is the receiving neuron calcium signal, which is integrated up to [LearnCaP] and [LearnCaD], the difference of which is the temporal error component of the kinase cortical learning rule. LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium sources. The NMDA signal reflects both sending and receiving activity, while the VGCC signal is purely receiver spiking, and a balance of both works best.`, 20: `LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives [LearnCaP], and [LearnCaD] for the delta signal for error-driven learning.`, 21: `LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant (typically 40), representing the plus, LTP direction of weight change, capturing the function of CaMKII in the Kinase learning rule.`, 22: `LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant (typically 40), representing the minus, LTD direction of weight change, capturing the function of DAPK1 in the Kinase learning rule.`, 23: `CaDiff is difference between [LearnCaP] - [LearnCaD]. This is the error signal that drives error-driven learning.`, 24: `LearnDiff is the actual difference signal that drives learning, which is computed from [CaDiff] for neocortical neurons, but specifically at the point of learning ([LearnNow]).`, 25: `GaM is first-level integration of all input conductances g_a, which then drives longer time-integrated variables: [GaP] and [GaD]. 
These variables are used for timing of learning based on bursts of activity change over time: at the minus and plus phases.`, 26: `GaP is the continuous cascaded integration of [GaM] using the PTau time constant (typically 40), representing a neuron-level, all-conductance-based version of the plus, LTP direction of weight change in the Kinase learning rule.`, 27: `GaD is the continuous cascaded integration of [GaP] using the DTau time constant (typically 40), representing a neuron-level, all-conductance-based version of the minus, LTD direction of weight change in the Kinase learning rule.`, 28: `TimeDiff is the running time-average of |P - D| (absolute value), used for determining the timing of learning in terms of onsets of peaks. See [TimePeak]. GaP - GaD is used, as it is smoother and more reliable than LearnCaP - D.`, 29: `TimePeak is the value of the current peak (local maximum) of [TimeDiff]. This typically occurs at the onset of the minus phase, and drives the timing of learning a given number of cycles after that.`, 30: `TimeCycle is the absolute cycle where [TimePeak] occurred.`, 31: `LearnNow is the absolute cycle (ms, CyclesTotal) when the receiving neuron learns. For neocortex, either at end of theta cycle or based on timing computed from [TimeCycle] per [LearnTimingParams].`, 32: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from [CaD] of recv unit, and the normalized difference (CaP - CaD) / MAX(CaP - CaD).`, 33: `ETrace is the eligibility trace for this neuron.`, 34: `ETrLearn is the learning factor for the eligibility trace for this neuron. 1 + ETraceScale * [ETrace]`, 35: `GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron. It adds GeRaw and decays with a time constant.`, 36: `Gnmda is the net postsynaptic (receiving) NMDA conductance, after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 37: `GnmdaLrn is learning version of integrated NMDA recv synaptic current. It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that then drives [LearnCa] for learning.`, 38: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 39: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.`, 40: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.`, 41: `VgccM is activation gate of VGCC channels.`, 42: `VgccH inactivation gate of VGCC channels.`, 43: `VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking or directly from Gvgcc.`, 44: `VgccCaInt is the time-integrated VGCC calcium flux. This is actually what drives learning.`, 45: `Burst is the layer 5 IB intrinsic bursting neural activation value, computed by thresholding the [CaP] value in Super superficial layers.`, 46: `BurstPrv is previous Burst bursting activation from prior time step. Used for context-based learning.`, 47: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 48: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 49: `CtxtGeOrig is original CtxtGe value prior to any decay factor. Updates at end of plus phase.`, 50: `GgabaB is net GABA-B conductance, after Vm gating and Gk + Gbase. 
Applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 51: `GababM is the GABA-B / GIRK activation, which is a time-integrated value with rise and decay time constants.`, 52: `GababX is GABA-B / GIRK internal drive variable. This gets the raw activation and decays.`, 53: `Gak is the conductance of A-type K potassium channels.`, 54: `SSGiDend is the amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend).`, 55: `GknaMed is the conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation.`, 56: `GknaSlow is the conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation.`, 57: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 58: `KirM is the Kir potassium (K) inwardly rectifying gating value.`, 59: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 60: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold.`, 61: `SKCaR is the released amount of intracellular calcium, from SKCaIn, as a function of spiking events. This can bind to SKCa channels and drive K currents.`, 62: `SKCaM is the Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 63: `Gmahp is medium time scale AHP conductance.`, 64: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP.`, 65: `Gsahp is slow time scale AHP conductance.`, 66: `SahpCa is slowly accumulating calcium value that drives the slow AHP.`, 67: `SahpN is the sAHP gating value.`, 68: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 69: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 70: `Beta1 is the activation state at the first beta cycle within current state processing window (i.e., at 50 msec), as saved by Beta1() function. Used for example in hippocampus for CA3, CA1 learning.`, 71: `Beta2 is the activation state at the second beta cycle within current state processing window (i.e., at 100 msec), as saved by Beta2() function. Used for example in hippocampus for CA3, CA1 learning.`, 72: `CaPMax is the maximum [CaP] across one theta cycle time window (max of CaPMaxCa). It is used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`, 73: `CaPMaxCa is the Ca integrated like [CaP] but only starting at the MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial. The PTau time constant otherwise results in significant carryover. This is the input to CaPMax.`, 74: `GeNoise is integrated noise excitatory conductance, added into Ge.`, 75: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking. 
Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 76: `GiNoise is integrated noise inhibitory conductance, added into Gi.`, 77: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 78: `GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.`, 79: `GeRaw is the raw excitatory conductance (net input) received from senders = current raw spiking drive.`, 80: `GeSyn is the time-integrated total excitatory (AMPA) synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways. Does *not* include Gbar.E.`, 81: `GiRaw is the raw inhibitory conductance (net input) received from senders = current raw spiking drive.`, 82: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi.`, 83: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably). This is useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive.`, 84: `GeIntNorm is normalized GeInt value (divided by the layer maximum). This is used for learning in layers that require learning on subthreshold activity.`, 85: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably). Useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive.`, 86: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways.`, 87: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways.`, 88: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor.`, 89: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways.`, 90: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 91: `NeurFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`, 92: `CaBins is a vector of values starting here, with aggregated [CaSyn] values in time bins of [CaBinCycles] across two theta cycles, for computing synaptic calcium efficiently. Each bin = Sum(CaSyn / CaBinCycles). Total number of bins = 2 * [Context.ThetaCycles] / CaBinCycles. Use [CaBinForCycle] to access. 
Synaptic calcium is integrated from sender * receiver CaBins values, with weights for CaP vs CaD that reflect their faster vs. slower time constants, respectively. CaD is used for the credit assignment factor, while CaP - CaD is used directly for error-driven learning at Target layers.`}
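// cascadedCaSketch is an illustrative sketch (not part of the generated code)
// of the cascaded exponential integration described above for [CaM], [CaP],
// and [CaD]: CaM integrates the spike-driven input with the MTau time
// constant, CaP integrates CaM with PTau, and CaD integrates CaP with DTau.
// The time constants are the typical defaults quoted in the descriptions;
// the actual kinase code lives elsewhere in the axon package.
func cascadedCaSketch(caM, caP, caD *float32, spikeG, spike float32) {
	const mTau, pTau, dTau = 5.0, 40.0, 40.0
	*caM += (spikeG*spike - *caM) / mTau // calmodulin-like fast integration
	*caP += (*caM - *caP) / pTau         // plus / LTP direction (CaMKII-like)
	*caD += (*caP - *caD) / dTau         // minus / LTD direction (DAPK1-like)
}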
var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaM`, 15: `CaP`, 16: `CaD`, 17: `CaDPrev`, 18: `CaSyn`, 19: `LearnCa`, 20: `LearnCaM`, 21: `LearnCaP`, 22: `LearnCaD`, 23: `CaDiff`, 24: `LearnDiff`, 25: `GaM`, 26: `GaP`, 27: `GaD`, 28: `TimeDiff`, 29: `TimePeak`, 30: `TimeCycle`, 31: `LearnNow`, 32: `RLRate`, 33: `ETrace`, 34: `ETrLearn`, 35: `GnmdaSyn`, 36: `Gnmda`, 37: `GnmdaLrn`, 38: `GnmdaMaint`, 39: `NmdaCa`, 40: `Gvgcc`, 41: `VgccM`, 42: `VgccH`, 43: `VgccCa`, 44: `VgccCaInt`, 45: `Burst`, 46: `BurstPrv`, 47: `CtxtGe`, 48: `CtxtGeRaw`, 49: `CtxtGeOrig`, 50: `GgabaB`, 51: `GababM`, 52: `GababX`, 53: `Gak`, 54: `SSGiDend`, 55: `GknaMed`, 56: `GknaSlow`, 57: `Gkir`, 58: `KirM`, 59: `Gsk`, 60: `SKCaIn`, 61: `SKCaR`, 62: `SKCaM`, 63: `Gmahp`, 64: `MahpN`, 65: `Gsahp`, 66: `SahpCa`, 67: `SahpN`, 68: `ActM`, 69: `ActP`, 70: `Beta1`, 71: `Beta2`, 72: `CaPMax`, 73: `CaPMaxCa`, 74: `GeNoise`, 75: `GeNoiseP`, 76: `GiNoise`, 77: `GiNoiseP`, 78: `GeExt`, 79: `GeRaw`, 80: `GeSyn`, 81: `GiRaw`, 82: `GiSyn`, 83: `GeInt`, 84: `GeIntNorm`, 85: `GiInt`, 86: `GModRaw`, 87: `GModSyn`, 88: `SMaintP`, 89: `GMaintRaw`, 90: `GMaintSyn`, 91: `NeurFlags`, 92: `CaBins`}
// String returns the string representation of this NeuronVars value.
func (i NeuronVars) String() string { return enums.String(i, _NeuronVarsMap) }
// SetString sets the NeuronVars value from its string representation,
// and returns an error if the string is invalid.
func (i *NeuronVars) SetString(s string) error {
return enums.SetString(i, s, _NeuronVarsValueMap, "NeuronVars")
}
// Int64 returns the NeuronVars value as an int64.
func (i NeuronVars) Int64() int64 { return int64(i) }
// SetInt64 sets the NeuronVars value from an int64.
func (i *NeuronVars) SetInt64(in int64) { *i = NeuronVars(in) }
// Desc returns the description of the NeuronVars value.
func (i NeuronVars) Desc() string { return enums.Desc(i, _NeuronVarsDescMap) }
// NeuronVarsValues returns all possible values for the type NeuronVars.
func NeuronVarsValues() []NeuronVars { return _NeuronVarsValues }
// Values returns all possible values for the type NeuronVars.
func (i NeuronVars) Values() []enums.Enum { return enums.Values(_NeuronVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i NeuronVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *NeuronVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "NeuronVars")
}
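// exampleNeuronVarsRoundTrip is an illustrative usage sketch (not generated
// code) of the String / SetString / Desc methods defined above; the same
// pattern applies to every enum type in this file.
func exampleNeuronVarsRoundTrip() (string, string, error) {
	var v NeuronVars
	if err := v.SetString("CaP"); err != nil { // parse a variable by name
		return "", "", err
	}
	return v.String(), v.Desc(), nil // "CaP" and its long description
}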
var _NeuronAvgVarsValues = []NeuronAvgVars{0, 1, 2, 3, 4, 5, 6}
// NeuronAvgVarsN is the highest valid value for type NeuronAvgVars, plus one.
//
//gosl:start
const NeuronAvgVarsN NeuronAvgVars = 7
//gosl:end
var _NeuronAvgVarsValueMap = map[string]NeuronAvgVars{`ActAvg`: 0, `AvgPct`: 1, `TrgAvg`: 2, `DTrgAvg`: 3, `AvgDif`: 4, `GeBase`: 5, `GiBase`: 6}
var _NeuronAvgVarsDescMap = map[NeuronAvgVars]string{0: `ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau). Useful for finding hog units and seeing overall distribution of activation.`, 1: `AvgPct is ActAvg as a proportion of overall layer activation. This is used for synaptic scaling to match TrgAvg activation, updated at SlowInterval intervals.`, 2: `TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct.`, 3: `DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient. Acts like a bias weight. MPI needs to share these across processors.`, 4: `AvgDif is AvgPct - TrgAvg, i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling. Updated at SlowInterval intervals.`, 5: `GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability.`, 6: `GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability.`}
var _NeuronAvgVarsMap = map[NeuronAvgVars]string{0: `ActAvg`, 1: `AvgPct`, 2: `TrgAvg`, 3: `DTrgAvg`, 4: `AvgDif`, 5: `GeBase`, 6: `GiBase`}
// String returns the string representation of this NeuronAvgVars value.
func (i NeuronAvgVars) String() string { return enums.String(i, _NeuronAvgVarsMap) }
// SetString sets the NeuronAvgVars value from its string representation,
// and returns an error if the string is invalid.
func (i *NeuronAvgVars) SetString(s string) error {
return enums.SetString(i, s, _NeuronAvgVarsValueMap, "NeuronAvgVars")
}
// Int64 returns the NeuronAvgVars value as an int64.
func (i NeuronAvgVars) Int64() int64 { return int64(i) }
// SetInt64 sets the NeuronAvgVars value from an int64.
func (i *NeuronAvgVars) SetInt64(in int64) { *i = NeuronAvgVars(in) }
// Desc returns the description of the NeuronAvgVars value.
func (i NeuronAvgVars) Desc() string { return enums.Desc(i, _NeuronAvgVarsDescMap) }
// NeuronAvgVarsValues returns all possible values for the type NeuronAvgVars.
func NeuronAvgVarsValues() []NeuronAvgVars { return _NeuronAvgVarsValues }
// Values returns all possible values for the type NeuronAvgVars.
func (i NeuronAvgVars) Values() []enums.Enum { return enums.Values(_NeuronAvgVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i NeuronAvgVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *NeuronAvgVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "NeuronAvgVars")
}
var _NeuronIndexVarsValues = []NeuronIndexVars{0, 1, 2}
// NeuronIndexVarsN is the highest valid value for type NeuronIndexVars, plus one.
//
//gosl:start
const NeuronIndexVarsN NeuronIndexVars = 3
//gosl:end
var _NeuronIndexVarsValueMap = map[string]NeuronIndexVars{`NrnNeurIndex`: 0, `NrnLayIndex`: 1, `NrnSubPool`: 2}
var _NeuronIndexVarsDescMap = map[NeuronIndexVars]string{0: `NrnNeurIndex is the index of this neuron within its owning layer.`, 1: `NrnLayIndex is the index of the layer that this neuron belongs to, needed for neuron-level parallel code.`, 2: `NrnSubPool is the index of the sub-level inhibitory pool for this neuron (only for 4D shapes, the pool (unit-group / hypercolumn) structure level). Indices start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).`}
var _NeuronIndexVarsMap = map[NeuronIndexVars]string{0: `NrnNeurIndex`, 1: `NrnLayIndex`, 2: `NrnSubPool`}
// String returns the string representation of this NeuronIndexVars value.
func (i NeuronIndexVars) String() string { return enums.String(i, _NeuronIndexVarsMap) }
// SetString sets the NeuronIndexVars value from its string representation,
// and returns an error if the string is invalid.
func (i *NeuronIndexVars) SetString(s string) error {
return enums.SetString(i, s, _NeuronIndexVarsValueMap, "NeuronIndexVars")
}
// Int64 returns the NeuronIndexVars value as an int64.
func (i NeuronIndexVars) Int64() int64 { return int64(i) }
// SetInt64 sets the NeuronIndexVars value from an int64.
func (i *NeuronIndexVars) SetInt64(in int64) { *i = NeuronIndexVars(in) }
// Desc returns the description of the NeuronIndexVars value.
func (i NeuronIndexVars) Desc() string { return enums.Desc(i, _NeuronIndexVarsDescMap) }
// NeuronIndexVarsValues returns all possible values for the type NeuronIndexVars.
func NeuronIndexVarsValues() []NeuronIndexVars { return _NeuronIndexVarsValues }
// Values returns all possible values for the type NeuronIndexVars.
func (i NeuronIndexVars) Values() []enums.Enum { return enums.Values(_NeuronIndexVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i NeuronIndexVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *NeuronIndexVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "NeuronIndexVars")
}
var _PathTypesValues = []PathTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
// PathTypesN is the highest valid value for type PathTypes, plus one.
//
//gosl:start
const PathTypesN PathTypes = 15
//gosl:end
var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4, `DSPatchPath`: 5, `VSPatchPath`: 6, `VSMatrixPath`: 7, `DSMatrixPath`: 8, `CNIOPath`: 9, `CNeUpPath`: 10, `RWPath`: 11, `TDPredPath`: 12, `BLAPath`: 13, `HipPath`: 14}
var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations to drive updating of CtxtGe excitatory conductance, at end of plus (5IB Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `DSPatchPath implements the DSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 6: `VSPatchPath implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 7: `VSMatrixPath is for ventral striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 8: `DSMatrixPath is for dorsal striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 9: `CNIOPath is a cerebellar nucleus pathway trained by IO error signals.`, 10: `CNeUpPath is a cerebellar excitatory output neuron pathway, for upbound microzones, which learns to drive the output neurons at their target baseline activity level by adapting the inhibitory input strength.`, 11: `RWPath does dopamine-modulated learning for reward prediction: Da * Send.CaP (integrated current spiking activity). Uses RLPredPath parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 12: `TDPredPath does dopamine-modulated learning for reward prediction: DWt = Da * Send.CaDPrev (activity on *previous* timestep). Uses RLPredPath parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. 
Weights are positive-only.`, 13: `BLAPath implements the Rubicon BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`, 14: `HipPath is a special pathway for the hippocampus. TODO: fixme.`}
var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`, 5: `DSPatchPath`, 6: `VSPatchPath`, 7: `VSMatrixPath`, 8: `DSMatrixPath`, 9: `CNIOPath`, 10: `CNeUpPath`, 11: `RWPath`, 12: `TDPredPath`, 13: `BLAPath`, 14: `HipPath`}
// String returns the string representation of this PathTypes value.
func (i PathTypes) String() string { return enums.String(i, _PathTypesMap) }
// SetString sets the PathTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *PathTypes) SetString(s string) error {
return enums.SetString(i, s, _PathTypesValueMap, "PathTypes")
}
// Int64 returns the PathTypes value as an int64.
func (i PathTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the PathTypes value from an int64.
func (i *PathTypes) SetInt64(in int64) { *i = PathTypes(in) }
// Desc returns the description of the PathTypes value.
func (i PathTypes) Desc() string { return enums.Desc(i, _PathTypesDescMap) }
// PathTypesValues returns all possible values for the type PathTypes.
func PathTypesValues() []PathTypes { return _PathTypesValues }
// Values returns all possible values for the type PathTypes.
func (i PathTypes) Values() []enums.Enum { return enums.Values(_PathTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i PathTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *PathTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "PathTypes")
}
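// patchDWtSketch and blaDWtSketch are illustrative sketches (not generated
// code) of the learning rules quoted in the descriptions above: the DSPatch /
// VSPatch rule dW = ACh * DA * X * Y, and the BLA rule
// dW = ACh * X_{t-1} * (Y_t - Y_{t-1}). The activity factors here are
// simplified stand-ins; the actual rules are implemented in the pathway
// learning code elsewhere in the axon package.
func patchDWtSketch(ach, da, x, y float32) float32 {
	return ach * da * x * y // DSPatch / VSPatch delta weight
}

func blaDWtSketch(ach, xPrev, y, yPrev float32) float32 {
	return ach * xPrev * (y - yPrev) // BLA delta weight across trials
}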
var _GPLayerTypesValues = []GPLayerTypes{0, 1, 2}
// GPLayerTypesN is the highest valid value for type GPLayerTypes, plus one.
//
//gosl:start
const GPLayerTypesN GPLayerTypes = 3
//gosl:end
var _GPLayerTypesValueMap = map[string]GPLayerTypes{`GPePr`: 0, `GPeAk`: 1, `GPi`: 2}
var _GPLayerTypesDescMap = map[GPLayerTypes]string{0: `GPePr is the set of prototypical GPe neurons, mediating classical NoGo`, 1: `GPeAk is arkypallidal layer of GPe neurons, receiving inhibition from GPePr and projecting inhibition to Mtx`, 2: `GPi is the inner globus pallidus, functionally equivalent to SNr, receiving from MtxGo and GPePr, and sending inhibition to VThal`}
var _GPLayerTypesMap = map[GPLayerTypes]string{0: `GPePr`, 1: `GPeAk`, 2: `GPi`}
// String returns the string representation of this GPLayerTypes value.
func (i GPLayerTypes) String() string { return enums.String(i, _GPLayerTypesMap) }
// SetString sets the GPLayerTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *GPLayerTypes) SetString(s string) error {
return enums.SetString(i, s, _GPLayerTypesValueMap, "GPLayerTypes")
}
// Int64 returns the GPLayerTypes value as an int64.
func (i GPLayerTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the GPLayerTypes value from an int64.
func (i *GPLayerTypes) SetInt64(in int64) { *i = GPLayerTypes(in) }
// Desc returns the description of the GPLayerTypes value.
func (i GPLayerTypes) Desc() string { return enums.Desc(i, _GPLayerTypesDescMap) }
// GPLayerTypesValues returns all possible values for the type GPLayerTypes.
func GPLayerTypesValues() []GPLayerTypes { return _GPLayerTypesValues }
// Values returns all possible values for the type GPLayerTypes.
func (i GPLayerTypes) Values() []enums.Enum { return enums.Values(_GPLayerTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i GPLayerTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *GPLayerTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "GPLayerTypes")
}
var _PoolIndexVarsValues = []PoolIndexVars{0, 1, 2, 3}
// PoolIndexVarsN is the highest valid value for type PoolIndexVars, plus one.
//
//gosl:start
const PoolIndexVarsN PoolIndexVars = 4
//gosl:end
var _PoolIndexVarsValueMap = map[string]PoolIndexVars{`PoolNeurSt`: 0, `PoolNeurEd`: 1, `PoolLayerIdx`: 2, `PoolIsLayer`: 3}
var _PoolIndexVarsDescMap = map[PoolIndexVars]string{0: `PoolNeurSt is the starting layer-wise index within the list of neurons in this pool. Add layer starting neuron index (NeurSt) to get index into global network neurons list.`, 1: `PoolNeurEd is the ending (exclusive) layer-wise index within the list of neurons in this pool. Add layer starting neuron index (NeurSt) to get index into global network neurons list.`, 2: `PoolLayerIdx is the layer index for this pool.`, 3: `PoolIsLayer is true (> 0) if this pool represents the entire layer, which is always the first pool in the list of pools for a layer.`}
var _PoolIndexVarsMap = map[PoolIndexVars]string{0: `PoolNeurSt`, 1: `PoolNeurEd`, 2: `PoolLayerIdx`, 3: `PoolIsLayer`}
// String returns the string representation of this PoolIndexVars value.
func (i PoolIndexVars) String() string { return enums.String(i, _PoolIndexVarsMap) }
// SetString sets the PoolIndexVars value from its string representation,
// and returns an error if the string is invalid.
func (i *PoolIndexVars) SetString(s string) error {
return enums.SetString(i, s, _PoolIndexVarsValueMap, "PoolIndexVars")
}
// Int64 returns the PoolIndexVars value as an int64.
func (i PoolIndexVars) Int64() int64 { return int64(i) }
// SetInt64 sets the PoolIndexVars value from an int64.
func (i *PoolIndexVars) SetInt64(in int64) { *i = PoolIndexVars(in) }
// Desc returns the description of the PoolIndexVars value.
func (i PoolIndexVars) Desc() string { return enums.Desc(i, _PoolIndexVarsDescMap) }
// PoolIndexVarsValues returns all possible values for the type PoolIndexVars.
func PoolIndexVarsValues() []PoolIndexVars { return _PoolIndexVarsValues }
// Values returns all possible values for the type PoolIndexVars.
func (i PoolIndexVars) Values() []enums.Enum { return enums.Values(_PoolIndexVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i PoolIndexVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *PoolIndexVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "PoolIndexVars")
}
var _PoolIntVarsValues = []PoolIntVars{0, 1, 2, 3, 4, 5}
// PoolIntVarsN is the highest valid value for type PoolIntVars, plus one.
//
//gosl:start
const PoolIntVarsN PoolIntVars = 6
//gosl:end
var _PoolIntVarsValueMap = map[string]PoolIntVars{`Clamped`: 0, `PoolGated`: 1, `FFsRawInt`: 2, `FBsRawInt`: 3, `GeExtRawInt`: 4, `PoolIntAvgMaxStart`: 5}
var _PoolIntVarsDescMap = map[PoolIntVars]string{0: `Clamped if true (!=0), this layer is hard-clamped and should use GeExts exclusively for PV.`, 1: `PoolGated is true (> 0) if this pool gated (for [MatrixLayer], [BGThalLayer])`, 2: `FFsRawInt is the int32 atomic add compatible integration of [fsfffb.FFsRaw].`, 3: `FBsRawInt is the int32 atomic add compatible integration of [fsfffb.FBsRaw].`, 4: `GeExtRawInt is the int32 atomic add compatible integration of [fsfffb.GeExtRaw].`, 5: `PoolIntAvgMaxStart is the starting point for int32 AvgMax variables. Use AvgMaxIntVarIndex to get the relevant variable index. There are only values for Cycle phase, for the different variables.`}
var _PoolIntVarsMap = map[PoolIntVars]string{0: `Clamped`, 1: `PoolGated`, 2: `FFsRawInt`, 3: `FBsRawInt`, 4: `GeExtRawInt`, 5: `PoolIntAvgMaxStart`}
// String returns the string representation of this PoolIntVars value.
func (i PoolIntVars) String() string { return enums.String(i, _PoolIntVarsMap) }
// SetString sets the PoolIntVars value from its string representation,
// and returns an error if the string is invalid.
func (i *PoolIntVars) SetString(s string) error {
return enums.SetString(i, s, _PoolIntVarsValueMap, "PoolIntVars")
}
// Int64 returns the PoolIntVars value as an int64.
func (i PoolIntVars) Int64() int64 { return int64(i) }
// SetInt64 sets the PoolIntVars value from an int64.
func (i *PoolIntVars) SetInt64(in int64) { *i = PoolIntVars(in) }
// Desc returns the description of the PoolIntVars value.
func (i PoolIntVars) Desc() string { return enums.Desc(i, _PoolIntVarsDescMap) }
// PoolIntVarsValues returns all possible values for the type PoolIntVars.
func PoolIntVarsValues() []PoolIntVars { return _PoolIntVarsValues }
// Values returns all possible values for the type PoolIntVars.
func (i PoolIntVars) Values() []enums.Enum { return enums.Values(_PoolIntVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i PoolIntVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *PoolIntVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "PoolIntVars")
}
var _AvgMaxValues = []AvgMax{0, 1}
// AvgMaxN is the highest valid value for type AvgMax, plus one.
//
//gosl:start
const AvgMaxN AvgMax = 2
//gosl:end
var _AvgMaxValueMap = map[string]AvgMax{`Avg`: 0, `Max`: 1}
var _AvgMaxDescMap = map[AvgMax]string{0: ``, 1: ``}
var _AvgMaxMap = map[AvgMax]string{0: `Avg`, 1: `Max`}
// String returns the string representation of this AvgMax value.
func (i AvgMax) String() string { return enums.String(i, _AvgMaxMap) }
// SetString sets the AvgMax value from its string representation,
// and returns an error if the string is invalid.
func (i *AvgMax) SetString(s string) error { return enums.SetString(i, s, _AvgMaxValueMap, "AvgMax") }
// Int64 returns the AvgMax value as an int64.
func (i AvgMax) Int64() int64 { return int64(i) }
// SetInt64 sets the AvgMax value from an int64.
func (i *AvgMax) SetInt64(in int64) { *i = AvgMax(in) }
// Desc returns the description of the AvgMax value.
func (i AvgMax) Desc() string { return enums.Desc(i, _AvgMaxDescMap) }
// AvgMaxValues returns all possible values for the type AvgMax.
func AvgMaxValues() []AvgMax { return _AvgMaxValues }
// Values returns all possible values for the type AvgMax.
func (i AvgMax) Values() []enums.Enum { return enums.Values(_AvgMaxValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i AvgMax) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *AvgMax) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "AvgMax") }
var _AvgMaxPhasesValues = []AvgMaxPhases{0, 1, 2, 3}
// AvgMaxPhasesN is the highest valid value for type AvgMaxPhases, plus one.
//
//gosl:start
const AvgMaxPhasesN AvgMaxPhases = 4
//gosl:end
var _AvgMaxPhasesValueMap = map[string]AvgMaxPhases{`Cycle`: 0, `Minus`: 1, `Plus`: 2, `Prev`: 3}
var _AvgMaxPhasesDescMap = map[AvgMaxPhases]string{0: `Cycle is the current cycle, which is the source for the rest.`, 1: `Minus is at the end of the minus phase.`, 2: `Plus is at the end of the plus phase.`, 3: `Prev is at the end of the previous plus phase.`}
var _AvgMaxPhasesMap = map[AvgMaxPhases]string{0: `Cycle`, 1: `Minus`, 2: `Plus`, 3: `Prev`}
// String returns the string representation of this AvgMaxPhases value.
func (i AvgMaxPhases) String() string { return enums.String(i, _AvgMaxPhasesMap) }
// SetString sets the AvgMaxPhases value from its string representation,
// and returns an error if the string is invalid.
func (i *AvgMaxPhases) SetString(s string) error {
return enums.SetString(i, s, _AvgMaxPhasesValueMap, "AvgMaxPhases")
}
// Int64 returns the AvgMaxPhases value as an int64.
func (i AvgMaxPhases) Int64() int64 { return int64(i) }
// SetInt64 sets the AvgMaxPhases value from an int64.
func (i *AvgMaxPhases) SetInt64(in int64) { *i = AvgMaxPhases(in) }
// Desc returns the description of the AvgMaxPhases value.
func (i AvgMaxPhases) Desc() string { return enums.Desc(i, _AvgMaxPhasesDescMap) }
// AvgMaxPhasesValues returns all possible values for the type AvgMaxPhases.
func AvgMaxPhasesValues() []AvgMaxPhases { return _AvgMaxPhasesValues }
// Values returns all possible values for the type AvgMaxPhases.
func (i AvgMaxPhases) Values() []enums.Enum { return enums.Values(_AvgMaxPhasesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i AvgMaxPhases) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *AvgMaxPhases) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "AvgMaxPhases")
}
var _AvgMaxVarsValues = []AvgMaxVars{0, 1, 2, 3, 4, 5, 6}
// AvgMaxVarsN is the highest valid value for type AvgMaxVars, plus one.
//
//gosl:start
const AvgMaxVarsN AvgMaxVars = 7
//gosl:end
var _AvgMaxVarsValueMap = map[string]AvgMaxVars{`CaP`: 0, `CaD`: 1, `CaPMax`: 2, `Act`: 3, `GeInt`: 4, `GiInt`: 5, `AvgDif`: 6}
var _AvgMaxVarsDescMap = map[AvgMaxVars]string{0: `CaP is the primary variable for tracking overall pool activity over a recent timescale, integrated at roughly 40 msec time constant.`, 1: `CaD is a slower moving activation signal, capable of reflecting activity over the entire trial.`, 2: `CaPMax is the maximum CaP over the trial of processing.`, 3: `Act is the computed rate-code equivalent of current spike rate.`, 4: `GeInt is the integrated running-average value of excitatory conductance.`, 5: `GiInt is the integrated running-average value of inhibitory conductance.`, 6: `AvgDif is the integrated AvgDif, i.e., the difference between AvgPct and TrgAvg. Only the Plus phase is used.`}
var _AvgMaxVarsMap = map[AvgMaxVars]string{0: `CaP`, 1: `CaD`, 2: `CaPMax`, 3: `Act`, 4: `GeInt`, 5: `GiInt`, 6: `AvgDif`}
// String returns the string representation of this AvgMaxVars value.
func (i AvgMaxVars) String() string { return enums.String(i, _AvgMaxVarsMap) }
// SetString sets the AvgMaxVars value from its string representation,
// and returns an error if the string is invalid.
func (i *AvgMaxVars) SetString(s string) error {
return enums.SetString(i, s, _AvgMaxVarsValueMap, "AvgMaxVars")
}
// Int64 returns the AvgMaxVars value as an int64.
func (i AvgMaxVars) Int64() int64 { return int64(i) }
// SetInt64 sets the AvgMaxVars value from an int64.
func (i *AvgMaxVars) SetInt64(in int64) { *i = AvgMaxVars(in) }
// Desc returns the description of the AvgMaxVars value.
func (i AvgMaxVars) Desc() string { return enums.Desc(i, _AvgMaxVarsDescMap) }
// AvgMaxVarsValues returns all possible values for the type AvgMaxVars.
func AvgMaxVarsValues() []AvgMaxVars { return _AvgMaxVarsValues }
// Values returns all possible values for the type AvgMaxVars.
func (i AvgMaxVars) Values() []enums.Enum { return enums.Values(_AvgMaxVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i AvgMaxVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *AvgMaxVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "AvgMaxVars")
}
var _SynapseVarsValues = []SynapseVars{0, 1, 2, 3, 4}
// SynapseVarsN is the highest valid value for type SynapseVars, plus one.
//
//gosl:start
const SynapseVarsN SynapseVars = 5
//gosl:end
var _SynapseVarsValueMap = map[string]SynapseVars{`Wt`: 0, `LWt`: 1, `SWt`: 2, `DWt`: 3, `DSWt`: 4}
var _SynapseVarsDescMap = map[SynapseVars]string{0: `Wt is the effective synaptic weight value, determining how much conductance one presynaptic spike drives into the receiving neuron. Biologically it represents the number of effective AMPA receptors in the synapse. Wt = [SWt] * WtSig([LWt]), where WtSig is the sigmoidal contrast enhancement function that produces values between 0-2 based on LWt, centered on 1.`, 1: `LWt is the rapid, online learning, linear weight value. It learns on every trial according to the learning rate (LRate) parameter. Biologically, this represents the internal biochemical processes that drive the trafficking of AMPA receptors in the synaptic density.`, 2: `SWt is a slowly adapting structural weight value, which acts as a multiplicative scaling factor on net synaptic efficacy [Wt]. Biologically it represents the physical size and efficacy of the dendritic spine. SWt values adapt in a slower outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial weight variance is partially or fully captured in the SWt values, with LWt capturing the remainder.`, 3: `DWt is delta (change in) synaptic weight, from learning. This updates [LWt] on every trial. It is reset to 0 after it is applied, but the network view captures this value just prior to application.`, 4: `DSWt is the accumulated change in the [SWt] slow structural weight, computed as the accumulation of [DWt] values over the longer slow weight update window.`}
var _SynapseVarsMap = map[SynapseVars]string{0: `Wt`, 1: `LWt`, 2: `SWt`, 3: `DWt`, 4: `DSWt`}
// String returns the string representation of this SynapseVars value.
func (i SynapseVars) String() string { return enums.String(i, _SynapseVarsMap) }
// SetString sets the SynapseVars value from its string representation,
// and returns an error if the string is invalid.
func (i *SynapseVars) SetString(s string) error {
return enums.SetString(i, s, _SynapseVarsValueMap, "SynapseVars")
}
// Int64 returns the SynapseVars value as an int64.
func (i SynapseVars) Int64() int64 { return int64(i) }
// SetInt64 sets the SynapseVars value from an int64.
func (i *SynapseVars) SetInt64(in int64) { *i = SynapseVars(in) }
// Desc returns the description of the SynapseVars value.
func (i SynapseVars) Desc() string { return enums.Desc(i, _SynapseVarsDescMap) }
// SynapseVarsValues returns all possible values for the type SynapseVars.
func SynapseVarsValues() []SynapseVars { return _SynapseVarsValues }
// Values returns all possible values for the type SynapseVars.
func (i SynapseVars) Values() []enums.Enum { return enums.Values(_SynapseVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i SynapseVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *SynapseVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "SynapseVars")
}
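// wtSigSketch is an illustrative sketch (not the library's actual WtSig) of
// the sigmoidal contrast enhancement described for [Wt] above: it maps the
// linear [LWt] in [0, 1] to a multiplier in [0, 2], centered on 1 at
// LWt = 0.5. The gain of 6 is an assumed value for illustration only.
func wtSigSketch(lwt float32) float32 {
	if lwt <= 0 {
		return 0
	}
	if lwt >= 1 {
		return 2
	}
	r := (1 - lwt) / lwt
	r6 := r * r * r * r * r * r // sigmoid gain of 6 (assumed)
	return 2 / (1 + r6)
}

// wtSketch composes the effective weight per the description above:
// Wt = SWt * WtSig(LWt).
func wtSketch(swt, lwt float32) float32 { return swt * wtSigSketch(lwt) }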
var _SynapseTraceVarsValues = []SynapseTraceVars{0, 1, 2}
// SynapseTraceVarsN is the highest valid value for type SynapseTraceVars, plus one.
//
//gosl:start
const SynapseTraceVarsN SynapseTraceVars = 3
//gosl:end
var _SynapseTraceVarsValueMap = map[string]SynapseTraceVars{`Tr`: 0, `DTr`: 1, `DiDWt`: 2}
var _SynapseTraceVarsDescMap = map[SynapseTraceVars]string{0: `Tr is trace of synaptic activity over time, which is used for credit assignment in learning. In MatrixPath this is a tag that is then updated later when US occurs.`, 1: `DTr is delta (change in) Tr trace of synaptic activity over time.`, 2: `DiDWt is delta weight for each data parallel index (Di). This is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values.`}
var _SynapseTraceVarsMap = map[SynapseTraceVars]string{0: `Tr`, 1: `DTr`, 2: `DiDWt`}
// String returns the string representation of this SynapseTraceVars value.
func (i SynapseTraceVars) String() string { return enums.String(i, _SynapseTraceVarsMap) }
// SetString sets the SynapseTraceVars value from its string representation,
// and returns an error if the string is invalid.
func (i *SynapseTraceVars) SetString(s string) error {
return enums.SetString(i, s, _SynapseTraceVarsValueMap, "SynapseTraceVars")
}
// Int64 returns the SynapseTraceVars value as an int64.
func (i SynapseTraceVars) Int64() int64 { return int64(i) }
// SetInt64 sets the SynapseTraceVars value from an int64.
func (i *SynapseTraceVars) SetInt64(in int64) { *i = SynapseTraceVars(in) }
// Desc returns the description of the SynapseTraceVars value.
func (i SynapseTraceVars) Desc() string { return enums.Desc(i, _SynapseTraceVarsDescMap) }
// SynapseTraceVarsValues returns all possible values for the type SynapseTraceVars.
func SynapseTraceVarsValues() []SynapseTraceVars { return _SynapseTraceVarsValues }
// Values returns all possible values for the type SynapseTraceVars.
func (i SynapseTraceVars) Values() []enums.Enum { return enums.Values(_SynapseTraceVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i SynapseTraceVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *SynapseTraceVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "SynapseTraceVars")
}
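// sumDiDWtSketch is an illustrative sketch (not generated code) of the
// aggregation described for [DiDWt] above: the per-data-parallel-index delta
// weights are summed into the single overall DWt for a synapse, which may
// then be further integrated across MPI nodes.
func sumDiDWtSketch(diDWt []float32) float32 {
	var dwt float32
	for _, d := range diDWt {
		dwt += d
	}
	return dwt
}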
var _SynapseIndexVarsValues = []SynapseIndexVars{0, 1, 2}
// SynapseIndexVarsN is the highest valid value for type SynapseIndexVars, plus one.
//
//gosl:start
const SynapseIndexVarsN SynapseIndexVars = 3
//gosl:end
var _SynapseIndexVarsValueMap = map[string]SynapseIndexVars{`SynRecvIndex`: 0, `SynSendIndex`: 1, `SynPathIndex`: 2}
var _SynapseIndexVarsDescMap = map[SynapseIndexVars]string{0: `SynRecvIndex is receiving neuron index in network's global list of neurons`, 1: `SynSendIndex is sending neuron index in network's global list of neurons`, 2: `SynPathIndex is pathway index in global list of pathways organized as [Layers][RecvPaths]`}
var _SynapseIndexVarsMap = map[SynapseIndexVars]string{0: `SynRecvIndex`, 1: `SynSendIndex`, 2: `SynPathIndex`}
// String returns the string representation of this SynapseIndexVars value.
func (i SynapseIndexVars) String() string { return enums.String(i, _SynapseIndexVarsMap) }
// SetString sets the SynapseIndexVars value from its string representation,
// and returns an error if the string is invalid.
func (i *SynapseIndexVars) SetString(s string) error {
return enums.SetString(i, s, _SynapseIndexVarsValueMap, "SynapseIndexVars")
}
// Int64 returns the SynapseIndexVars value as an int64.
func (i SynapseIndexVars) Int64() int64 { return int64(i) }
// SetInt64 sets the SynapseIndexVars value from an int64.
func (i *SynapseIndexVars) SetInt64(in int64) { *i = SynapseIndexVars(in) }
// Desc returns the description of the SynapseIndexVars value.
func (i SynapseIndexVars) Desc() string { return enums.Desc(i, _SynapseIndexVarsDescMap) }
// SynapseIndexVarsValues returns all possible values for the type SynapseIndexVars.
func SynapseIndexVarsValues() []SynapseIndexVars { return _SynapseIndexVarsValues }
// Values returns all possible values for the type SynapseIndexVars.
func (i SynapseIndexVars) Values() []enums.Enum { return enums.Values(_SynapseIndexVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i SynapseIndexVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *SynapseIndexVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "SynapseIndexVars")
}
// Code generated by "gosl"; DO NOT EDIT
package axon
import (
"embed"
"fmt"
"math"
"unsafe"
"cogentcore.org/core/gpu"
"cogentcore.org/lab/tensor"
)
//go:embed shaders/*.wgsl
var shaders embed.FS
var (
// GPUInitialized is true once the GPU system has been initialized.
// Prevents multiple initializations.
GPUInitialized bool
// ComputeGPU is the compute gpu device.
// Set this prior to calling GPUInit() to use an existing device.
ComputeGPU *gpu.GPU
// BorrowedGPU is true if our ComputeGPU is set externally,
// versus created specifically for this system. If external,
// we don't release it.
BorrowedGPU bool
// UseGPU indicates whether to use GPU vs. CPU.
UseGPU bool
)
// GPUSystem is a GPU compute System with kernels operating on the
// same set of data variables.
var GPUSystem *gpu.ComputeSystem
// GPUVars is an enum for GPU variables, for specifying what to sync.
type GPUVars int32 //enums:enum
const (
LayersVar GPUVars = 0
PathsVar GPUVars = 1
NetworkIxsVar GPUVars = 2
PoolIxsVar GPUVars = 3
NeuronIxsVar GPUVars = 4
SynapseIxsVar GPUVars = 5
PathSendConVar GPUVars = 6
RecvPathIxsVar GPUVars = 7
PathRecvConVar GPUVars = 8
RecvSynIxsVar GPUVars = 9
CtxVar GPUVars = 10
NeuronsVar GPUVars = 11
NeuronAvgsVar GPUVars = 12
LayerStatesVar GPUVars = 13
GlobalScalarsVar GPUVars = 14
GlobalVectorsVar GPUVars = 15
ExtsVar GPUVars = 16
PoolsVar GPUVars = 17
PoolsIntVar GPUVars = 18
PathGBufVar GPUVars = 19
PathGSynsVar GPUVars = 20
SynapsesVar GPUVars = 21
SynapseTracesVar GPUVars = 22
)
// Tensor stride variables
var TensorStrides tensor.Uint32
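// exampleBorrowGPUSketch is an illustrative usage sketch (not generated code)
// of the behavior described in the comments above: setting ComputeGPU before
// calling GPUInit makes the system borrow an existing device, in which case
// BorrowedGPU is set and the device is not released by this system.
func exampleBorrowGPUSketch(existing *gpu.GPU) {
	ComputeGPU = existing // use the caller-provided device
	UseGPU = true         // route computation to the GPU
	GPUInit()             // safe to call repeatedly; only initializes once
}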
// GPUInit initializes the GPU compute system,
// configuring system(s), variables and kernels.
// It is safe to call multiple times: detects if already run.
func GPUInit() {
if GPUInitialized {
return
}
GPUInitialized = true
if ComputeGPU == nil { // set prior to this call to use an external device
ComputeGPU = gpu.NewComputeGPU()
} else {
BorrowedGPU = true
}
gp := ComputeGPU
_ = fmt.Sprintf("%g", math.NaN()) // keep imports happy
{
sy := gpu.NewComputeSystem(gp, "Default")
GPUSystem = sy
vars := sy.Vars()
{
sgp := vars.AddGroup(gpu.Storage, "Params")
var vr *gpu.Var
_ = vr
vr = sgp.Add("TensorStrides", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.AddStruct("Layers", int(unsafe.Sizeof(LayerParams{})), 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.AddStruct("Paths", int(unsafe.Sizeof(PathParams{})), 1, gpu.ComputeShader)
vr.ReadOnly = true
sgp.SetNValues(1)
}
{
sgp := vars.AddGroup(gpu.Storage, "Indexes")
var vr *gpu.Var
_ = vr
vr = sgp.AddStruct("NetworkIxs", int(unsafe.Sizeof(NetworkIndexes{})), 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("PoolIxs", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("NeuronIxs", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("SynapseIxs", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("PathSendCon", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("RecvPathIxs", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("PathRecvCon", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
vr = sgp.Add("RecvSynIxs", gpu.Uint32, 1, gpu.ComputeShader)
vr.ReadOnly = true
sgp.SetNValues(1)
}
{
sgp := vars.AddGroup(gpu.Storage, "Neurons")
var vr *gpu.Var
_ = vr
vr = sgp.AddStruct("Ctx", int(unsafe.Sizeof(Context{})), 1, gpu.ComputeShader)
vr = sgp.Add("Neurons", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("NeuronAvgs", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("LayerStates", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("GlobalScalars", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("GlobalVectors", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("Exts", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("Pools", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("PoolsInt", gpu.Int32, 1, gpu.ComputeShader)
sgp.SetNValues(1)
}
{
sgp := vars.AddGroup(gpu.Storage, "Synapse")
var vr *gpu.Var
_ = vr
vr = sgp.Add("PathGBuf", gpu.Int32, 1, gpu.ComputeShader)
vr = sgp.Add("PathGSyns", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("Synapses", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces0", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces1", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces2", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces3", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces4", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces5", gpu.Float32, 1, gpu.ComputeShader)
vr = sgp.Add("SynapseTraces6", gpu.Float32, 1, gpu.ComputeShader)
sgp.SetNValues(1)
}
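// The first argument to AddVarUsed in the pipeline setup below is the
// variable group index, corresponding to the order the groups were added
// above: 0 = Params, 1 = Indexes, 2 = Neurons, 3 = Synapse.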
var pl *gpu.ComputePipeline
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/AdaptGiLayer.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/ApplyExtsNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "Exts")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/Beta1Neuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/Beta2Neuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/BetweenGi.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "Pools")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/CycleInc.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/CycleNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(2, "GlobalVectors")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/CyclePost.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(2, "GlobalVectors")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtFromDiSyn.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "SynapseIxs")
pl.AddVarUsed(3, "SynapseTraces0")
pl.AddVarUsed(3, "SynapseTraces1")
pl.AddVarUsed(3, "SynapseTraces2")
pl.AddVarUsed(3, "SynapseTraces3")
pl.AddVarUsed(3, "SynapseTraces4")
pl.AddVarUsed(3, "SynapseTraces5")
pl.AddVarUsed(3, "SynapseTraces6")
pl.AddVarUsed(3, "Synapses")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtSubMeanNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(1, "PathRecvCon")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "RecvPathIxs")
pl.AddVarUsed(1, "RecvSynIxs")
pl.AddVarUsed(3, "Synapses")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtSyn.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(1, "SynapseIxs")
pl.AddVarUsed(3, "SynapseTraces0")
pl.AddVarUsed(3, "SynapseTraces1")
pl.AddVarUsed(3, "SynapseTraces2")
pl.AddVarUsed(3, "SynapseTraces3")
pl.AddVarUsed(3, "SynapseTraces4")
pl.AddVarUsed(3, "SynapseTraces5")
pl.AddVarUsed(3, "SynapseTraces6")
pl.AddVarUsed(3, "Synapses")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/GPUTestWrite.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/GatherSpikes.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(3, "PathGBuf")
pl.AddVarUsed(3, "PathGSyns")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "PoolsInt")
pl.AddVarUsed(1, "RecvPathIxs")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/InitGBuffsPath.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(3, "PathGBuf")
pl.AddVarUsed(3, "PathGSyns")
pl.AddVarUsed(0, "Paths")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/LayerGi.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhaseNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePool.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePost.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(2, "GlobalVectors")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateLayer.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseEndNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseEndPool.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseEndPost.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(2, "GlobalVectors")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseStartContext.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseStartNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/PoolGi.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/SendSpike.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(2, "GlobalScalars")
pl.AddVarUsed(2, "GlobalVectors")
pl.AddVarUsed(2, "LayerStates")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(3, "PathGBuf")
pl.AddVarUsed(1, "PathSendCon")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(1, "SynapseIxs")
pl.AddVarUsed(3, "Synapses")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/SlowAdaptLayer.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(1, "PoolIxs")
pl.AddVarUsed(2, "Pools")
pl.AddVarUsed(2, "PoolsInt")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/SlowAdaptNeuron.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(1, "NeuronIxs")
pl.AddVarUsed(1, "PathRecvCon")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "RecvPathIxs")
pl.AddVarUsed(1, "RecvSynIxs")
pl.AddVarUsed(3, "Synapses")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/WtFromDWtLayer.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(0, "Layers")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(2, "NeuronAvgs")
pl.AddVarUsed(2, "Neurons")
pl.AddVarUsed(1, "PoolIxs")
pl = gpu.NewComputePipelineShaderFS(shaders, "shaders/WtFromDWtSyn.wgsl", sy)
pl.AddVarUsed(0, "TensorStrides")
pl.AddVarUsed(2, "Ctx")
pl.AddVarUsed(1, "NetworkIxs")
pl.AddVarUsed(0, "Paths")
pl.AddVarUsed(1, "SynapseIxs")
pl.AddVarUsed(3, "Synapses")
sy.Config()
}
}
// GPURelease releases the GPU compute system resources.
// Call this at program exit.
func GPURelease() {
if GPUSystem != nil {
GPUSystem.Release()
GPUSystem = nil
}
if !BorrowedGPU && ComputeGPU != nil {
ComputeGPU.Release()
}
ComputeGPU = nil
}
// RunAdaptGiLayer runs the AdaptGiLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneAdaptGiLayer call does Run and Done for a
// single run-and-sync case.
func RunAdaptGiLayer(n int) {
if UseGPU {
RunAdaptGiLayerGPU(n)
} else {
RunAdaptGiLayerCPU(n)
}
}
// RunAdaptGiLayerGPU runs the AdaptGiLayer kernel on the GPU. See [RunAdaptGiLayer] for more info.
func RunAdaptGiLayerGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["AdaptGiLayer"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunAdaptGiLayerCPU runs the AdaptGiLayer kernel on the CPU.
func RunAdaptGiLayerCPU(n int) {
gpu.VectorizeFunc(0, n, AdaptGiLayer)
}
// RunOneAdaptGiLayer runs the AdaptGiLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneAdaptGiLayer(n int, syncVars ...GPUVars) {
if UseGPU {
RunAdaptGiLayerGPU(n)
RunDone(syncVars...)
} else {
RunAdaptGiLayerCPU(n)
}
}
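// exampleAdaptGiLayerUsage is an illustrative sketch, not generated code: it
// contrasts the single-shot RunOneAdaptGiLayer call with the batched form,
// following the doc comments above. The element count and synced variable
// are hypothetical placeholders for illustration only.
func exampleAdaptGiLayerUsage(nLayers int) {
// Single-shot: run the kernel and sync LayerStates back in one call.
RunOneAdaptGiLayer(nLayers, LayerStatesVar)
// Batched: queue the kernel (on GPU) and submit explicitly via RunDone,
// which is the efficient pattern when more Run* calls follow.
RunAdaptGiLayer(nLayers)
RunDone(LayerStatesVar)
}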
// RunApplyExtsNeuron runs the ApplyExtsNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneApplyExtsNeuron call does Run and Done for a
// single run-and-sync case.
func RunApplyExtsNeuron(n int) {
if UseGPU {
RunApplyExtsNeuronGPU(n)
} else {
RunApplyExtsNeuronCPU(n)
}
}
// RunApplyExtsNeuronGPU runs the ApplyExtsNeuron kernel on the GPU. See [RunApplyExtsNeuron] for more info.
func RunApplyExtsNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["ApplyExtsNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunApplyExtsNeuronCPU runs the ApplyExtsNeuron kernel on the CPU.
func RunApplyExtsNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, ApplyExtsNeuron)
}
// RunOneApplyExtsNeuron runs the ApplyExtsNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneApplyExtsNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunApplyExtsNeuronGPU(n)
RunDone(syncVars...)
} else {
RunApplyExtsNeuronCPU(n)
}
}
// RunBeta1Neuron runs the Beta1Neuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneBeta1Neuron call does Run and Done for a
// single run-and-sync case.
func RunBeta1Neuron(n int) {
if UseGPU {
RunBeta1NeuronGPU(n)
} else {
RunBeta1NeuronCPU(n)
}
}
// RunBeta1NeuronGPU runs the Beta1Neuron kernel on the GPU. See [RunBeta1Neuron] for more info.
func RunBeta1NeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["Beta1Neuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunBeta1NeuronCPU runs the Beta1Neuron kernel on the CPU.
func RunBeta1NeuronCPU(n int) {
gpu.VectorizeFunc(0, n, Beta1Neuron)
}
// RunOneBeta1Neuron runs the Beta1Neuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneBeta1Neuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunBeta1NeuronGPU(n)
RunDone(syncVars...)
} else {
RunBeta1NeuronCPU(n)
}
}
// RunBeta2Neuron runs the Beta2Neuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneBeta2Neuron call does Run and Done for a
// single run-and-sync case.
func RunBeta2Neuron(n int) {
if UseGPU {
RunBeta2NeuronGPU(n)
} else {
RunBeta2NeuronCPU(n)
}
}
// RunBeta2NeuronGPU runs the Beta2Neuron kernel on the GPU. See [RunBeta2Neuron] for more info.
func RunBeta2NeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["Beta2Neuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunBeta2NeuronCPU runs the Beta2Neuron kernel on the CPU.
func RunBeta2NeuronCPU(n int) {
gpu.VectorizeFunc(0, n, Beta2Neuron)
}
// RunOneBeta2Neuron runs the Beta2Neuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneBeta2Neuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunBeta2NeuronGPU(n)
RunDone(syncVars...)
} else {
RunBeta2NeuronCPU(n)
}
}
// RunBetweenGi runs the BetweenGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneBetweenGi call does Run and Done for a
// single run-and-sync case.
func RunBetweenGi(n int) {
if UseGPU {
RunBetweenGiGPU(n)
} else {
RunBetweenGiCPU(n)
}
}
// RunBetweenGiGPU runs the BetweenGi kernel on the GPU. See [RunBetweenGi] for more info.
func RunBetweenGiGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["BetweenGi"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunBetweenGiCPU runs the BetweenGi kernel on the CPU.
func RunBetweenGiCPU(n int) {
gpu.VectorizeFunc(0, n, BetweenGi)
}
// RunOneBetweenGi runs the BetweenGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneBetweenGi(n int, syncVars ...GPUVars) {
if UseGPU {
RunBetweenGiGPU(n)
RunDone(syncVars...)
} else {
RunBetweenGiCPU(n)
}
}
// RunCycleInc runs the CycleInc kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneCycleInc call does Run and Done for a
// single run-and-sync case.
func RunCycleInc(n int) {
if UseGPU {
RunCycleIncGPU(n)
} else {
RunCycleIncCPU(n)
}
}
// RunCycleIncGPU runs the CycleInc kernel on the GPU. See [RunCycleInc] for more info.
func RunCycleIncGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["CycleInc"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunCycleIncCPU runs the CycleInc kernel on the CPU.
func RunCycleIncCPU(n int) {
gpu.VectorizeFunc(0, n, CycleInc)
}
// RunOneCycleInc runs the CycleInc kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneCycleInc(n int, syncVars ...GPUVars) {
if UseGPU {
RunCycleIncGPU(n)
RunDone(syncVars...)
} else {
RunCycleIncCPU(n)
}
}
// RunCycleNeuron runs the CycleNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneCycleNeuron call does Run and Done for a
// single run-and-sync case.
func RunCycleNeuron(n int) {
if UseGPU {
RunCycleNeuronGPU(n)
} else {
RunCycleNeuronCPU(n)
}
}
// RunCycleNeuronGPU runs the CycleNeuron kernel on the GPU. See [RunCycleNeuron] for more info.
func RunCycleNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["CycleNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunCycleNeuronCPU runs the CycleNeuron kernel on the CPU.
func RunCycleNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, CycleNeuron)
}
// RunOneCycleNeuron runs the CycleNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneCycleNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunCycleNeuronGPU(n)
RunDone(syncVars...)
} else {
RunCycleNeuronCPU(n)
}
}
// RunCyclePost runs the CyclePost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneCyclePost call does Run and Done for a
// single run-and-sync case.
func RunCyclePost(n int) {
if UseGPU {
RunCyclePostGPU(n)
} else {
RunCyclePostCPU(n)
}
}
// RunCyclePostGPU runs the CyclePost kernel on the GPU. See [RunCyclePost] for more info.
func RunCyclePostGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["CyclePost"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunCyclePostCPU runs the CyclePost kernel on the CPU.
func RunCyclePostCPU(n int) {
gpu.VectorizeFunc(0, n, CyclePost)
}
// RunOneCyclePost runs the CyclePost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneCyclePost(n int, syncVars ...GPUVars) {
if UseGPU {
RunCyclePostGPU(n)
RunDone(syncVars...)
} else {
RunCyclePostCPU(n)
}
}
// RunDWtFromDiSyn runs the DWtFromDiSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneDWtFromDiSyn call does Run and Done for a
// single run-and-sync case.
func RunDWtFromDiSyn(n int) {
if UseGPU {
RunDWtFromDiSynGPU(n)
} else {
RunDWtFromDiSynCPU(n)
}
}
// RunDWtFromDiSynGPU runs the DWtFromDiSyn kernel on the GPU. See [RunDWtFromDiSyn] for more info.
func RunDWtFromDiSynGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["DWtFromDiSyn"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunDWtFromDiSynCPU runs the DWtFromDiSyn kernel on the CPU.
func RunDWtFromDiSynCPU(n int) {
gpu.VectorizeFunc(0, n, DWtFromDiSyn)
}
// RunOneDWtFromDiSyn runs the DWtFromDiSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneDWtFromDiSyn(n int, syncVars ...GPUVars) {
if UseGPU {
RunDWtFromDiSynGPU(n)
RunDone(syncVars...)
} else {
RunDWtFromDiSynCPU(n)
}
}
// RunDWtSubMeanNeuron runs the DWtSubMeanNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneDWtSubMeanNeuron call does Run and Done for a
// single run-and-sync case.
func RunDWtSubMeanNeuron(n int) {
if UseGPU {
RunDWtSubMeanNeuronGPU(n)
} else {
RunDWtSubMeanNeuronCPU(n)
}
}
// RunDWtSubMeanNeuronGPU runs the DWtSubMeanNeuron kernel on the GPU. See [RunDWtSubMeanNeuron] for more info.
func RunDWtSubMeanNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["DWtSubMeanNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunDWtSubMeanNeuronCPU runs the DWtSubMeanNeuron kernel on the CPU.
func RunDWtSubMeanNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, DWtSubMeanNeuron)
}
// RunOneDWtSubMeanNeuron runs the DWtSubMeanNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneDWtSubMeanNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunDWtSubMeanNeuronGPU(n)
RunDone(syncVars...)
} else {
RunDWtSubMeanNeuronCPU(n)
}
}
// RunDWtSyn runs the DWtSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneDWtSyn call does Run and Done for a
// single run-and-sync case.
func RunDWtSyn(n int) {
if UseGPU {
RunDWtSynGPU(n)
} else {
RunDWtSynCPU(n)
}
}
// RunDWtSynGPU runs the DWtSyn kernel on the GPU. See [RunDWtSyn] for more info.
func RunDWtSynGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["DWtSyn"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunDWtSynCPU runs the DWtSyn kernel on the CPU.
func RunDWtSynCPU(n int) {
gpu.VectorizeFunc(0, n, DWtSyn)
}
// RunOneDWtSyn runs the DWtSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneDWtSyn(n int, syncVars ...GPUVars) {
if UseGPU {
RunDWtSynGPU(n)
RunDone(syncVars...)
} else {
RunDWtSynCPU(n)
}
}
// RunGPUTestWrite runs the GPUTestWrite kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneGPUTestWrite call does Run and Done for a
// single run-and-sync case.
func RunGPUTestWrite(n int) {
if UseGPU {
RunGPUTestWriteGPU(n)
} else {
RunGPUTestWriteCPU(n)
}
}
// RunGPUTestWriteGPU runs the GPUTestWrite kernel on the GPU. See [RunGPUTestWrite] for more info.
func RunGPUTestWriteGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["GPUTestWrite"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunGPUTestWriteCPU runs the GPUTestWrite kernel on the CPU.
func RunGPUTestWriteCPU(n int) {
gpu.VectorizeFunc(0, n, GPUTestWrite)
}
// RunOneGPUTestWrite runs the GPUTestWrite kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneGPUTestWrite(n int, syncVars ...GPUVars) {
if UseGPU {
RunGPUTestWriteGPU(n)
RunDone(syncVars...)
} else {
RunGPUTestWriteCPU(n)
}
}
// RunGatherSpikes runs the GatherSpikes kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneGatherSpikes call does Run and Done for a
// single run-and-sync case.
func RunGatherSpikes(n int) {
if UseGPU {
RunGatherSpikesGPU(n)
} else {
RunGatherSpikesCPU(n)
}
}
// RunGatherSpikesGPU runs the GatherSpikes kernel on the GPU. See [RunGatherSpikes] for more info.
func RunGatherSpikesGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["GatherSpikes"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunGatherSpikesCPU runs the GatherSpikes kernel on the CPU.
func RunGatherSpikesCPU(n int) {
gpu.VectorizeFunc(0, n, GatherSpikes)
}
// RunOneGatherSpikes runs the GatherSpikes kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneGatherSpikes(n int, syncVars ...GPUVars) {
if UseGPU {
RunGatherSpikesGPU(n)
RunDone(syncVars...)
} else {
RunGatherSpikesCPU(n)
}
}
// RunInitGBuffsPath runs the InitGBuffsPath kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneInitGBuffsPath call does Run and Done for a
// single run-and-sync case.
func RunInitGBuffsPath(n int) {
if UseGPU {
RunInitGBuffsPathGPU(n)
} else {
RunInitGBuffsPathCPU(n)
}
}
// RunInitGBuffsPathGPU runs the InitGBuffsPath kernel on the GPU. See [RunInitGBuffsPath] for more info.
func RunInitGBuffsPathGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["InitGBuffsPath"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunInitGBuffsPathCPU runs the InitGBuffsPath kernel on the CPU.
func RunInitGBuffsPathCPU(n int) {
gpu.VectorizeFunc(0, n, InitGBuffsPath)
}
// RunOneInitGBuffsPath runs the InitGBuffsPath kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneInitGBuffsPath(n int, syncVars ...GPUVars) {
if UseGPU {
RunInitGBuffsPathGPU(n)
RunDone(syncVars...)
} else {
RunInitGBuffsPathCPU(n)
}
}
// RunLayerGi runs the LayerGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneLayerGi call does Run and Done for a
// single run-and-sync case.
func RunLayerGi(n int) {
if UseGPU {
RunLayerGiGPU(n)
} else {
RunLayerGiCPU(n)
}
}
// RunLayerGiGPU runs the LayerGi kernel on the GPU. See [RunLayerGi] for more info.
func RunLayerGiGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["LayerGi"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunLayerGiCPU runs the LayerGi kernel on the CPU.
func RunLayerGiCPU(n int) {
gpu.VectorizeFunc(0, n, LayerGi)
}
// RunOneLayerGi runs the LayerGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneLayerGi(n int, syncVars ...GPUVars) {
if UseGPU {
RunLayerGiGPU(n)
RunDone(syncVars...)
} else {
RunLayerGiCPU(n)
}
}
// RunMinusPhaseNeuron runs the MinusPhaseNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneMinusPhaseNeuron call does Run and Done for a
// single run-and-sync case.
func RunMinusPhaseNeuron(n int) {
if UseGPU {
RunMinusPhaseNeuronGPU(n)
} else {
RunMinusPhaseNeuronCPU(n)
}
}
// RunMinusPhaseNeuronGPU runs the MinusPhaseNeuron kernel on the GPU. See [RunMinusPhaseNeuron] for more info.
func RunMinusPhaseNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["MinusPhaseNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunMinusPhaseNeuronCPU runs the MinusPhaseNeuron kernel on the CPU.
func RunMinusPhaseNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, MinusPhaseNeuron)
}
// RunOneMinusPhaseNeuron runs the MinusPhaseNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneMinusPhaseNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunMinusPhaseNeuronGPU(n)
RunDone(syncVars...)
} else {
RunMinusPhaseNeuronCPU(n)
}
}
// RunMinusPhasePool runs the MinusPhasePool kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneMinusPhasePool call does Run and Done for a
// single run-and-sync case.
func RunMinusPhasePool(n int) {
if UseGPU {
RunMinusPhasePoolGPU(n)
} else {
RunMinusPhasePoolCPU(n)
}
}
// RunMinusPhasePoolGPU runs the MinusPhasePool kernel on the GPU. See [RunMinusPhasePool] for more info.
func RunMinusPhasePoolGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["MinusPhasePool"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunMinusPhasePoolCPU runs the MinusPhasePool kernel on the CPU.
func RunMinusPhasePoolCPU(n int) {
gpu.VectorizeFunc(0, n, MinusPhasePool)
}
// RunOneMinusPhasePool runs the MinusPhasePool kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneMinusPhasePool(n int, syncVars ...GPUVars) {
if UseGPU {
RunMinusPhasePoolGPU(n)
RunDone(syncVars...)
} else {
RunMinusPhasePoolCPU(n)
}
}
// RunMinusPhasePost runs the MinusPhasePost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneMinusPhasePost call does Run and Done for a
// single run-and-sync case.
func RunMinusPhasePost(n int) {
if UseGPU {
RunMinusPhasePostGPU(n)
} else {
RunMinusPhasePostCPU(n)
}
}
// RunMinusPhasePostGPU runs the MinusPhasePost kernel on the GPU. See [RunMinusPhasePost] for more info.
func RunMinusPhasePostGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["MinusPhasePost"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunMinusPhasePostCPU runs the MinusPhasePost kernel on the CPU.
func RunMinusPhasePostCPU(n int) {
gpu.VectorizeFunc(0, n, MinusPhasePost)
}
// RunOneMinusPhasePost runs the MinusPhasePost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneMinusPhasePost(n int, syncVars ...GPUVars) {
if UseGPU {
RunMinusPhasePostGPU(n)
RunDone(syncVars...)
} else {
RunMinusPhasePostCPU(n)
}
}
// RunNewStateLayer runs the NewStateLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneNewStateLayer call does Run and Done for a
// single run-and-sync case.
func RunNewStateLayer(n int) {
if UseGPU {
RunNewStateLayerGPU(n)
} else {
RunNewStateLayerCPU(n)
}
}
// RunNewStateLayerGPU runs the NewStateLayer kernel on the GPU. See [RunNewStateLayer] for more info.
func RunNewStateLayerGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["NewStateLayer"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunNewStateLayerCPU runs the NewStateLayer kernel on the CPU.
func RunNewStateLayerCPU(n int) {
gpu.VectorizeFunc(0, n, NewStateLayer)
}
// RunOneNewStateLayer runs the NewStateLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneNewStateLayer(n int, syncVars ...GPUVars) {
if UseGPU {
RunNewStateLayerGPU(n)
RunDone(syncVars...)
} else {
RunNewStateLayerCPU(n)
}
}
// RunNewStateNeuron runs the NewStateNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneNewStateNeuron call does Run and Done for a
// single run-and-sync case.
func RunNewStateNeuron(n int) {
if UseGPU {
RunNewStateNeuronGPU(n)
} else {
RunNewStateNeuronCPU(n)
}
}
// RunNewStateNeuronGPU runs the NewStateNeuron kernel on the GPU. See [RunNewStateNeuron] for more info.
func RunNewStateNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["NewStateNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunNewStateNeuronCPU runs the NewStateNeuron kernel on the CPU.
func RunNewStateNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, NewStateNeuron)
}
// RunOneNewStateNeuron runs the NewStateNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneNewStateNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunNewStateNeuronGPU(n)
RunDone(syncVars...)
} else {
RunNewStateNeuronCPU(n)
}
}
// RunPlusPhaseEndNeuron runs the PlusPhaseEndNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePlusPhaseEndNeuron call does Run and Done for a
// single run-and-sync case.
func RunPlusPhaseEndNeuron(n int) {
if UseGPU {
RunPlusPhaseEndNeuronGPU(n)
} else {
RunPlusPhaseEndNeuronCPU(n)
}
}
// RunPlusPhaseEndNeuronGPU runs the PlusPhaseEndNeuron kernel on the GPU. See [RunPlusPhaseEndNeuron] for more info.
func RunPlusPhaseEndNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PlusPhaseEndNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPlusPhaseEndNeuronCPU runs the PlusPhaseEndNeuron kernel on the CPU.
func RunPlusPhaseEndNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, PlusPhaseEndNeuron)
}
// RunOnePlusPhaseEndNeuron runs the PlusPhaseEndNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePlusPhaseEndNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunPlusPhaseEndNeuronGPU(n)
RunDone(syncVars...)
} else {
RunPlusPhaseEndNeuronCPU(n)
}
}
// RunPlusPhaseEndPool runs the PlusPhaseEndPool kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePlusPhaseEndPool call does Run and Done for a
// single run-and-sync case.
func RunPlusPhaseEndPool(n int) {
if UseGPU {
RunPlusPhaseEndPoolGPU(n)
} else {
RunPlusPhaseEndPoolCPU(n)
}
}
// RunPlusPhaseEndPoolGPU runs the PlusPhaseEndPool kernel on the GPU. See [RunPlusPhaseEndPool] for more info.
func RunPlusPhaseEndPoolGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PlusPhaseEndPool"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPlusPhaseEndPoolCPU runs the PlusPhaseEndPool kernel on the CPU.
func RunPlusPhaseEndPoolCPU(n int) {
gpu.VectorizeFunc(0, n, PlusPhaseEndPool)
}
// RunOnePlusPhaseEndPool runs the PlusPhaseEndPool kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePlusPhaseEndPool(n int, syncVars ...GPUVars) {
if UseGPU {
RunPlusPhaseEndPoolGPU(n)
RunDone(syncVars...)
} else {
RunPlusPhaseEndPoolCPU(n)
}
}
// RunPlusPhaseEndPost runs the PlusPhaseEndPost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePlusPhaseEndPost call does Run and Done for a
// single run-and-sync case.
func RunPlusPhaseEndPost(n int) {
if UseGPU {
RunPlusPhaseEndPostGPU(n)
} else {
RunPlusPhaseEndPostCPU(n)
}
}
// RunPlusPhaseEndPostGPU runs the PlusPhaseEndPost kernel on the GPU. See [RunPlusPhaseEndPost] for more info.
func RunPlusPhaseEndPostGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PlusPhaseEndPost"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPlusPhaseEndPostCPU runs the PlusPhaseEndPost kernel on the CPU.
func RunPlusPhaseEndPostCPU(n int) {
gpu.VectorizeFunc(0, n, PlusPhaseEndPost)
}
// RunOnePlusPhaseEndPost runs the PlusPhaseEndPost kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePlusPhaseEndPost(n int, syncVars ...GPUVars) {
if UseGPU {
RunPlusPhaseEndPostGPU(n)
RunDone(syncVars...)
} else {
RunPlusPhaseEndPostCPU(n)
}
}
// RunPlusPhaseStartContext runs the PlusPhaseStartContext kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePlusPhaseStartContext call does Run and Done for a
// single run-and-sync case.
func RunPlusPhaseStartContext(n int) {
if UseGPU {
RunPlusPhaseStartContextGPU(n)
} else {
RunPlusPhaseStartContextCPU(n)
}
}
// RunPlusPhaseStartContextGPU runs the PlusPhaseStartContext kernel on the GPU. See [RunPlusPhaseStartContext] for more info.
func RunPlusPhaseStartContextGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PlusPhaseStartContext"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPlusPhaseStartContextCPU runs the PlusPhaseStartContext kernel on the CPU.
func RunPlusPhaseStartContextCPU(n int) {
gpu.VectorizeFunc(0, n, PlusPhaseStartContext)
}
// RunOnePlusPhaseStartContext runs the PlusPhaseStartContext kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePlusPhaseStartContext(n int, syncVars ...GPUVars) {
if UseGPU {
RunPlusPhaseStartContextGPU(n)
RunDone(syncVars...)
} else {
RunPlusPhaseStartContextCPU(n)
}
}
// RunPlusPhaseStartNeuron runs the PlusPhaseStartNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePlusPhaseStartNeuron call does Run and Done for a
// single run-and-sync case.
func RunPlusPhaseStartNeuron(n int) {
if UseGPU {
RunPlusPhaseStartNeuronGPU(n)
} else {
RunPlusPhaseStartNeuronCPU(n)
}
}
// RunPlusPhaseStartNeuronGPU runs the PlusPhaseStartNeuron kernel on the GPU. See [RunPlusPhaseStartNeuron] for more info.
func RunPlusPhaseStartNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PlusPhaseStartNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPlusPhaseStartNeuronCPU runs the PlusPhaseStartNeuron kernel on the CPU.
func RunPlusPhaseStartNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, PlusPhaseStartNeuron)
}
// RunOnePlusPhaseStartNeuron runs the PlusPhaseStartNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePlusPhaseStartNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunPlusPhaseStartNeuronGPU(n)
RunDone(syncVars...)
} else {
RunPlusPhaseStartNeuronCPU(n)
}
}
// RunPoolGi runs the PoolGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOnePoolGi call does Run and Done for a
// single run-and-sync case.
func RunPoolGi(n int) {
if UseGPU {
RunPoolGiGPU(n)
} else {
RunPoolGiCPU(n)
}
}
// RunPoolGiGPU runs the PoolGi kernel on the GPU. See [RunPoolGi] for more info.
func RunPoolGiGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["PoolGi"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunPoolGiCPU runs the PoolGi kernel on the CPU.
func RunPoolGiCPU(n int) {
gpu.VectorizeFunc(0, n, PoolGi)
}
// RunOnePoolGi runs the PoolGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOnePoolGi(n int, syncVars ...GPUVars) {
if UseGPU {
RunPoolGiGPU(n)
RunDone(syncVars...)
} else {
RunPoolGiCPU(n)
}
}
// RunSendSpike runs the SendSpike kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneSendSpike call does Run and Done for a
// single run-and-sync case.
func RunSendSpike(n int) {
if UseGPU {
RunSendSpikeGPU(n)
} else {
RunSendSpikeCPU(n)
}
}
// RunSendSpikeGPU runs the SendSpike kernel on the GPU. See [RunSendSpike] for more info.
func RunSendSpikeGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["SendSpike"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunSendSpikeCPU runs the SendSpike kernel on the CPU.
func RunSendSpikeCPU(n int) {
gpu.VectorizeFunc(0, n, SendSpike)
}
// RunOneSendSpike runs the SendSpike kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneSendSpike(n int, syncVars ...GPUVars) {
if UseGPU {
RunSendSpikeGPU(n)
RunDone(syncVars...)
} else {
RunSendSpikeCPU(n)
}
}
// RunSlowAdaptLayer runs the SlowAdaptLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneSlowAdaptLayer call does Run and Done for a
// single run-and-sync case.
func RunSlowAdaptLayer(n int) {
if UseGPU {
RunSlowAdaptLayerGPU(n)
} else {
RunSlowAdaptLayerCPU(n)
}
}
// RunSlowAdaptLayerGPU runs the SlowAdaptLayer kernel on the GPU. See [RunSlowAdaptLayer] for more info.
func RunSlowAdaptLayerGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["SlowAdaptLayer"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunSlowAdaptLayerCPU runs the SlowAdaptLayer kernel on the CPU.
func RunSlowAdaptLayerCPU(n int) {
gpu.VectorizeFunc(0, n, SlowAdaptLayer)
}
// RunOneSlowAdaptLayer runs the SlowAdaptLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneSlowAdaptLayer(n int, syncVars ...GPUVars) {
if UseGPU {
RunSlowAdaptLayerGPU(n)
RunDone(syncVars...)
} else {
RunSlowAdaptLayerCPU(n)
}
}
// RunSlowAdaptNeuron runs the SlowAdaptNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneSlowAdaptNeuron call does Run and Done for a
// single run-and-sync case.
func RunSlowAdaptNeuron(n int) {
if UseGPU {
RunSlowAdaptNeuronGPU(n)
} else {
RunSlowAdaptNeuronCPU(n)
}
}
// RunSlowAdaptNeuronGPU runs the SlowAdaptNeuron kernel on the GPU. See [RunSlowAdaptNeuron] for more info.
func RunSlowAdaptNeuronGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["SlowAdaptNeuron"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunSlowAdaptNeuronCPU runs the SlowAdaptNeuron kernel on the CPU.
func RunSlowAdaptNeuronCPU(n int) {
gpu.VectorizeFunc(0, n, SlowAdaptNeuron)
}
// RunOneSlowAdaptNeuron runs the SlowAdaptNeuron kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneSlowAdaptNeuron(n int, syncVars ...GPUVars) {
if UseGPU {
RunSlowAdaptNeuronGPU(n)
RunDone(syncVars...)
} else {
RunSlowAdaptNeuronCPU(n)
}
}
// RunWtFromDWtLayer runs the WtFromDWtLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneWtFromDWtLayer call does Run and Done for a
// single run-and-sync case.
func RunWtFromDWtLayer(n int) {
if UseGPU {
RunWtFromDWtLayerGPU(n)
} else {
RunWtFromDWtLayerCPU(n)
}
}
// RunWtFromDWtLayerGPU runs the WtFromDWtLayer kernel on the GPU. See [RunWtFromDWtLayer] for more info.
func RunWtFromDWtLayerGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["WtFromDWtLayer"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunWtFromDWtLayerCPU runs the WtFromDWtLayer kernel on the CPU.
func RunWtFromDWtLayerCPU(n int) {
gpu.VectorizeFunc(0, n, WtFromDWtLayer)
}
// RunOneWtFromDWtLayer runs the WtFromDWtLayer kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneWtFromDWtLayer(n int, syncVars ...GPUVars) {
if UseGPU {
RunWtFromDWtLayerGPU(n)
RunDone(syncVars...)
} else {
RunWtFromDWtLayerCPU(n)
}
}
// RunWtFromDWtSyn runs the WtFromDWtSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
// in the same command submission on the GPU, which is by far the most efficient.
// MUST call RunDone (with optional vars to sync) after all Run calls.
// Alternatively, a single-shot RunOneWtFromDWtSyn call does Run and Done for a
// single run-and-sync case.
func RunWtFromDWtSyn(n int) {
if UseGPU {
RunWtFromDWtSynGPU(n)
} else {
RunWtFromDWtSynCPU(n)
}
}
// RunWtFromDWtSynGPU runs the WtFromDWtSyn kernel on the GPU. See [RunWtFromDWtSyn] for more info.
func RunWtFromDWtSynGPU(n int) {
sy := GPUSystem
pl := sy.ComputePipelines["WtFromDWtSyn"]
ce, _ := sy.BeginComputePass()
pl.Dispatch1D(ce, n, 64)
}
// RunWtFromDWtSynCPU runs the WtFromDWtSyn kernel on the CPU.
func RunWtFromDWtSynCPU(n int) {
gpu.VectorizeFunc(0, n, WtFromDWtSyn)
}
// RunOneWtFromDWtSyn runs the WtFromDWtSyn kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// This version then calls RunDone with the given variables to sync
// after the Run, for a single-shot Run-and-Done call. If multiple kernels
// can be run in sequence, it is much more efficient to do multiple Run*
// calls followed by a RunDone call.
func RunOneWtFromDWtSyn(n int, syncVars ...GPUVars) {
if UseGPU {
RunWtFromDWtSynGPU(n)
RunDone(syncVars...)
} else {
RunWtFromDWtSynCPU(n)
}
}
// RunDone must be called after Run* calls to start compute kernels.
// This actually submits the kernel jobs to the GPU, and adds commands
// to synchronize the given variables back from the GPU to the CPU.
// After this function completes, the GPU results will be available in
// the specified variables.
func RunDone(syncVars ...GPUVars) {
if !UseGPU {
return
}
sy := GPUSystem
sy.ComputeEncoder.End()
ReadFromGPU(syncVars...)
sy.EndComputePass()
SyncFromGPU(syncVars...)
}
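// exampleCycleBatch is an illustrative sketch, not generated code: it shows
// the batching pattern described in the Run* doc comments, where several
// kernels are recorded into one command submission and RunDone both submits
// them and syncs the requested variables back to the CPU. The kernel choice,
// ordering, and element counts are hypothetical placeholders, not the
// canonical axon cycle.
func exampleCycleBatch(nNeurons, nPools int) {
RunGatherSpikes(nNeurons) // queued on GPU, or run immediately on CPU
RunCycleNeuron(nNeurons)
RunPoolGi(nPools)
RunDone(NeuronsVar, PoolsVar) // submit; then sync Neurons and Pools back
}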
// ToGPU copies given variables to the GPU for the system.
func ToGPU(vars ...GPUVars) {
if !UseGPU {
return
}
sy := GPUSystem
syVars := sy.Vars()
for _, vr := range vars {
switch vr {
case LayersVar:
v, _ := syVars.ValueByIndex(0, "Layers", 0)
gpu.SetValueFrom(v, Layers)
case PathsVar:
v, _ := syVars.ValueByIndex(0, "Paths", 0)
gpu.SetValueFrom(v, Paths)
case NetworkIxsVar:
v, _ := syVars.ValueByIndex(1, "NetworkIxs", 0)
gpu.SetValueFrom(v, NetworkIxs)
case PoolIxsVar:
v, _ := syVars.ValueByIndex(1, "PoolIxs", 0)
gpu.SetValueFrom(v, PoolIxs.Values)
case NeuronIxsVar:
v, _ := syVars.ValueByIndex(1, "NeuronIxs", 0)
gpu.SetValueFrom(v, NeuronIxs.Values)
case SynapseIxsVar:
v, _ := syVars.ValueByIndex(1, "SynapseIxs", 0)
gpu.SetValueFrom(v, SynapseIxs.Values)
case PathSendConVar:
v, _ := syVars.ValueByIndex(1, "PathSendCon", 0)
gpu.SetValueFrom(v, PathSendCon.Values)
case RecvPathIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvPathIxs", 0)
gpu.SetValueFrom(v, RecvPathIxs.Values)
case PathRecvConVar:
v, _ := syVars.ValueByIndex(1, "PathRecvCon", 0)
gpu.SetValueFrom(v, PathRecvCon.Values)
case RecvSynIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvSynIxs", 0)
gpu.SetValueFrom(v, RecvSynIxs.Values)
case CtxVar:
v, _ := syVars.ValueByIndex(2, "Ctx", 0)
gpu.SetValueFrom(v, Ctx)
case NeuronsVar:
v, _ := syVars.ValueByIndex(2, "Neurons", 0)
gpu.SetValueFrom(v, Neurons.Values)
case NeuronAvgsVar:
v, _ := syVars.ValueByIndex(2, "NeuronAvgs", 0)
gpu.SetValueFrom(v, NeuronAvgs.Values)
case LayerStatesVar:
v, _ := syVars.ValueByIndex(2, "LayerStates", 0)
gpu.SetValueFrom(v, LayerStates.Values)
case GlobalScalarsVar:
v, _ := syVars.ValueByIndex(2, "GlobalScalars", 0)
gpu.SetValueFrom(v, GlobalScalars.Values)
case GlobalVectorsVar:
v, _ := syVars.ValueByIndex(2, "GlobalVectors", 0)
gpu.SetValueFrom(v, GlobalVectors.Values)
case ExtsVar:
v, _ := syVars.ValueByIndex(2, "Exts", 0)
gpu.SetValueFrom(v, Exts.Values)
case PoolsVar:
v, _ := syVars.ValueByIndex(2, "Pools", 0)
gpu.SetValueFrom(v, Pools.Values)
case PoolsIntVar:
v, _ := syVars.ValueByIndex(2, "PoolsInt", 0)
gpu.SetValueFrom(v, PoolsInt.Values)
case PathGBufVar:
v, _ := syVars.ValueByIndex(3, "PathGBuf", 0)
gpu.SetValueFrom(v, PathGBuf.Values)
case PathGSynsVar:
v, _ := syVars.ValueByIndex(3, "PathGSyns", 0)
gpu.SetValueFrom(v, PathGSyns.Values)
case SynapsesVar:
v, _ := syVars.ValueByIndex(3, "Synapses", 0)
gpu.SetValueFrom(v, Synapses.Values)
case SynapseTracesVar:
// SynapseTraces is too large for a single storage buffer, so it is split
// across the SynapseTraces0..6 buffers declared above. bsz is the chunk
// size in float32 elements: 536870904 * 4 bytes stays just under 2 GiB,
// which appears to be the per-buffer size limit assumed here.
bsz := 536870904
n := SynapseTraces.Len()
nb := int(math.Ceil(float64(n) / float64(bsz))) // number of chunks needed for n elements
for bi := range nb {
v, _ := syVars.ValueByIndex(3, fmt.Sprintf("SynapseTraces%d", bi), 0)
st := bsz * bi
ed := min(bsz * (bi+1), n)
gpu.SetValueFrom(v, SynapseTraces.Values[st:ed]) // copy this chunk's slice into buffer bi
}
}
}
}
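// Illustrative sketch (not part of the generated code): after any CPU-side change
// to parameter or state data, the affected variables must be copied to the GPU
// before further kernels use them; the parameter and value below are assumptions:
//
//	Layers[0].Inhib.Layer.Gi = 1.2 // hypothetical CPU-side parameter change
//	ToGPU(LayersVar)               // copy updated layer params to the GPU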
// RunGPUSync can be called to synchronize data between CPU and GPU.
// Any prior ToGPU* calls will execute to send data to the GPU,
// and any subsequent RunDone* calls will copy data back from the GPU.
func RunGPUSync() {
if !UseGPU {
return
}
sy := GPUSystem
sy.BeginComputePass()
}
// ToGPUTensorStrides gets tensor strides and starts copying to the GPU.
func ToGPUTensorStrides() {
if !UseGPU {
return
}
sy := GPUSystem
syVars := sy.Vars()
TensorStrides.SetShapeSizes(190)
TensorStrides.SetInt1D(PoolIxs.Shape().Strides[0], 0)
TensorStrides.SetInt1D(PoolIxs.Shape().Strides[1], 1)
TensorStrides.SetInt1D(NeuronIxs.Shape().Strides[0], 10)
TensorStrides.SetInt1D(NeuronIxs.Shape().Strides[1], 11)
TensorStrides.SetInt1D(SynapseIxs.Shape().Strides[0], 20)
TensorStrides.SetInt1D(SynapseIxs.Shape().Strides[1], 21)
TensorStrides.SetInt1D(PathSendCon.Shape().Strides[0], 30)
TensorStrides.SetInt1D(PathSendCon.Shape().Strides[1], 31)
TensorStrides.SetInt1D(RecvPathIxs.Shape().Strides[0], 40)
TensorStrides.SetInt1D(PathRecvCon.Shape().Strides[0], 50)
TensorStrides.SetInt1D(PathRecvCon.Shape().Strides[1], 51)
TensorStrides.SetInt1D(RecvSynIxs.Shape().Strides[0], 60)
TensorStrides.SetInt1D(Neurons.Shape().Strides[0], 70)
TensorStrides.SetInt1D(Neurons.Shape().Strides[1], 71)
TensorStrides.SetInt1D(Neurons.Shape().Strides[2], 72)
TensorStrides.SetInt1D(NeuronAvgs.Shape().Strides[0], 80)
TensorStrides.SetInt1D(NeuronAvgs.Shape().Strides[1], 81)
TensorStrides.SetInt1D(LayerStates.Shape().Strides[0], 90)
TensorStrides.SetInt1D(LayerStates.Shape().Strides[1], 91)
TensorStrides.SetInt1D(LayerStates.Shape().Strides[2], 92)
TensorStrides.SetInt1D(GlobalScalars.Shape().Strides[0], 100)
TensorStrides.SetInt1D(GlobalScalars.Shape().Strides[1], 101)
TensorStrides.SetInt1D(GlobalVectors.Shape().Strides[0], 110)
TensorStrides.SetInt1D(GlobalVectors.Shape().Strides[1], 111)
TensorStrides.SetInt1D(GlobalVectors.Shape().Strides[2], 112)
TensorStrides.SetInt1D(Exts.Shape().Strides[0], 120)
TensorStrides.SetInt1D(Exts.Shape().Strides[1], 121)
TensorStrides.SetInt1D(Pools.Shape().Strides[0], 130)
TensorStrides.SetInt1D(Pools.Shape().Strides[1], 131)
TensorStrides.SetInt1D(Pools.Shape().Strides[2], 132)
TensorStrides.SetInt1D(PoolsInt.Shape().Strides[0], 140)
TensorStrides.SetInt1D(PoolsInt.Shape().Strides[1], 141)
TensorStrides.SetInt1D(PoolsInt.Shape().Strides[2], 142)
TensorStrides.SetInt1D(PathGBuf.Shape().Strides[0], 150)
TensorStrides.SetInt1D(PathGBuf.Shape().Strides[1], 151)
TensorStrides.SetInt1D(PathGBuf.Shape().Strides[2], 152)
TensorStrides.SetInt1D(PathGSyns.Shape().Strides[0], 160)
TensorStrides.SetInt1D(PathGSyns.Shape().Strides[1], 161)
TensorStrides.SetInt1D(Synapses.Shape().Strides[0], 170)
TensorStrides.SetInt1D(Synapses.Shape().Strides[1], 171)
TensorStrides.SetInt1D(SynapseTraces.Shape().Strides[0], 180)
TensorStrides.SetInt1D(SynapseTraces.Shape().Strides[1], 181)
TensorStrides.SetInt1D(SynapseTraces.Shape().Strides[2], 182)
v, _ := syVars.ValueByIndex(0, "TensorStrides", 0)
gpu.SetValueFrom(v, TensorStrides.Values)
}
// ReadFromGPU enqueues commands to copy the given variables from the GPU,
// so they can then be synced back into the CPU-side variables by SyncFromGPU.
func ReadFromGPU(vars ...GPUVars) {
sy := GPUSystem
syVars := sy.Vars()
for _, vr := range vars {
switch vr {
case LayersVar:
v, _ := syVars.ValueByIndex(0, "Layers", 0)
v.GPUToRead(sy.CommandEncoder)
case PathsVar:
v, _ := syVars.ValueByIndex(0, "Paths", 0)
v.GPUToRead(sy.CommandEncoder)
case NetworkIxsVar:
v, _ := syVars.ValueByIndex(1, "NetworkIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case PoolIxsVar:
v, _ := syVars.ValueByIndex(1, "PoolIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case NeuronIxsVar:
v, _ := syVars.ValueByIndex(1, "NeuronIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case SynapseIxsVar:
v, _ := syVars.ValueByIndex(1, "SynapseIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case PathSendConVar:
v, _ := syVars.ValueByIndex(1, "PathSendCon", 0)
v.GPUToRead(sy.CommandEncoder)
case RecvPathIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvPathIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case PathRecvConVar:
v, _ := syVars.ValueByIndex(1, "PathRecvCon", 0)
v.GPUToRead(sy.CommandEncoder)
case RecvSynIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvSynIxs", 0)
v.GPUToRead(sy.CommandEncoder)
case CtxVar:
v, _ := syVars.ValueByIndex(2, "Ctx", 0)
v.GPUToRead(sy.CommandEncoder)
case NeuronsVar:
v, _ := syVars.ValueByIndex(2, "Neurons", 0)
v.GPUToRead(sy.CommandEncoder)
case NeuronAvgsVar:
v, _ := syVars.ValueByIndex(2, "NeuronAvgs", 0)
v.GPUToRead(sy.CommandEncoder)
case LayerStatesVar:
v, _ := syVars.ValueByIndex(2, "LayerStates", 0)
v.GPUToRead(sy.CommandEncoder)
case GlobalScalarsVar:
v, _ := syVars.ValueByIndex(2, "GlobalScalars", 0)
v.GPUToRead(sy.CommandEncoder)
case GlobalVectorsVar:
v, _ := syVars.ValueByIndex(2, "GlobalVectors", 0)
v.GPUToRead(sy.CommandEncoder)
case ExtsVar:
v, _ := syVars.ValueByIndex(2, "Exts", 0)
v.GPUToRead(sy.CommandEncoder)
case PoolsVar:
v, _ := syVars.ValueByIndex(2, "Pools", 0)
v.GPUToRead(sy.CommandEncoder)
case PoolsIntVar:
v, _ := syVars.ValueByIndex(2, "PoolsInt", 0)
v.GPUToRead(sy.CommandEncoder)
case PathGBufVar:
v, _ := syVars.ValueByIndex(3, "PathGBuf", 0)
v.GPUToRead(sy.CommandEncoder)
case PathGSynsVar:
v, _ := syVars.ValueByIndex(3, "PathGSyns", 0)
v.GPUToRead(sy.CommandEncoder)
case SynapsesVar:
v, _ := syVars.ValueByIndex(3, "Synapses", 0)
v.GPUToRead(sy.CommandEncoder)
case SynapseTracesVar:
bsz := 536870904
n := SynapseTraces.Len()
nb := int(math.Ceil(float64(n) / float64(bsz)))
for bi := range nb {
v, _ := syVars.ValueByIndex(3, fmt.Sprintf("SynapseTraces%d", bi), 0)
v.GPUToRead(sy.CommandEncoder)
}
}
}
}
// SyncFromGPU synchronizes the given variables from the GPU back into the
// corresponding CPU-side variables, after ReadFromGPU has been called.
func SyncFromGPU(vars ...GPUVars) {
sy := GPUSystem
syVars := sy.Vars()
for _, vr := range vars {
switch vr {
case LayersVar:
v, _ := syVars.ValueByIndex(0, "Layers", 0)
v.ReadSync()
gpu.ReadToBytes(v, Layers)
case PathsVar:
v, _ := syVars.ValueByIndex(0, "Paths", 0)
v.ReadSync()
gpu.ReadToBytes(v, Paths)
case NetworkIxsVar:
v, _ := syVars.ValueByIndex(1, "NetworkIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, NetworkIxs)
case PoolIxsVar:
v, _ := syVars.ValueByIndex(1, "PoolIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, PoolIxs.Values)
case NeuronIxsVar:
v, _ := syVars.ValueByIndex(1, "NeuronIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, NeuronIxs.Values)
case SynapseIxsVar:
v, _ := syVars.ValueByIndex(1, "SynapseIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, SynapseIxs.Values)
case PathSendConVar:
v, _ := syVars.ValueByIndex(1, "PathSendCon", 0)
v.ReadSync()
gpu.ReadToBytes(v, PathSendCon.Values)
case RecvPathIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvPathIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, RecvPathIxs.Values)
case PathRecvConVar:
v, _ := syVars.ValueByIndex(1, "PathRecvCon", 0)
v.ReadSync()
gpu.ReadToBytes(v, PathRecvCon.Values)
case RecvSynIxsVar:
v, _ := syVars.ValueByIndex(1, "RecvSynIxs", 0)
v.ReadSync()
gpu.ReadToBytes(v, RecvSynIxs.Values)
case CtxVar:
v, _ := syVars.ValueByIndex(2, "Ctx", 0)
v.ReadSync()
gpu.ReadToBytes(v, Ctx)
case NeuronsVar:
v, _ := syVars.ValueByIndex(2, "Neurons", 0)
v.ReadSync()
gpu.ReadToBytes(v, Neurons.Values)
case NeuronAvgsVar:
v, _ := syVars.ValueByIndex(2, "NeuronAvgs", 0)
v.ReadSync()
gpu.ReadToBytes(v, NeuronAvgs.Values)
case LayerStatesVar:
v, _ := syVars.ValueByIndex(2, "LayerStates", 0)
v.ReadSync()
gpu.ReadToBytes(v, LayerStates.Values)
case GlobalScalarsVar:
v, _ := syVars.ValueByIndex(2, "GlobalScalars", 0)
v.ReadSync()
gpu.ReadToBytes(v, GlobalScalars.Values)
case GlobalVectorsVar:
v, _ := syVars.ValueByIndex(2, "GlobalVectors", 0)
v.ReadSync()
gpu.ReadToBytes(v, GlobalVectors.Values)
case ExtsVar:
v, _ := syVars.ValueByIndex(2, "Exts", 0)
v.ReadSync()
gpu.ReadToBytes(v, Exts.Values)
case PoolsVar:
v, _ := syVars.ValueByIndex(2, "Pools", 0)
v.ReadSync()
gpu.ReadToBytes(v, Pools.Values)
case PoolsIntVar:
v, _ := syVars.ValueByIndex(2, "PoolsInt", 0)
v.ReadSync()
gpu.ReadToBytes(v, PoolsInt.Values)
case PathGBufVar:
v, _ := syVars.ValueByIndex(3, "PathGBuf", 0)
v.ReadSync()
gpu.ReadToBytes(v, PathGBuf.Values)
case PathGSynsVar:
v, _ := syVars.ValueByIndex(3, "PathGSyns", 0)
v.ReadSync()
gpu.ReadToBytes(v, PathGSyns.Values)
case SynapsesVar:
v, _ := syVars.ValueByIndex(3, "Synapses", 0)
v.ReadSync()
gpu.ReadToBytes(v, Synapses.Values)
case SynapseTracesVar:
bsz := 536870904
n := SynapseTraces.Len()
nb := int(math.Ceil(float64(n) / float64(bsz)))
for bi := range nb {
v, _ := syVars.ValueByIndex(3, fmt.Sprintf("SynapseTraces%d", bi), 0)
v.ReadSync()
st := bsz * bi
ed := min(bsz * (bi+1), n)
gpu.ReadToBytes(v, SynapseTraces.Values[st:ed])
}
}
}
}
// GetLayers returns a pointer to the given global variable:
// [Layers] []LayerParams at given index. This is accessed directly in the
// GPU code, so this function call is the CPU equivalent.
func GetLayers(idx uint32) *LayerParams {
return &Layers[idx]
}
// GetPaths returns a pointer to the given global variable:
// [Paths] []PathParams at given index. This is accessed directly in the
// GPU code, so this function call is the CPU equivalent.
func GetPaths(idx uint32) *PathParams {
return &Paths[idx]
}
// GetNetworkIxs returns a pointer to the given global variable:
// [NetworkIxs] []NetworkIndexes at given index. This is accessed directly in the
// GPU code, so this function call is the CPU equivalent.
func GetNetworkIxs(idx uint32) *NetworkIndexes {
return &NetworkIxs[idx]
}
// GetCtx returns a pointer to the given global variable:
// [Ctx] []Context at given index. This is accessed directly in the
// GPU code, so this function call is the CPU equivalent.
func GetCtx(idx uint32) *Context {
return &Ctx[idx]
}
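// Illustrative sketch (not part of the generated code): the Get* accessors mirror
// the GPU's direct global-variable indexing on the CPU, e.g.:
//
//	ctx := GetCtx(0)        // the single Context at index 0
//	nix := GetNetworkIxs(0) // network-level index info
//	_ = ctx.NData
//	_ = nix.MaxData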
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"cogentcore.org/core/core"
"cogentcore.org/lab/base/mpi"
)
//////// Misc
// ToggleLayersOff can be used to disable layers in a Network,
// for example if you are doing an ablation study.
func ToggleLayersOff(net *Network, layerNames []string, off bool) {
for _, lnm := range layerNames {
lyi := net.LayerByName(lnm)
if lyi == nil {
fmt.Printf("layer not found: %s\n", lnm)
continue
}
lyi.SetOff(off)
}
}
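// Illustrative sketch (not part of the generated code): disabling layers for an
// ablation study and re-enabling them afterward (the layer names are assumptions):
//
//	ToggleLayersOff(net, []string{"CA1", "CA3"}, true)  // ablate
//	// ... run test trials ...
//	ToggleLayersOff(net, []string{"CA1", "CA3"}, false) // restore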
//////// Weights files
// WeightsFilename returns the default current weights file name,
// using the train run and epoch counters from the looper
// and the RunName string identifying the tag, parameters, and starting run.
func WeightsFilename(net *Network, ctrString, runName string) string {
return net.Name + "_" + runName + "_" + ctrString + ".wts.gz"
}
// SaveWeights saves network weights to a file named using WeightsFilename
// to identify the weights.
// Only saves on MPI rank 0 when running under MPI.
// Returns the name of the file saved to, or empty if not saved.
func SaveWeights(net *Network, ctrString, runName string) string {
if mpi.WorldRank() > 0 {
return ""
}
fnm := WeightsFilename(net, ctrString, runName)
fmt.Printf("Saving Weights to: %s\n", fnm)
net.SaveWeightsJSON(core.Filename(fnm))
return fnm
}
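// Illustrative sketch (not part of the generated code): saving weights at the end
// of training, where the counter string and run name are assumed to come from the
// looper counters and run configuration of the particular simulation:
//
//	ctrString := "010_0100" // hypothetical run and epoch counter string
//	if fnm := SaveWeights(net, ctrString, "base"); fnm != "" {
//		fmt.Println("saved weights to:", fnm)
//	}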
// SaveWeightsIfConfigSet saves network weights if the given config
// bool value has been set to true.
// Uses WeightsFilename information to identify the weights.
// Only saves on MPI rank 0 when running under MPI.
// Returns the name of the file saved to, or empty if not saved.
func SaveWeightsIfConfigSet(net *Network, cfgWts bool, ctrString, runName string) string {
if cfgWts {
return SaveWeights(net, ctrString, runName)
}
return ""
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/base/errors"
"cogentcore.org/core/math32/vecint"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// HipConfig has the hippocampus size and connectivity parameters
type HipConfig struct {
// size of EC2
EC2Size vecint.Vector2i `nest:"+"`
// number of EC3 pools (outer dimension)
EC3NPool vecint.Vector2i `nest:"+"`
// number of neurons in one EC3 pool
EC3NNrn vecint.Vector2i `nest:"+"`
// number of neurons in one CA1 pool
CA1NNrn vecint.Vector2i `nest:"+"`
// size of CA3
CA3Size vecint.Vector2i `nest:"+"`
// ratio of DG size relative to CA3 size
DGRatio float32 `default:"2.236"`
// percent connectivity from EC3 to EC2
EC3ToEC2PCon float32 `default:"0.1"`
// percent connectivity from EC2 to DG
EC2ToDGPCon float32 `default:"0.25"`
// percent connectivity from EC2 to CA3
EC2ToCA3PCon float32 `default:"0.25"`
// percent connectivity from CA3 to CA1
CA3ToCA1PCon float32 `default:"0.25"`
// percent connectivity into CA3 from DG
DGToCA3PCon float32 `default:"0.02"`
// lateral radius of connectivity in EC2
EC2LatRadius int
// lateral gaussian sigma in EC2 for how quickly weights fall off with distance
EC2LatSigma float32
// proportion of full mossy fiber strength (PathScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
MossyDelta float32 `default:"1"`
// proportion of full mossy fiber strength (PathScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
MossyDeltaTest float32 `default:"0.75"`
// low theta modulation value for temporal difference EDL -- sets PathScale.Rel on CA1 <-> EC paths consistent with Theta phase model
ThetaLow float32 `default:"0.9"`
// high theta modulation value for temporal difference EDL -- sets PathScale.Rel on CA1 <-> EC paths consistent with Theta phase model
ThetaHigh float32 `default:"1"`
// flag for clamping the EC5 from EC5ClampSrc
EC5Clamp bool `default:"true"`
// source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available
EC5ClampSrc string `default:"EC3"`
// clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there
EC5ClampTest bool `default:"true"`
// threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.
EC5ClampThr float32 `default:"0.1"`
}
func (hip *HipConfig) Defaults() {
// size
hip.EC2Size.Set(21, 21) // 21
hip.EC3NPool.Set(2, 3)
hip.EC3NNrn.Set(7, 7)
hip.CA1NNrn.Set(10, 10) // using MedHip now
hip.CA3Size.Set(20, 20) // using MedHip now
hip.DGRatio = 2.236 // c.f. Ketz et al., 2013
// ratio
hip.EC2ToDGPCon = 0.25
hip.EC2ToCA3PCon = 0.25
hip.CA3ToCA1PCon = 0.25
hip.DGToCA3PCon = 0.02
hip.EC3ToEC2PCon = 0.1 // 0.1 for EC3-EC2 in WintererMaierWoznyEtAl17, not sure about Input-EC2
// lateral
hip.EC2LatRadius = 2
hip.EC2LatSigma = 2
hip.MossyDelta = 1
hip.MossyDeltaTest = .75
hip.ThetaLow = 0.9
hip.ThetaHigh = 1
hip.EC5Clamp = true
hip.EC5ClampSrc = "EC3"
hip.EC5ClampTest = true
hip.EC5ClampThr = 0.1
}
// AddHip adds a new Hippocampal network for episodic memory.
// Returns layers most likely to be used for remaining connections and positions.
func (net *Network) AddHip(hip *HipConfig, space float32) (ec2, ec3, dg, ca3, ca1, ec5 *Layer) {
// Trisynaptic Pathway (TSP)
ec2 = net.AddLayer2D("EC2", SuperLayer, hip.EC2Size.Y, hip.EC2Size.X)
ec2.SetSampleShape(emer.Layer2DSampleIndexes(ec2, 10))
dg = net.AddLayer2D("DG", SuperLayer, int(float32(hip.CA3Size.Y)*hip.DGRatio), int(float32(hip.CA3Size.X)*hip.DGRatio))
dg.SetSampleShape(emer.Layer2DSampleIndexes(dg, 10))
ca3 = net.AddLayer2D("CA3", SuperLayer, hip.CA3Size.Y, hip.CA3Size.X)
ca3.SetSampleShape(emer.Layer2DSampleIndexes(ca3, 10))
// Monosynaptic Pathway (MSP)
ec3 = net.AddLayer4D("EC3", SuperLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.EC3NNrn.Y, hip.EC3NNrn.X)
ec3.AddClass("EC")
ec3.SetSampleShape(emer.CenterPoolIndexes(ec3, 2), emer.CenterPoolShape(ec3, 2))
ca1 = net.AddLayer4D("CA1", SuperLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.CA1NNrn.Y, hip.CA1NNrn.X)
ca1.SetSampleShape(emer.CenterPoolIndexes(ca1, 2), emer.CenterPoolShape(ca1, 2))
if hip.EC5Clamp {
ec5 = net.AddLayer4D("EC5", TargetLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.EC3NNrn.Y, hip.EC3NNrn.X) // clamped in plus phase
} else {
ec5 = net.AddLayer4D("EC5", SuperLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.EC3NNrn.Y, hip.EC3NNrn.X)
}
ec5.AddClass("EC")
ec5.SetSampleShape(emer.CenterPoolIndexes(ec5, 2), emer.CenterPoolShape(ec5, 2))
// Input and ECs connections
onetoone := paths.NewOneToOne()
ec3Toec2 := paths.NewUniformRand()
ec3Toec2.PCon = hip.EC3ToEC2PCon
mossy := paths.NewUniformRand()
mossy.PCon = hip.DGToCA3PCon
net.ConnectLayers(ec3, ec2, ec3Toec2, ForwardPath)
net.ConnectLayers(ec5, ec3, onetoone, BackPath)
// recurrent inhibition in EC2
lat := paths.NewCircle()
lat.TopoWeights = true
lat.Radius = hip.EC2LatRadius
lat.Sigma = hip.EC2LatSigma
inh := net.ConnectLayers(ec2, ec2, lat, InhibPath)
inh.AddClass("InhibLateral")
// TSP connections
ppathDG := paths.NewUniformRand()
ppathDG.PCon = hip.EC2ToDGPCon
ppathCA3 := paths.NewUniformRand()
ppathCA3.PCon = hip.EC2ToCA3PCon
ca3ToCA1 := paths.NewUniformRand()
ca3ToCA1.PCon = hip.CA3ToCA1PCon
full := paths.NewFull()
net.ConnectLayers(ec2, dg, ppathDG, HipPath).AddClass("HippoCHL")
net.ConnectLayers(ec2, ca3, ppathCA3, HipPath).AddClass("PPath")
net.ConnectLayers(ca3, ca3, full, HipPath).AddClass("PPath")
net.ConnectLayers(dg, ca3, mossy, ForwardPath).AddClass("HippoCHL")
net.ConnectLayers(ca3, ca1, ca3ToCA1, HipPath).AddClass("HippoCHL")
// MSP connections
pool1to1 := paths.NewPoolOneToOne()
net.ConnectLayers(ec3, ca1, pool1to1, HipPath).AddClass("EcCA1Path") // HipPath makes wt linear
net.ConnectLayers(ca1, ec5, pool1to1, ForwardPath).AddClass("EcCA1Path") // doesn't work w/ HipPath
net.ConnectLayers(ec5, ca1, pool1to1, HipPath).AddClass("EcCA1Path") // HipPath makes wt linear
// positioning
ec3.PlaceRightOf(ec2, space)
ec5.PlaceRightOf(ec3, space)
dg.PlaceAbove(ec2)
ca3.PlaceAbove(dg)
ca1.PlaceRightOf(ca3, space)
return
}
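// Illustrative sketch (not part of the generated code): building a hippocampal
// network with default parameters, where net is an existing *Network and the
// spacing value is an assumption:
//
//	var hip HipConfig
//	hip.Defaults()
//	ec2, ec3, dg, ca3, ca1, ec5 := net.AddHip(&hip, 2)
//	_, _, _, _, _, _ = ec2, ec3, dg, ca3, ca1, ec5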
// ConfigLoopsHip configures the hippocampal looper events and should be included
// in the model's ConfigLoops to ensure the hippocampus-specific loop events are set up correctly.
// See hip.go for an example implementation of this function.
// The hip.EC5ClampSrc field specifies the layer to clamp EC5 plus-phase values from:
// EC3 is the biological source, but an Input layer can be used for a simple test network.
func (net *Network) ConfigLoopsHip(ctx *Context, ls *looper.Stacks, hip *HipConfig, pretrain *bool) {
var tmpValues []float32
clampSrc := net.LayerByName(hip.EC5ClampSrc)
ec5 := net.LayerByName("EC5")
ca1 := net.LayerByName("CA1")
ca3 := net.LayerByName("CA3")
dg := net.LayerByName("DG")
dgFromEc2 := errors.Log1(dg.RecvPathBySendName("EC2")).(*Path)
ca1FromEc3 := errors.Log1(ca1.RecvPathBySendName("EC3")).(*Path)
ca1FromCa3 := errors.Log1(ca1.RecvPathBySendName("CA3")).(*Path)
ca3FromDg := errors.Log1(ca3.RecvPathBySendName("DG")).(*Path)
ca3FromEc2 := errors.Log1(ca3.RecvPathBySendName("EC2")).(*Path)
ca3FromCa3 := errors.Log1(ca3.RecvPathBySendName("CA3")).(*Path)
dgPjScale := ca3FromDg.Params.PathScale.Rel
ca1FromCa3Abs := ca1FromCa3.Params.PathScale.Abs
// configure events -- note that events are shared between Train, Test
// so only need to do it once on Train
ls.AddEventAllModes(etime.Cycle, "HipMinusPhase:Start", 0, func() {
if *pretrain {
dgFromEc2.Params.Learn.Learn = 0
ca3FromEc2.Params.Learn.Learn = 0
ca3FromCa3.Params.Learn.Learn = 0
ca1FromCa3.Params.Learn.Learn = 0
ca1FromCa3.Params.PathScale.Abs = 0
} else {
dgFromEc2.Params.Learn.Learn = 1
ca3FromEc2.Params.Learn.Learn = 1
ca3FromCa3.Params.Learn.Learn = 1
ca1FromCa3.Params.Learn.Learn = 1
ca1FromCa3.Params.PathScale.Abs = ca1FromCa3Abs
}
ca1FromEc3.Params.PathScale.Rel = hip.ThetaHigh
ca1FromCa3.Params.PathScale.Rel = hip.ThetaLow
ca3FromDg.Params.PathScale.Rel = dgPjScale * (1 - hip.MossyDelta) // turn off DG input to CA3 in first quarter
net.InitGScale() // update computed scaling factors
// net.GPU.SyncParamsToGPU() // todo:
})
ls.AddEventAllModes(etime.Cycle, "Hip:Beta1", 50, func() {
ca1FromEc3.Params.PathScale.Rel = hip.ThetaLow
ca1FromCa3.Params.PathScale.Rel = hip.ThetaHigh
if ctx.Testing.IsTrue() {
ca3FromDg.Params.PathScale.Rel = dgPjScale * (1 - hip.MossyDeltaTest)
}
net.InitGScale() // update computed scaling factors
// net.GPU.SyncParamsToGPU() // TODO:
})
// note: critical for this to come before std start
for _, st := range ls.Stacks {
ev := st.Loops[etime.Cycle].EventByCounter(150)
ev.OnEvent.Prepend("HipPlusPhase:Start", func() bool {
ca3FromDg.Params.PathScale.Rel = dgPjScale // restore at the beginning of plus phase for CA3 EDL
ca1FromEc3.Params.PathScale.Rel = hip.ThetaHigh
ca1FromCa3.Params.PathScale.Rel = hip.ThetaLow
// clamp EC5 from clamp source (EC3 typically)
if hip.EC5Clamp {
if ctx.Testing.IsFalse() || hip.EC5ClampTest {
for di := uint32(0); di < ctx.NData; di++ {
clampSrc.UnitValues(&tmpValues, "Act", int(di))
// TODO:
// if hip.EC5ClampThr > 0 {
// stats.Binarize(tmpValues, tensor.NewFloat64Scalar(hip.EC5ClampThr))
// }
ec5.ApplyExt1D32(di, tmpValues)
}
}
}
net.InitGScale() // update computed scaling factors
// net.GPU.SyncParamsToGPU()
net.ApplyExts() // essential for GPU
return true
})
st.Loops[etime.Trial].OnEnd.Prepend("HipPlusPhase:End", func() bool {
ca1FromCa3.Params.PathScale.Rel = hip.ThetaHigh
net.InitGScale() // update computed scaling factors
// net.GPU.SyncParamsToGPU()
return true
})
}
}
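// Illustrative sketch (not part of the generated code): wiring the hippocampal
// theta-phase events into an existing looper setup, assuming ls is the sim's
// looper.Stacks and pretrain is a flag controlled elsewhere in the sim:
//
//	pretrain := false
//	net.ConfigLoopsHip(net.Context(), ls, &hip, &pretrain)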
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
//gosl:start
// HipPathParams defines the behavior of hippocampus paths, which have special learning rules
type HipPathParams struct {
// Hebbian learning proportion
Hebb float32 `default:"0"`
// EDL proportion
Err float32 `default:"1"`
// proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)
SAvgCor float32 `default:"0.4:0.8" min:"0" max:"1"`
// threshold of sending average activation below which learning does not occur (prevents learning when there is no input)
SAvgThr float32 `default:"0.01" min:"0"`
// sending layer Nominal activity level (must be set manually to match the sending layer's value)
SNominal float32 `default:"0.1" min:"0"`
pad, pad1, pad2 float32
}
func (hp *HipPathParams) Defaults() {
hp.Hebb = 0
hp.Err = 1
hp.SAvgCor = 0.4
hp.SAvgThr = 0.01
hp.SNominal = 0.1
}
func (hp *HipPathParams) Update() {
}
//gosl:end
func (pj *PathParams) HipDefaults() {
pj.SWts.Init.Sym.SetBool(false)
}
// Code generated by "goal build"; DO NOT EDIT.
//line inhib.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"log/slog"
"sync/atomic"
"cogentcore.org/core/math32"
"cogentcore.org/lab/gosl/slbool"
"github.com/emer/axon/v2/fsfffb"
)
//gosl:start
//gosl:import "github.com/emer/axon/v2/fsfffb"
//////// ActAvgParams
// ActAvgParams represents the nominal average activity levels in the layer
// and parameters for adapting the computed Gi inhibition levels to maintain
// average activity within a target range.
type ActAvgParams struct {
// Nominal is the estimated average activity level in the layer, which is
// used in computing the scaling factor on sending pathways from this layer.
// In general it should roughly match the layer ActAvg.ActMAvg value, which
// can be logged using the axon.LogAddDiagnosticItems function.
// If layers receiving from this layer are not getting enough Ge excitation,
// then this Nominal level can be lowered to increase pathway strength
// (fewer active neurons means each one contributes more, so the scaling factor
// goes as the inverse of activity level), or vice-versa if Ge is too high.
// It is also the basis for the target activity level used for the AdaptGi
// option: see the Offset which is added to this value.
Nominal float32 `min:"0" step:"0.01"`
// RTThr is the reaction time (RT) threshold activity level in the layer,
// in terms of the maximum CaP level of any neuron in the layer. The
// LayerStates LayerRT value is recorded for the cycle at which this
// level is exceeded within a theta cycle, after Acts.Dt.MaxCycStart cycles.
RTThr float32 `default:"0.5"`
// AdaptGi enables adapting of layer inhibition Gi multiplier factor
// (stored in layer GiMult value) to maintain a target layer level of
// ActAvg.Nominal. This generally works well and improves the long-term
// stability of the models. It is not enabled by default because it depends
// on having established a reasonable Nominal + Offset target activity level.
AdaptGi slbool.Bool
// Offset is added to Nominal for the target average activity that drives
// adaptation of Gi for this layer. Typically the Nominal level is good,
// but sometimes Nominal must be adjusted up or down to achieve desired Ge
// scaling, so this Offset can compensate accordingly.
Offset float32 `default:"0" min:"0" step:"0.01"`
// HiTol is the tolerance for higher-than-target average activation
// as a proportion of that target value (0 = exactly the target, 0.2 = 20%
// higher than target). Only once activations move outside this tolerance
// are inhibitory values adapted.
HiTol float32 `default:"0"`
// LoTol is the tolerance for lower-than-target average activation
// as a proportion of that target value (0 = exactly the target, 0.5 = 50%
// lower than target). Only once activations move outside this tolerance
// are inhibitory values adapted.
LoTol float32 `default:"0.8"`
// AdaptRate is the rate of Gi adaptation as function of
// AdaptRate * (Target - ActMAvg) / Target. This occurs at spaced intervals
// determined by Network.SlowInterval value. Slower values such as 0.05 may
// be needed for large networks and sparse layers.
AdaptRate float32 `default:"0.1"`
// AdaptMax is the maximum adaptation step magnitude to take at any point.
AdaptMax float32 `default:"0.01"`
}
func (aa *ActAvgParams) Update() {
}
func (aa *ActAvgParams) Defaults() {
aa.Nominal = 0.1
aa.RTThr = 0.5
aa.Offset = 0
aa.HiTol = 0
aa.LoTol = 0.8
aa.AdaptRate = 0.1
aa.AdaptMax = 0.01
aa.Update()
}
func (aa *ActAvgParams) ShouldDisplay(field string) bool {
switch field {
case "Nominal", "AdaptGi":
return true
default:
return aa.AdaptGi.IsTrue()
}
}
// AvgFromAct updates the running-average activation given average activity level in layer
func (aa *ActAvgParams) AvgFromAct(avg *float32, act float32, dt float32) {
if act < 0.0001 {
return
}
*avg += dt * (act - *avg)
}
// Adapt adapts the given gi multiplier factor as function of target and actual
// average activation, given current params.
func (aa *ActAvgParams) Adapt(gimult *float32, act float32) bool {
trg := aa.Nominal + aa.Offset
del := (act - trg) / trg
if del < -aa.LoTol || del > aa.HiTol {
*gimult += math32.Clamp(aa.AdaptRate*del, -aa.AdaptMax, aa.AdaptMax)
return true
}
return false
}
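// Worked example (illustrative only): with Nominal = 0.1, Offset = 0, HiTol = 0,
// AdaptRate = 0.1, and AdaptMax = 0.01, an actual average activity of 0.15 gives
// del = (0.15 - 0.1) / 0.1 = 0.5, which exceeds HiTol, so gimult is incremented
// by Clamp(0.1 * 0.5, -0.01, 0.01) = 0.01, i.e., the step is limited by AdaptMax.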
// InhibParams contains all the inhibition computation params and functions for basic Axon.
// This is included in LayerParams to support computation.
// Also includes the expected average activation in the layer, which is used for
// G conductance rescaling and potentially for adapting inhibition over time.
type InhibParams struct {
// ActAvg has layer-level and pool-level average activation initial values
// and updating / adaptation thereof.
// Initial values help determine initial scaling factors.
ActAvg ActAvgParams `display:"inline"`
// Layer determines inhibition across the entire layer.
// Input layers generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers.
// If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.
Layer fsfffb.GiParams `display:"inline"`
// Pool determines inhibition within sub-pools of units, for layers with 4D shape.
// This is almost always necessary if the layer has sub-pools.
Pool fsfffb.GiParams `display:"inline"`
}
func (ip *InhibParams) Update() {
ip.ActAvg.Update()
ip.Layer.Update()
ip.Pool.Update()
}
func (ip *InhibParams) Defaults() {
ip.ActAvg.Defaults()
ip.Layer.Defaults()
ip.Pool.Defaults()
ip.Layer.Gi = 1.1
ip.Pool.Gi = 1.1
}
// PoolInhib computes FSFFFB inhibition for a pool,
// based on aggregated FFs and FBs spiking values
func PoolInhib(fb *fsfffb.GiParams, pi, di uint32, gimult float32) {
if fb.On.IsFalse() {
PoolInhibZero(pi, di)
return
}
Pools.SetAdd(fb.FFAvgDt*(Pools.Value(int(pi), int(di), int(fsfffb.FFs))-Pools.Value(int(pi), int(di), int(fsfffb.FFAvg))), int(pi), int(di), int(fsfffb.FFAvg))
fsi := Pools.Value(int(pi), int(di), int(fsfffb.FSi))
fsi = fb.FSiFromFFs(fsi, Pools.Value(int(pi), int(di), int(fsfffb.FFs)), Pools.Value(int(pi), int(di), int(fsfffb.FBs)))
Pools.Set(fsi, int(pi), int(di), int(fsfffb.FSi))
clamped := PoolsInt.Value(int(pi), int(di), int(Clamped)) > 0
fsgi := fb.Gi * fb.FS(fsi, Pools.Value(int(pi), int(di), int(fsfffb.GeExts)), clamped)
Pools.Set(fsgi, int(pi), int(di), int(fsfffb.FSGi))
ssf := Pools.Value(int(pi), int(di), int(fsfffb.SSf))
ssi := Pools.Value(int(pi), int(di), int(fsfffb.SSi))
fb.SSFromFBs(&ssf, &ssi, Pools.Value(int(pi), int(di), int(fsfffb.FBs)))
ssgi := fb.Gi * fb.SS * ssi
Pools.Set(ssf, int(pi), int(di), int(fsfffb.SSf))
Pools.Set(ssi, int(pi), int(di), int(fsfffb.SSi))
Pools.Set(ssgi, int(pi), int(di), int(fsfffb.SSGi))
Pools.Set(fsgi+ssgi+fb.FFPrv*Pools.Value(int(pi), int(di), int(fsfffb.FFAvgPrv)), int(pi), int(di), int(fsfffb.TotalGi))
Pools.Set(Pools.Value(int(pi), int(di), int(fsfffb.TotalGi)), int(pi), int(di), int(fsfffb.GiOrig))
}
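// Summary (illustrative restatement of the code above): the total pool inhibition is
//
//	TotalGi = Gi*FS(FSi, GeExts, clamped) + Gi*SS*SSi + FFPrv*FFAvgPrv
//
// i.e., a fast component driven by the current FF and FB spikes, a slower component
// integrated from FB spikes, plus a contribution from the running FF average
// captured prior to the previous decay (FFAvgPrv).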
func PoolInhibInit(pi, di uint32) {
PoolInhibInitRaw(pi, di)
PoolInhibZero(pi, di)
}
// PoolInhibInitRaw clears raw spike counters -- done every cycle prior to accumulating
func PoolInhibInitRaw(pi, di uint32) {
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FFsRaw))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FBsRaw))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.GeExtRaw))
PoolsInt.Set(0, int(pi), int(di), int(FFsRawInt))
PoolsInt.Set(0, int(pi), int(di), int(FBsRawInt))
PoolsInt.Set(0, int(pi), int(di), int(GeExtRawInt))
}
// PoolInhibZero resets all accumulating inhibition factors to 0
func PoolInhibZero(pi, di uint32) {
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FFs))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FBs))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.GeExts))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FSi))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.SSi))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.SSf))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FSGi))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.SSGi))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.TotalGi))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FFAvg))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.FFAvgPrv))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.GiOrig))
Pools.Set(0.0, int(pi), int(di), int(fsfffb.LayGi))
PoolsInt.Set(0, int(pi), int(di), int(Clamped))
}
// PoolInhibDecay reduces inhibition values by the given decay proportion
func PoolInhibDecay(pi, di uint32, decay float32) {
Pools.Set(Pools.Value(int(pi), int(di), int(fsfffb.FFAvg)), int(pi), int(di), int(fsfffb.FFAvgPrv)) // capture prior to decay
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.FFs)), int(pi), int(di), int(fsfffb.FFs))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.FBs)), int(pi), int(di), int(fsfffb.FBs))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.GeExts)), int(pi), int(di), int(fsfffb.GeExts))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.FSi)), int(pi), int(di), int(fsfffb.FSi))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.SSi)), int(pi), int(di), int(fsfffb.SSi))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.SSf)), int(pi), int(di), int(fsfffb.SSf))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.FSGi)), int(pi), int(di), int(fsfffb.FSGi))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.SSGi)), int(pi), int(di), int(fsfffb.SSGi))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.TotalGi)), int(pi), int(di), int(fsfffb.TotalGi))
Pools.SetSub(decay*Pools.Value(int(pi), int(di), int(fsfffb.FFAvg)), int(pi), int(di), int(fsfffb.FFAvg))
}
// PoolInhibSpikesFromRaw updates spike values from the raw values, dividing the feedback spikes by the number of neurons in the pool
func PoolInhibSpikesFromRaw(pi, di uint32) {
fnn := float32(PoolNNeurons(pi))
Pools.Set(Pools.Value(int(pi), int(di), int(fsfffb.FBsRaw))/fnn, int(pi), int(di), int(fsfffb.FBs))
Pools.Set(Pools.Value(int(pi), int(di), int(fsfffb.FFsRaw)), int(pi), int(di), int(fsfffb.FFs))
Pools.Set(Pools.Value(int(pi), int(di), int(fsfffb.GeExtRaw)), int(pi), int(di), int(fsfffb.GeExts))
PoolInhibInitRaw(pi, di)
}
// PoolInhibLayerMax updates the given pool-level inhib TotalGi from the given layer-level Gi,
// with the resulting value being the Max of the two
func PoolInhibLayerMax(pi, di uint32, liGi float32) {
Pools.Set(liGi, int(pi), int(di), int(fsfffb.LayGi))
Pools.Set(math32.Max(Pools.Value(int(pi), int(di), int(fsfffb.TotalGi)), liGi), int(pi), int(di), int(fsfffb.TotalGi))
}
// PoolInhibPoolMax updates the given layer-level inhib TotalGi from the given pool-level Gi,
// with the resulting value being the Max of the two
func PoolInhibPoolMax(pi, di uint32, piGi float32) {
Pools.Set(math32.Max(Pools.Value(int(pi), int(di), int(fsfffb.TotalGi)), piGi), int(pi), int(di), int(fsfffb.TotalGi))
}
//////// atomic int safe accumulation
// PoolInhibRawIncrInt increments the int-based raw values from the given neuron-level
// input values, using atomic integer adds for thread-safe accumulation
func PoolInhibRawIncrInt(pi, di uint32, spike, geRaw, geExt float32) {
floatToInt := float32(uint32(1) << 24)
fnn := float32(PoolNNeurons(pi))
atomic.AddInt32(PoolsInt.ValuePtr(int(pi), int(di), int(FBsRawInt)), int32(spike))
atomic.AddInt32(PoolsInt.ValuePtr(int(pi), int(di), int(FFsRawInt)), int32((geRaw/fnn)*floatToInt))
atomic.AddInt32(PoolsInt.ValuePtr(int(pi), int(di), int(GeExtRawInt)), int32((geExt/fnn)*floatToInt))
}
// PoolInhibIntToRaw converts the accumulated int values into float32 raw values
func PoolInhibIntToRaw(pi, di uint32) {
floatFromInt := 1.0 / float32(uint32(1)<<24)
fbs := PoolsInt.Value(int(pi), int(di), int(FBsRawInt))
ffs := PoolsInt.Value(int(pi), int(di), int(FFsRawInt))
geExt := PoolsInt.Value(int(pi), int(di), int(GeExtRawInt))
//gosl:end
floatToInt := int32(1) << 24
if ffs < 0 {
slog.Warn("PoolInhibIntToRaw overflow in FFsRawInt", "pi:", pi, "di:", di, "val:", ffs)
ffs = floatToInt
}
if geExt < 0 {
slog.Warn("PoolInhibIntToRaw overflow in GeExtRawInt", "pi:", pi, "di:", di, "val:", geExt)
geExt = floatToInt
}
//gosl:start
Pools.Set(float32(fbs), int(pi), int(di), int(fsfffb.FBsRaw))
Pools.Set(float32(ffs)*floatFromInt, int(pi), int(di), int(fsfffb.FFsRaw))
Pools.Set(float32(geExt)*floatFromInt, int(pi), int(di), int(fsfffb.GeExtRaw))
}
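// Worked example (illustrative only) of the fixed-point scheme used above:
// PoolInhibRawIncrInt scales float inputs by 2^24 = 16777216 so they can be
// accumulated with integer atomics, and PoolInhibIntToRaw multiplies by
// 1/2^24 to recover the float value. E.g., geRaw/fnn = 0.25 is stored as
// int32(0.25 * 16777216) = 4194304 and read back as 4194304 / 16777216 = 0.25.
// A negative accumulated value indicates overflow and is clamped with a warning.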
//gosl:end
// Code generated by "goal build"; DO NOT EDIT.
//line init-layer.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/lab/base/randx"
)
// InitWeights initializes the weight values in the network, i.e., resetting learning.
// Also calls InitActs.
func (ly *Layer) InitWeights(ctx *Context, nt *Network) { //types:add
ly.UpdateParams()
ly.Params.Acts.Dend.HasMod.SetBool(false)
li := ly.Index
for di := uint32(0); di < ly.MaxData; di++ {
LayerStates.Set(ly.Params.Inhib.ActAvg.Nominal, int(li), int(di), int(LayerActMAvg))
LayerStates.Set(ly.Params.Inhib.ActAvg.Nominal, int(li), int(di), int(LayerActPAvg))
LayerStates.Set(1, int(li), int(di), int(LayerAvgMaxGeM))
LayerStates.Set(1, int(li), int(di), int(LayerAvgMaxGiM))
LayerStates.Set(1, int(li), int(di), int(LayerGiMult))
LayerStates.Set(0, int(li), int(di), int(LayerPhaseDiff))
LayerStates.Set(0, int(li), int(di), int(LayerPhaseDiffAvg))
LayerStates.Set(0, int(li), int(di), int(LayerPhaseDiffVar))
LayerStates.Set(-1, int(li), int(di), int(LayerRT))
LayerStates.Set(-1, int(li), int(di), int(GatedRT))
LayerStates.Set(0, int(li), int(di), int(LayerRewPredPos))
LayerStates.Set(0, int(li), int(di), int(LayerRewPredNeg))
}
ly.InitActAvg(ctx)
ly.InitActs(ctx)
ly.InitGScale(ctx)
for _, pt := range ly.SendPaths {
if pt.Off {
continue
}
pt.InitWeights(ctx, nt)
}
for _, pt := range ly.RecvPaths {
if pt.Off {
continue
}
if pt.Params.Com.GType == ModulatoryG {
ly.Params.Acts.Dend.HasMod.SetBool(true)
break
}
}
}
// InitActAvg initializes the running-average activation values
// that drive learning and the longer time averaging values.
func (ly *Layer) InitActAvg(ctx *Context) {
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
for di := uint32(0); di < ly.MaxData; di++ {
ly.Params.Learn.InitNeuronCa(ctx, ni, di)
}
}
if ly.Params.HasPoolInhib() && ly.Params.Learn.TrgAvgAct.Pool.IsTrue() {
ly.InitActAvgPools(ctx)
} else {
ly.InitActAvgLayer(ctx)
}
}
// InitActAvgLayer initializes the running-average activation values
// that drive learning and the longer time averaging values.
// version with just overall layer-level inhibition.
func (ly *Layer) InitActAvgLayer(ctx *Context) {
strg := ly.Params.Learn.TrgAvgAct.TrgRange.Min
rng := ly.Params.Learn.TrgAvgAct.TrgRange.Range()
tmax := ly.Params.Learn.TrgAvgAct.TrgRange.Max
gibinit := ly.Params.Learn.TrgAvgAct.GiBaseInit
inc := float32(0)
nn := ly.NNeurons
if nn > 1 {
inc = rng / float32(nn-1)
}
porder := make([]int, nn)
for i := range porder {
porder[i] = i
}
if ly.Params.Learn.TrgAvgAct.Permute.IsTrue() {
randx.PermuteInts(porder, &ly.Network.Rand)
}
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
vi := porder[lni] // same for all datas
trg := strg + inc*float32(vi)
NeuronAvgs.Set(trg, int(ni), int(TrgAvg))
NeuronAvgs.Set(trg, int(ni), int(AvgPct))
NeuronAvgs.Set(ly.Params.Inhib.ActAvg.Nominal*trg, int(ni), int(ActAvg))
NeuronAvgs.Set(0, int(ni), int(AvgDif))
NeuronAvgs.Set(0, int(ni), int(DTrgAvg))
NeuronAvgs.Set(ly.Params.Acts.Init.GetGeBase(&ly.Network.Rand), int(ni), int(GeBase))
NeuronAvgs.Set(ly.Params.Acts.Init.GetGiBase(&ly.Network.Rand), int(ni), int(GiBase))
if gibinit > 0 {
gib := gibinit * (tmax - trg)
NeuronAvgs.Set(gib, int(ni), int(GiBase))
}
}
}
// InitActAvgPools initializes the running-average activation values
// that drive learning and the longer time averaging values.
// version with pooled inhibition.
func (ly *Layer) InitActAvgPools(ctx *Context) {
strg := ly.Params.Learn.TrgAvgAct.TrgRange.Min
rng := ly.Params.Learn.TrgAvgAct.TrgRange.Range()
tmax := ly.Params.Learn.TrgAvgAct.TrgRange.Max
gibinit := ly.Params.Learn.TrgAvgAct.GiBaseInit
inc := float32(0)
nNy := ly.Shape.DimSize(2)
nNx := ly.Shape.DimSize(3)
nn := nNy * nNx
if nn > 1 {
inc = rng / float32(nn-1)
}
np := ly.NPools
porder := make([]int, nn)
for i := range porder {
porder[i] = i
}
for spi := uint32(1); spi < np; spi++ {
if ly.Params.Learn.TrgAvgAct.Permute.IsTrue() {
randx.PermuteInts(porder, &ly.Network.Rand)
}
pi := ly.Params.PoolIndex(spi) // only using for idxs
nsi := PoolIxs.Value(int(pi), int(PoolNeurSt))
nei := PoolIxs.Value(int(pi), int(PoolNeurEd))
for lni := nsi; lni < nei; lni++ {
ni := ly.NeurStIndex + uint32(lni)
if NeuronIsOff(ni) {
continue
}
vi := porder[lni-nsi]
trg := strg + inc*float32(vi)
NeuronAvgs.Set(trg, int(ni), int(TrgAvg))
NeuronAvgs.Set(trg, int(ni), int(AvgPct))
NeuronAvgs.Set(ly.Params.Inhib.ActAvg.Nominal*trg, int(ni), int(ActAvg))
NeuronAvgs.Set(0, int(ni), int(AvgDif))
NeuronAvgs.Set(0, int(ni), int(DTrgAvg))
NeuronAvgs.Set(ly.Params.Acts.Init.GetGeBase(&ly.Network.Rand), int(ni), int(GeBase))
NeuronAvgs.Set(ly.Params.Acts.Init.GetGiBase(&ly.Network.Rand), int(ni), int(GiBase))
if gibinit > 0 {
gib := gibinit * (tmax - trg)
NeuronAvgs.Set(gib, int(ni), int(GiBase))
}
}
}
}
// InitActs fully initializes activation state -- only called automatically during InitWeights
func (ly *Layer) InitActs(ctx *Context) { //types:add
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ly.MaxData; di++ {
ly.Params.Acts.InitActs(ctx, ni, di)
}
}
np := ly.NPools
for spi := uint32(0); spi < np; spi++ {
for di := uint32(0); di < ly.MaxData; di++ {
pi := ly.Params.PoolIndex(spi)
PoolInit(pi, di)
if ly.Params.Acts.Clamp.Add.IsFalse() && ly.Params.IsInput() {
PoolsInt.Set(1, int(pi), int(di), int(Clamped))
}
// Target layers are dynamically updated
}
}
// ly.InitPathGBuffs(ctx)
}
// InitWtSym initializes weight symmetry -- higher layers copy weights from lower layers
func (ly *Layer) InitWtSym(ctx *Context) {
for _, pt := range ly.SendPaths {
if pt.Off {
continue
}
if pt.Params.SWts.Init.Sym.IsFalse() {
continue
}
// key ordering constraint on which way weights are copied
if pt.Recv.Index < pt.Send.Index {
continue
}
rpj, has := ly.RecipToSendPath(pt)
if !has {
continue
}
if rpj.Params.SWts.Init.Sym.IsFalse() {
continue
}
pt.InitWtSym(ctx, rpj)
}
}
// InitGScale computes the initial scaling factor for synaptic input conductances G,
// stored in GScale.Scale, based on sending layer initial activation.
func (ly *Layer) InitGScale(ctx *Context) {
totGeRel := float32(0)
totGiRel := float32(0)
totGmRel := float32(0)
totGmnRel := float32(0)
for _, pt := range ly.RecvPaths {
if pt.Off {
continue
}
slay := pt.Send
savg := slay.Params.Inhib.ActAvg.Nominal
snu := slay.NNeurons
ncon := pt.RecvConNAvgMax.Avg
pt.Params.GScale.Scale = pt.Params.PathScale.FullScale(savg, float32(snu), ncon)
// reverting this change: if you want to eliminate a path, set the Off flag
// if you want to negate it but keep the relative factor in the denominator
// then set the scale to 0.
//
// if pj.Params.GScale == 0 {
// continue
// }
switch pt.Params.Com.GType {
case InhibitoryG:
totGiRel += pt.Params.PathScale.Rel
case ModulatoryG:
totGmRel += pt.Params.PathScale.Rel
case MaintG:
totGmnRel += pt.Params.PathScale.Rel
default:
totGeRel += pt.Params.PathScale.Rel
}
}
for _, pt := range ly.RecvPaths {
switch pt.Params.Com.GType {
case InhibitoryG:
if totGiRel > 0 {
pt.Params.GScale.Rel = pt.Params.PathScale.Rel / totGiRel
pt.Params.GScale.Scale /= totGiRel
} else {
pt.Params.GScale.Rel = 0
pt.Params.GScale.Scale = 0
}
case ModulatoryG:
if totGmRel > 0 {
pt.Params.GScale.Rel = pt.Params.PathScale.Rel / totGmRel
pt.Params.GScale.Scale /= totGmRel
} else {
pt.Params.GScale.Rel = 0
pt.Params.GScale.Scale = 0
}
case MaintG:
if totGmnRel > 0 {
pt.Params.GScale.Rel = pt.Params.PathScale.Rel / totGmnRel
pt.Params.GScale.Scale /= totGmnRel
} else {
pt.Params.GScale.Rel = 0
pt.Params.GScale.Scale = 0
}
default:
if totGeRel > 0 {
pt.Params.GScale.Rel = pt.Params.PathScale.Rel / totGeRel
pt.Params.GScale.Scale /= totGeRel
} else {
pt.Params.GScale.Rel = 0
pt.Params.GScale.Scale = 0
}
}
}
}
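// Worked example (illustrative only) of the relative normalization above:
// if a layer receives two excitatory paths with PathScale.Rel = 1 and 0.2,
// then totGeRel = 1.2, so their GScale.Rel values become 1/1.2 ≈ 0.83 and
// 0.2/1.2 ≈ 0.17, and each GScale.Scale is likewise divided by 1.2, preserving
// the relative balance of inputs while normalizing the overall level.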
//gosl:start
// DecayState decays activation state by given proportion
// (default decay values are ly.Params.Acts.Decay.Act, Glong)
func (ly *LayerParams) DecayState(ctx *Context, di uint32, decay, glong, ahp float32) {
nn := ly.Indexes.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
ly.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
// Note: synapse-level Ca decay happens in DWt
if ahp == 1 {
lt := ly.Type
if lt == PTMaintLayer {
Neurons.Set(0.0, int(ni), int(di), int(CtxtGe))
Neurons.Set(0.0, int(ni), int(di), int(CtxtGeRaw))
Neurons.Set(0.0, int(ni), int(di), int(CtxtGeOrig))
}
}
}
ly.DecayStateLayer(ctx, di, decay, glong, ahp)
}
// DecayStateLayer does layer-level decay, but not neuron level
func (ly *LayerParams) DecayStateLayer(ctx *Context, di uint32, decay, glong, ahp float32) {
np := ly.Indexes.NPools
for spi := uint32(0); spi < np; spi++ {
pi := ly.PoolIndex(spi)
PoolInhibDecay(pi, di, decay)
}
}
// DecayStatePool decays activation state by given proportion in given sub-pool index (0 based)
func (ly *LayerParams) DecayStatePool(ctx *Context, pool int, decay, glong, ahp float32) {
spi := uint32(pool + 1) // 1 based
for di := uint32(0); di < ctx.NData; di++ {
pi := ly.PoolIndex(spi)
nsi := PoolIxs.Value(int(pi), int(PoolNeurSt))
nei := PoolIxs.Value(int(pi), int(PoolNeurEd))
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
ly.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
}
PoolInhibDecay(pi, di, decay)
}
}
//gosl:end
// Code generated by "goal build"; DO NOT EDIT.
//line init-net.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/paths"
)
// GlobalsReset resets all global values to 0, for all NData
func GlobalsReset() {
nix := GetNetworkIxs(0)
for di := uint32(0); di < nix.MaxData; di++ {
for vg := GvRew; vg < GvCaBinWts; vg++ {
GlobalScalars.Set(0, int(vg), int(di))
}
for vn := GvCost; vn < GlobalVectorVarsN; vn++ {
for ui := uint32(0); ui < MaxGlobalVecN; ui++ {
GlobalVectors.Set(0, int(vn), int(ui), int(di))
}
}
}
}
// InitWeights initializes synaptic weights and all other associated long-term state variables,
// including running-average state values (e.g., layer running-average activations).
func (nt *Network) InitWeights() { //types:add
ctx := nt.Context()
for di := uint32(0); di < ctx.NData; di++ {
nt.Rubicon.Reset(di)
}
nt.BuildPathGBuf()
ctx.SlowCounter = 0
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.InitWeights(ctx, nt) // calls InitActs too
}
// separate pass to enforce symmetry
// st := time.Now()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.InitWtSym(ctx)
}
// dur := time.Now().Sub(st)
// fmt.Printf("sym: %v\n", dur)
ToGPUAll()
}
// InitTopoSWts initializes SWt structural weight parameters from
// path types that support topographic weight patterns and have the relevant flags set,
// including: paths.PoolTile, paths.Circle.
// Call before InitWeights if using topographic weights.
func (nt *Network) InitTopoSWts() {
ctx := nt.Context()
swts := &tensor.Float32{}
for _, ly := range nt.Layers {
if ly.Off {
continue
}
for i := 0; i < ly.NumRecvPaths(); i++ {
pj := ly.RecvPaths[i]
if pj.Off {
continue
}
pat := pj.Pattern
switch pt := pat.(type) {
case *paths.PoolTile:
if !pt.HasTopoWeights() {
continue
}
slay := pj.Send
pt.TopoWeights(&slay.Shape, &ly.Shape, swts)
pj.SetSWtsRPool(ctx, swts)
case *paths.Circle:
if !pt.TopoWeights {
continue
}
pj.SetSWtsFunc(ctx, pt.GaussWts)
}
}
}
}
// InitGScale computes the initial scaling factor for synaptic input conductances G,
// stored in GScale.Scale, based on sending layer initial activation.
func (nt *Network) InitGScale() {
ctx := nt.Context()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.InitGScale(ctx)
}
}
// DecayState decays activation state by the given proportion,
// e.g., 1 = decay completely, and 0 = decay not at all.
// glong = separate decay factor for long-timescale conductances (g).
// This is called automatically in NewState, but is available
// here for ad-hoc decay cases.
func (nt *Network) DecayState(decay, glong, ahp float32) {
ctx := nt.Context()
// todo: move to gpu
// nt.GPU.SyncStateFromGPU() // note: because we have to sync back, we need to sync from first to be current
for _, ly := range nt.Layers {
if ly.Off {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
ly.Params.DecayState(ctx, di, decay, glong, ahp)
}
}
ToGPULayersNeurons()
}
// DecayStateByType decays activation state for given layer types
// by given proportion e.g., 1 = decay completely, and 0 = decay not at all.
// glong = separate decay factor for long-timescale conductances (g)
func (nt *Network) DecayStateByType(decay, glong, ahp float32, types ...LayerTypes) {
nt.DecayStateLayers(decay, glong, ahp, nt.LayersByType(types...)...)
}
// DecayStateByClass decays activation state for given class name(s)
// by given proportion e.g., 1 = decay completely, and 0 = decay not at all.
// glong = separate decay factor for long-timescale conductances (g)
func (nt *Network) DecayStateByClass(decay, glong, ahp float32, classes ...string) {
nt.DecayStateLayers(decay, glong, ahp, nt.LayersByClass(classes...)...)
}
// DecayStateLayers decays activation state for given layers
// by given proportion e.g., 1 = decay completely, and 0 = decay not at all.
// glong = separate decay factor for long-timescale conductances (g).
// If this is not being called at the start, around NewState call,
// then you should also call: nt.GPU.SyncGBufToGPU()
// to zero the GBuf values which otherwise will persist spikes in flight.
func (nt *Network) DecayStateLayers(decay, glong, ahp float32, layers ...string) {
ctx := nt.Context()
for _, lynm := range layers {
ly := nt.LayerByName(lynm)
if ly.Off {
continue
}
for di := uint32(0); di < ctx.NData; di++ {
ly.Params.DecayState(ctx, di, decay, glong, ahp)
}
}
ToGPULayersNeurons()
}
// InitActs fully initializes activation state -- not automatically called
func (nt *Network) InitActs() { //types:add
ctx := nt.Context()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.InitActs(ctx)
}
ToGPULayersNeurons()
ToGPU(PathGBufVar, PathGSynsVar)
}
// UpdateExtFlags updates the neuron flags for external input based on current
// layer Type field -- call this if the Type has changed since the last
// ApplyExt* method call.
func (nt *Network) UpdateExtFlags() {
ctx := nt.Context()
for _, ly := range nt.Layers {
if ly.Off {
continue
}
ly.UpdateExtFlags(ctx)
}
}
// Code generated by "goal build"; DO NOT EDIT.
//line init-path.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
)
// index naming:
// syi = path-relative synapse index (per existing usage)
// syni = network-relative synapse index -- add SynStIndex to syi
// SetSWtsRPool initializes SWt structural weight values using given tensor
// of values which has unique values for each recv neuron within a given pool.
func (pt *Path) SetSWtsRPool(ctx *Context, swts tensor.Tensor) {
rNuY := swts.DimSize(0)
rNuX := swts.DimSize(1)
rNu := rNuY * rNuX
rfsz := swts.Len() / rNu
rsh := pt.Recv.Shape
rNpY := rsh.DimSize(0)
rNpX := rsh.DimSize(1)
r2d := false
if rsh.NumDims() != 4 {
r2d = true
rNpY = 1
rNpX = 1
}
wsz := swts.Len()
for rpy := 0; rpy < rNpY; rpy++ {
for rpx := 0; rpx < rNpX; rpx++ {
for ruy := 0; ruy < rNuY; ruy++ {
for rux := 0; rux < rNuX; rux++ {
ri := 0
if r2d {
ri = rsh.IndexTo1D(ruy, rux)
} else {
ri = rsh.IndexTo1D(rpy, rpx, ruy, rux)
}
scst := (ruy*rNuX + rux) * rfsz
syIndexes := pt.RecvSynIxs(uint32(ri))
for ci, syi := range syIndexes {
syni := pt.SynStIndex + syi
swt := float32(swts.Float1D((scst + ci) % wsz))
Synapses.Set(float32(swt), int(syni), int(SWt))
wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(int(syni), int(Wt)) - pt.Params.SWts.Init.Mean))
Synapses.Set(wt, int(syni), int(Wt))
Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), int(syni), int(LWt))
}
}
}
}
}
}
// SetWeightsFunc initializes synaptic Wt value using given function
// based on receiving and sending unit indexes.
// Strongly suggest calling SWtRescale after.
func (pt *Path) SetWeightsFunc(ctx *Context, wtFun func(si, ri int, send, recv *tensor.Shape) float32) {
rsh := &pt.Recv.Shape
rn := rsh.Len()
ssh := &pt.Send.Shape
for ri := 0; ri < rn; ri++ {
syIndexes := pt.RecvSynIxs(uint32(ri))
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
si := pt.Params.SynSendLayerIndex(syni)
wt := wtFun(int(si), ri, ssh, rsh)
Synapses.Set(wt, int(syni), int(SWt))
Synapses.Set(wt, int(syni), int(Wt))
Synapses.Set(0.5, int(syni), int(LWt))
}
}
}
// SetSWtsFunc initializes structural SWt values using given function
// based on receiving and sending unit indexes.
func (pt *Path) SetSWtsFunc(ctx *Context, swtFun func(si, ri int, send, recv *tensor.Shape) float32) {
rsh := &pt.Recv.Shape
rn := rsh.Len()
ssh := &pt.Send.Shape
for ri := 0; ri < rn; ri++ {
syIndexes := pt.RecvSynIxs(uint32(ri))
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
si := int(pt.Params.SynSendLayerIndex(syni))
swt := swtFun(si, ri, ssh, rsh)
Synapses.Set(swt, int(syni), int(SWt))
wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(int(syni), int(Wt)) - pt.Params.SWts.Init.Mean))
Synapses.Set(wt, int(syni), int(Wt))
Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), int(syni), int(LWt))
}
}
}
// InitWeightsSyn initializes weight values based on WtInit randomness parameters
// for an individual synapse.
// It also updates the linear weight value based on the sigmoidal weight value.
func (pt *Path) InitWeightsSyn(ctx *Context, syni uint32, rnd randx.Rand, mean, spct float32) {
pt.Params.SWts.InitWeightsSyn(ctx, syni, rnd, mean, spct)
}
// InitWeightsSynTrace initializes SynapseTraces values
// for an individual synapse.
func (pt *Path) InitWeightsSynTrace(ctx *Context, syni, di uint32) {
pt.Params.SWts.InitWeightsSynTrace(ctx, syni, di)
}
// InitWeights initializes weight values according to SWt params,
// enforcing current constraints.
func (pt *Path) InitWeights(ctx *Context, nt *Network) {
pt.Params.Learn.LRate.Init()
pt.Params.InitGBuffs(ctx)
rlay := pt.Recv
spct := pt.Params.SWts.Init.SPct
if rlay.Params.IsTarget() {
pt.Params.SWts.Init.SPct = 0
spct = 0
}
smn := pt.Params.SWts.Init.Mean
// todo: why is this recv based? prob important to keep for consistency
for lni := uint32(0); lni < rlay.NNeurons; lni++ {
ni := rlay.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
syIndexes := pt.RecvSynIxs(lni)
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
pt.InitWeightsSyn(ctx, syni, &nt.Rand, smn, spct)
for di := uint32(0); di < ctx.NData; di++ {
pt.InitWeightsSynTrace(ctx, syni, di)
}
}
}
if pt.Params.SWts.Adapt.On.IsTrue() && !rlay.Params.IsTarget() {
pt.SWtRescale(ctx)
}
}
// SWtRescale rescales the SWt values to preserve the target overall mean value,
// using subtractive normalization.
func (pt *Path) SWtRescale(ctx *Context) {
rlay := pt.Recv
smn := pt.Params.SWts.Init.Mean
for lni := uint32(0); lni < rlay.NNeurons; lni++ {
ni := rlay.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
var nmin, nmax int
var sum float32
syIndexes := pt.RecvSynIxs(lni)
nCons := len(syIndexes)
if nCons <= 1 {
continue
}
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
swt := Synapses.Value(int(syni), int(SWt))
sum += swt
if swt <= pt.Params.SWts.Limit.Min {
nmin++
} else if swt >= pt.Params.SWts.Limit.Max {
nmax++
}
}
amn := sum / float32(nCons)
mdf := smn - amn // subtractive
if mdf == 0 {
continue
}
if mdf > 0 { // need to increase
if nmax > 0 && nmax < nCons {
amn = sum / float32(nCons-nmax)
mdf = smn - amn
}
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
if Synapses.Value(int(syni), int(SWt)) <= pt.Params.SWts.Limit.Max {
swt := pt.Params.SWts.ClipSWt(Synapses.Value(int(syni), int(SWt)) + mdf)
Synapses.Set(swt, int(syni), int(SWt))
Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt))
}
}
} else {
if nmin > 0 && nmin < nCons {
amn = sum / float32(nCons-nmin)
mdf = smn - amn
}
for _, syi := range syIndexes {
syni := pt.SynStIndex + syi
if Synapses.Value(int(syni), int(SWt)) >= pt.Params.SWts.Limit.Min {
swt := pt.Params.SWts.ClipSWt(Synapses.Value(int(syni), int(SWt)) + mdf)
Synapses.Set(swt, int(syni), int(SWt))
Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt))
}
}
}
}
}
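// Worked example (illustrative only) of the subtractive normalization above:
// with target mean smn = 0.5 and incoming SWt values {0.4, 0.4, 0.4}, the actual
// mean amn = 0.4, so mdf = 0.1 is added to each value (subject to SWts.Limit),
// restoring the mean to 0.5. Values already at the Max limit are excluded and
// the correction is recomputed over the remaining synapses.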
// sender based version:
// only looked at recv layers > send layers -- ri is "above" si
// for: ri <- this is now sender on recip path: recipSi
// ^ \ <- look for send back to original si, now as a receiver
// / v
// start: si == recipRi <- look in sy.RecvIndex of recipSi's sending cons for recipRi == si
//
// InitWtSym initializes weight symmetry.
// Is given the reciprocal pathway where
// the Send and Recv layers are reversed
// (see LayerBase RecipToRecvPath)
func (pt *Path) InitWtSym(ctx *Context, rpj *Path) {
if len(rpj.SendCon) == 0 {
return
}
slay := pt.Send
for lni := uint32(0); lni < slay.NNeurons; lni++ {
scon := pt.SendCon[lni]
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
syni := pt.SynStIndex + syi
ri := pt.Params.SynRecvLayerIndex(syni) // <- this sends to me, ri
recipSi := ri // reciprocal case is si is now receiver
recipc := rpj.SendCon[recipSi]
if recipc.N == 0 {
continue
}
firstSyni := rpj.SynStIndex + recipc.Start
lastSyni := rpj.SynStIndex + recipc.Start + recipc.N - 1
firstRi := rpj.Params.SynRecvLayerIndex(firstSyni)
lastRi := rpj.Params.SynRecvLayerIndex(lastSyni)
if lni < firstRi || lni > lastRi { // fast reject -- paths are always in order!
continue
}
// start at index proportional to ri relative to rist
up := int32(0)
if lastRi > firstRi {
up = int32(float32(recipc.N) * float32(lni-firstRi) / float32(lastRi-firstRi))
}
dn := up - 1
for {
doing := false
if up < int32(recipc.N) {
doing = true
recipCi := uint32(recipc.Start) + uint32(up)
recipSyni := rpj.SynStIndex + recipCi
recipRi := rpj.Params.SynRecvLayerIndex(recipSyni)
if recipRi == lni {
Synapses.Set(Synapses.Value(int(syni), int(Wt)), int(recipSyni), int(Wt))
Synapses.Set(Synapses.Value(int(syni), int(LWt)), int(recipSyni), int(LWt))
Synapses.Set(Synapses.Value(int(syni), int(SWt)), int(recipSyni), int(SWt))
// note: if we support SymFromTop then can have option to go other way
break
}
up++
}
if dn >= 0 {
doing = true
recipCi := uint32(recipc.Start) + uint32(dn)
recipSyni := rpj.SynStIndex + recipCi
recipRi := rpj.Params.SynRecvLayerIndex(recipSyni)
if recipRi == lni {
Synapses.Set(Synapses.Value(int(syni), int(Wt)), int(recipSyni), int(Wt))
Synapses.Set(Synapses.Value(int(syni), int(LWt)), int(recipSyni), int(LWt))
Synapses.Set(Synapses.Value(int(syni), int(SWt)), int(recipSyni), int(SWt))
// note: if we support SymFromTop then can have option to go other way
break
}
dn--
}
if !doing {
break
}
}
}
}
}
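// The following is an illustrative sketch (not part of the generated API) of how
// InitWtSym is typically driven: for each receiving pathway into a layer, find the
// reciprocal sending pathway back to the same source layer (Layer.RecipToRecvPath)
// and copy weights so that forward and backward connections start out symmetric.
// Which direction is treated as the source of the copy is a modeling choice.
func initWtSymSketch(ctx *Context, ly *Layer) {
for _, pt := range ly.RecvPaths {
if rpj, ok := ly.RecipToRecvPath(pt); ok {
pt.InitWtSym(ctx, rpj)
}
}
}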
// Code generated by "goal build"; DO NOT EDIT.
//line layer.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"io"
"log"
"math/rand"
"strconv"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"github.com/emer/axon/v2/fsfffb"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/weights"
)
// index naming:
// lni = layer-based neuron index (0 = first neuron in layer)
// ni = absolute whole network neuron index
// Layer implements the basic Axon spiking activation function,
// and manages learning in the pathways.
type Layer struct {
emer.LayerBase
// Params are layer parameters (pointer to item in Network.LayerParams).
Params *LayerParams
// our parent network, in case we need to use it to find
// other layers etc; set when added by network.
Network *Network `copier:"-" json:"-" xml:"-" display:"-"`
// Type is the type of layer, which drives specialized computation as needed.
Type LayerTypes
// NNeurons is the number of neurons in the layer.
NNeurons uint32 `display:"-"`
// NeurStIndex is the starting index of neurons for this layer within
// the global Network list.
NeurStIndex uint32 `display:"-" inactive:"-"`
// NPools is the number of inhibitory pools based on layer shape;
// the first one represents the entire set of neurons in the layer,
// and 4D shaped layers have sub-pools after that.
NPools uint32 `display:"-"`
// MaxData is the maximum amount of input data that can be processed in
// parallel in one pass of the network (copied from [NetworkIndexes]).
// Neuron, Pool, Values storage is allocated to hold this amount.
MaxData uint32 `display:"-"`
// RecvPaths is the list of receiving pathways into this layer from other layers.
RecvPaths []*Path
// SendPaths is the list of sending pathways from this layer to other layers.
SendPaths []*Path
// BuildConfig has configuration data set when the network is configured,
// that is used during the network Build() process via PostBuild method,
// after all the structure of the network has been fully constructed.
// In particular, the Params is nil until Build, so setting anything
// specific in there (e.g., an index to another layer) must be done
// as a second pass. Note that Params are all applied after Build
// and can set user-modifiable params, so this is for more special
// algorithm structural parameters set during ConfigNet() methods.
BuildConfig map[string]string `table:"-"`
// DefaultParams are closures that apply default parameters
// prior to user-set parameters. These are useful for specific layer
// functionality in specialized brain areas (e.g., Rubicon, BG etc)
// not associated with a layer type, which otherwise is used to hard-code
// initial default parameters.
DefaultParams []func(ly *LayerParams) `display:"-"`
}
// emer.Layer interface methods
func (ly *Layer) TypeName() string { return ly.Type.String() }
func (ly *Layer) TypeNumber() int { return int(ly.Type) }
func (ly *Layer) NumRecvPaths() int { return len(ly.RecvPaths) }
func (ly *Layer) RecvPath(idx int) emer.Path { return ly.RecvPaths[idx] }
func (ly *Layer) NumSendPaths() int { return len(ly.SendPaths) }
func (ly *Layer) SendPath(idx int) emer.Path { return ly.SendPaths[idx] }
func (ly *Layer) AddClass(cls ...string) *Layer {
ly.LayerBase.AddClass(cls...)
return ly
}
func (ly *Layer) Defaults() { //types:add
ctx := ly.Network.Context()
li := ly.Index
if ly.Params != nil {
ly.Params.Type = ly.Type
ly.Params.Defaults()
for di := uint32(0); di < ly.MaxData; di++ {
LayerStates.Set(1, int(li), int(di), int(LayerGiMult))
}
// ly.Params.Learn.CaLearn.Dt.PDTauForNCycles(int(ctx.ThetaCycles))
// ly.Params.Learn.CaSpike.Dt.PDTauForNCycles(int(ctx.ThetaCycles))
}
for _, pt := range ly.RecvPaths { // must do path defaults first, then custom
pt.Defaults()
}
if ly.Params == nil {
return
}
switch ly.Type {
case InputLayer:
ly.Params.Acts.Clamp.Ge = 1.5
ly.Params.Inhib.Layer.Gi = 0.9
ly.Params.Inhib.Pool.Gi = 0.9
ly.Params.Learn.TrgAvgAct.SubMean = 0
case TargetLayer:
ly.Params.Acts.Clamp.Ge = 0.8
ly.Params.Learn.TrgAvgAct.SubMean = 0
// ly.Params.Learn.RLRate.SigmoidMin = 1
case CTLayer:
ly.Params.CTDefaults()
case PTMaintLayer:
ly.PTMaintDefaults()
case PTPredLayer:
ly.Params.PTPredDefaults()
case PulvinarLayer:
ly.Params.PulvinarDefaults()
case RewLayer:
ly.Params.RWDefaults()
case RWPredLayer:
ly.Params.RWDefaults()
ly.Params.RWPredDefaults()
case RWDaLayer:
ly.Params.RWDefaults()
case TDPredLayer:
ly.Params.TDDefaults()
ly.Params.TDPredDefaults()
case TDIntegLayer, TDDaLayer:
ly.Params.TDDefaults()
case IOLayer:
ly.Params.IODefaults()
case CNiIOLayer:
ly.Params.CNiIODefaults()
case CNiUpLayer:
ly.Params.CNiUpDefaults()
case CNeLayer:
ly.Params.NuclearDefaults()
case LDTLayer:
ly.LDTDefaults()
case BLALayer:
ly.BLADefaults()
case CeMLayer:
ly.CeMDefaults()
case VSPatchLayer:
ly.Params.VSPatchDefaults()
case DrivesLayer:
ly.Params.DrivesDefaults()
case UrgencyLayer:
ly.Params.UrgencyDefaults()
case USLayer:
ly.Params.USDefaults()
case PVLayer:
ly.Params.PVDefaults()
case DSMatrixLayer:
ly.DSMatrixDefaults()
case VSMatrixLayer:
ly.VSMatrixDefaults()
case DSPatchLayer:
ly.DSPatchDefaults()
case GPLayer:
ly.GPDefaults()
case STNLayer:
ly.STNDefaults()
case BGThalLayer:
ly.BGThalDefaults()
case VSGatedLayer:
ly.Params.VSGatedDefaults()
}
ly.Params.CT.DecayForNCycles(int(ctx.ThetaCycles))
ly.applyDefaultParams()
ly.UpdateParams()
}
// Update is an interface method for generically updating after edits;
// it should be used only for the values on the struct itself.
// UpdateParams is used to update all parameters, including Paths.
func (ly *Layer) Update() {
if ly.Params == nil {
return
}
if !ly.Is4D() && ly.Params.Inhib.Pool.On.IsTrue() {
ly.Params.Inhib.Pool.On.SetBool(false)
}
ly.Params.Update()
}
// UpdateParams updates all params given any changes that might
// have been made to individual values including those in the
// receiving pathways of this layer.
// This is not called Update because it is not just about the
// local values in the struct.
func (ly *Layer) UpdateParams() {
ly.Update()
for _, pt := range ly.RecvPaths {
pt.UpdateParams()
}
}
// todo: not standard:
func (ly *Layer) SetOff(off bool) {
ly.Off = off
// a Path is off if either the sending or the receiving layer is off
// or if the path has been set to Off directly
for _, pt := range ly.RecvPaths {
pt.Off = pt.Send.Off || off
}
for _, pt := range ly.SendPaths {
pt.Off = pt.Recv.Off || off
}
}
// RecipToSendPath finds the reciprocal pathway to
// the given sending pathway within the ly layer.
// i.e., where ly is instead the *receiving* layer from same other layer B
// that is the receiver of the spj pathway we're sending to.
//
// ly = A, other layer = B:
//
// spj: S=A -> R=B
// rpj: R=A <- S=B
//
// returns false if not found.
func (ly *Layer) RecipToSendPath(spj *Path) (*Path, bool) {
for _, rpj := range ly.RecvPaths {
if rpj.Send == spj.Recv { // B = sender of rpj, recv of spj
return rpj, true
}
}
return nil, false
}
// RecipToRecvPath finds the reciprocal pathway to
// the given recv pathway within the ly layer.
// i.e., where ly is instead the *sending* layer to same other layer B
// that is the sender of the rpj pathway we're receiving from.
//
// ly = A, other layer = B:
//
// rpj: R=A <- S=B
// spj: S=A -> R=B
//
// returns false if not found.
func (ly *Layer) RecipToRecvPath(rpj *Path) (*Path, bool) {
for _, spj := range ly.SendPaths {
if spj.Recv == rpj.Send { // B = sender of rpj, recv of spj
return spj, true
}
}
return nil, false
}
// AddDefaultParams adds given default param setting function.
func (ly *Layer) AddDefaultParams(fun func(ly *LayerParams)) {
ly.DefaultParams = append(ly.DefaultParams, fun)
}
// applyDefaultParams applies DefaultParams default parameters.
// Called by Layer.Defaults()
func (ly *Layer) applyDefaultParams() {
for _, f := range ly.DefaultParams {
f(ly.Params)
}
}
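// The following is an illustrative sketch (not part of the generated API) of the
// AddDefaultParams pattern: closures registered during network configuration are
// applied by applyDefaultParams (via Defaults) before user-set parameters, acting
// as algorithm-specific defaults. The Gi value here is hypothetical.
func addDefaultParamsSketch(ly *Layer) {
ly.AddDefaultParams(func(lp *LayerParams) {
lp.Inhib.Layer.Gi = 1.1 // hypothetical default for a specialized layer
})
}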
// ParamsString returns a listing of all parameters in the Layer and
// pathways within the layer. If nonDefault is true, only report those
// not at their default values.
func (ly *Layer) ParamsString(nonDefault bool) string {
var b strings.Builder
b.WriteString("//////// Layer: " + ly.Name + "\n")
b.WriteString(ly.Params.ParamsString(nonDefault))
for _, pt := range ly.RecvPaths {
b.WriteString(pt.ParamsString(nonDefault))
}
return b.String()
}
//////// Build
// SetBuildConfig sets named configuration parameter to given string value
// to be used in the PostBuild stage -- mainly for layer names that need to be
// looked up and turned into indexes, after entire network is built.
func (ly *Layer) SetBuildConfig(param, val string) {
ly.BuildConfig[param] = val
}
// BuildConfigByName looks for given BuildConfig option by name,
// and reports & returns an error if not found.
func (ly *Layer) BuildConfigByName(nm string) (string, error) {
cfg, ok := ly.BuildConfig[nm]
if !ok {
err := fmt.Errorf("Layer: %s does not have BuildConfig: %s set -- error in ConfigNet", ly.Name, nm)
return cfg, errors.Log(err)
}
return cfg, nil
}
// BuildConfigFindLayer looks for BuildConfig of given name
// and if found, looks for layer with corresponding name.
// if mustName is true, then an error is logged if the BuildConfig
// name does not exist. An error is always logged if the layer name
// is not found. -1 is returned in any case of not found.
func (ly *Layer) BuildConfigFindLayer(nm string, mustName bool) int32 {
idx := int32(-1)
if rnm, ok := ly.BuildConfig[nm]; ok {
dly := ly.Network.LayerByName(rnm)
if dly != nil {
idx = int32(dly.Index)
}
} else {
if mustName {
errors.Log(fmt.Errorf("Layer: %s does not have BuildConfig: %s set -- error in ConfigNet", ly.Name, nm))
}
}
return idx
}
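// The following is an illustrative sketch (not part of the generated API) of the
// two halves of the BuildConfig pattern: a layer name is recorded as a string
// during ConfigNet, and resolved to a layer index in PostBuild once the whole
// network exists. "LayInhib1Name" is a key actually read by PostBuild below;
// "MyInhibLayer" is a hypothetical layer name. Assumes ly.BuildConfig and
// ly.Network have been set up by the normal network configuration process.
func buildConfigSketch(ly *Layer) int32 {
ly.SetBuildConfig("LayInhib1Name", "MyInhibLayer") // during ConfigNet
return ly.BuildConfigFindLayer("LayInhib1Name", false) // during PostBuild
}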
// BuildSubPools initializes neuron start / end indexes for sub-pools
func (ly *Layer) BuildSubPools(ctx *Context) {
if !ly.Is4D() {
return
}
sh := ly.Shape.Sizes
spy := sh[0]
spx := sh[1]
spi := uint32(1)
for py := 0; py < spy; py++ {
for px := 0; px < spx; px++ {
soff := uint32(ly.Shape.IndexTo1D(py, px, 0, 0))
eoff := uint32(ly.Shape.IndexTo1D(py, px, sh[2]-1, sh[3]-1) + 1)
pi := ly.Params.PoolIndex(spi)
PoolIxs.Set(soff, int(pi), int(PoolNeurSt))
PoolIxs.Set(eoff, int(pi), int(PoolNeurEd))
for lni := soff; lni < eoff; lni++ {
ni := ly.NeurStIndex + lni
NeuronIxs.Set(spi, int(ni), int(NrnSubPool))
}
spi++
}
}
}
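// The following is an illustrative sketch (not part of the generated API) of the
// pool layout that BuildSubPools establishes: for a 4D layer shape
// [plY, plX, unY, unX] there are plY*plX sub-pools of unY*unX neurons each,
// in addition to the layer-wide pool at index 0. For example, a [2, 3, 4, 5]
// shape has 6 sub-pools of 20 neurons.
func subPoolCountSketch(sh []int) (nSubPools, neurPerPool int) {
if len(sh) != 4 {
return 0, 0
}
return sh[0] * sh[1], sh[2] * sh[3]
}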
// BuildPools builds the inhibitory pools structures; nn = number of units (neurons) in the layer
func (ly *Layer) BuildPools(ctx *Context, nn uint32) error {
np := 1 + ly.NumPools()
for di := uint32(0); di < ly.MaxData; di++ {
lpi := ly.Params.PoolIndex(0)
PoolIxs.Set(0, int(lpi), int(PoolNeurSt))
PoolIxs.Set(nn, int(lpi), int(PoolNeurEd))
PoolIxs.Set(1, int(lpi), int(PoolIsLayer))
}
if np > 1 {
ly.BuildSubPools(ctx)
}
return nil
}
// BuildPaths builds the pathways, send-side
func (ly *Layer) BuildPaths(ctx *Context) error {
emsg := ""
for _, pt := range ly.SendPaths {
if pt.Off {
continue
}
err := pt.Build()
if err != nil {
emsg += err.Error() + "\n"
}
}
if emsg != "" {
return errors.New(emsg)
}
return nil
}
// Build constructs the layer state, including calling Build on the pathways
func (ly *Layer) Build() error {
ctx := ly.Network.Context()
nn := uint32(ly.Shape.Len())
if nn == 0 {
return fmt.Errorf("Build Layer %v: no units specified in Shape", ly.Name)
}
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
NeuronIxs.Set(lni, int(ni), int(NrnNeurIndex))
NeuronIxs.Set(uint32(ly.Index), int(ni), int(NrnLayIndex))
}
err := ly.BuildPools(ctx, nn)
if err != nil {
return err
}
err = ly.BuildPaths(ctx)
ly.PostBuild()
return err
}
// PostBuild performs special post-Build() configuration steps for specific algorithms,
// using configuration data set in BuildConfig during the ConfigNet process.
func (ly *Layer) PostBuild() {
ly.Params.LayInhib.Index1 = ly.BuildConfigFindLayer("LayInhib1Name", false) // optional
ly.Params.LayInhib.Index2 = ly.BuildConfigFindLayer("LayInhib2Name", false) // optional
ly.Params.LayInhib.Index3 = ly.BuildConfigFindLayer("LayInhib3Name", false) // optional
ly.Params.LayInhib.Index4 = ly.BuildConfigFindLayer("LayInhib4Name", false) // optional
switch ly.Type {
case PulvinarLayer:
ly.PulvinarPostBuild()
case DSMatrixLayer:
ly.DSMatrixPostBuild()
case VSMatrixLayer:
ly.VSMatrixPostBuild()
case DSPatchLayer:
ly.PatchPostBuild()
case GPLayer:
ly.GPPostBuild()
case CNeLayer, CNiIOLayer, CNiUpLayer:
ly.NuclearPostBuild()
case LDTLayer:
ly.LDTPostBuild()
case RWDaLayer:
ly.RWDaPostBuild()
case TDIntegLayer:
ly.TDIntegPostBuild()
case TDDaLayer:
ly.TDDaPostBuild()
case BLALayer, CeMLayer, USLayer, PVLayer, VSPatchLayer:
ly.RubiconPostBuild()
}
}
// UnitVarNames returns a list of variable names available on the units in this layer
func (ly *Layer) UnitVarNames() []string {
return NeuronVarNames
}
// UnitVarProps returns properties for variables
func (ly *Layer) UnitVarProps() map[string]string {
return NeuronVarProps
}
// UnitVarIndex returns the index of given variable within the Neuron,
// according to *this layer's* UnitVarNames() list (using a map to lookup index),
// or -1 and error message if not found.
func (ly *Layer) UnitVarIndex(varNm string) (int, error) {
return NeuronVarIndexByName(varNm)
}
// UnitVarNum returns the number of Neuron-level variables
// for this layer. This is needed for extending indexes in derived types.
func (ly *Layer) UnitVarNum() int {
return len(NeuronVarNames)
}
// UnitValue1D returns value of given variable index on given unit, using 1-dimensional index.
// returns NaN on invalid index.
// This is the core unit var access method used by other methods.
func (ly *Layer) UnitValue1D(varIndex int, idx, di int) float32 {
if idx < 0 || idx >= int(ly.NNeurons) {
return math32.NaN()
}
if varIndex < 0 || varIndex >= ly.UnitVarNum() {
return math32.NaN()
}
if di < 0 || di >= int(ly.MaxData) {
return math32.NaN()
}
ni := ly.NeurStIndex + uint32(idx)
nvars := ly.UnitVarNum()
neurVars := int(CaBins) + NNeuronCaBins
layVarSt := nvars - NNeuronLayerVars
pi := ly.Params.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
if varIndex >= layVarSt {
lvi := varIndex - layVarSt
switch lvi {
case 0: // DA
return GlobalScalars.Value(int(GvDA), int(uint32(di)))
case 1: // ACh
return GlobalScalars.Value(int(GvACh), int(uint32(di)))
case 2: // NE
return GlobalScalars.Value(int(GvNE), int(uint32(di)))
case 3: // Ser
return GlobalScalars.Value(int(GvSer), int(uint32(di)))
case 4: // Gated
return float32(PoolsInt.Value(int(pi), int(di), int(PoolGated)))
case 5: // ModAct
return Pools.Value(int(pi), int(di), int(fsfffb.ModAct))
case 6: // PoolDAD1
return Pools.Value(int(pi), int(di), int(fsfffb.DAD1))
case 7: // PoolDAD2
return Pools.Value(int(pi), int(di), int(fsfffb.DAD2))
}
} else if varIndex >= neurVars {
return NeuronAvgs.Value(int(ni), int(NeuronVars(varIndex-neurVars)))
} else if varIndex < int(CaBins) {
return Neurons.Value(int(ni), int(di), int(varIndex))
} else {
sbin := varIndex - int(CaBins)
if sbin >= int(NetworkIxs[0].NCaBins) {
return math32.NaN()
}
return Neurons.Value(int(ni), int(di), int(varIndex))
}
return math32.NaN()
}
// RecvPathValues fills in values of given synapse variable name,
// for pathway into given sending layer and neuron 1D index,
// for all receiving neurons in this layer,
// into given float32 slice (only resized if not big enough).
// pathType is the string representation of the path type -- used if non-empty,
// useful when there are multiple pathways between two layers.
// Returns error on invalid var name.
// If the receiving neuron is not connected to the given sending layer or neuron
// then the value is set to math32.NaN().
// Returns error on invalid var name or lack of recv path (vals always set to nan on path err).
func (ly *Layer) RecvPathValues(vals *[]float32, varNm string, sendLay emer.Layer, sendIndex1D int, pathType string) error {
var err error
nn := int(ly.NNeurons)
if *vals == nil || cap(*vals) < nn {
*vals = make([]float32, nn)
} else if len(*vals) < nn {
*vals = (*vals)[0:nn]
}
nan := math32.NaN()
for i := 0; i < nn; i++ {
(*vals)[i] = nan
}
if sendLay == nil {
return fmt.Errorf("sending layer is nil")
}
slay := sendLay.AsEmer()
var pt emer.Path
if pathType != "" {
pt, err = slay.SendPathByRecvNameType(ly.Name, pathType)
if pt == nil {
pt, err = slay.SendPathByRecvName(ly.Name)
}
} else {
pt, err = slay.SendPathByRecvName(ly.Name)
}
if pt == nil {
return err
}
if pt.AsEmer().Off {
return fmt.Errorf("pathway is off")
}
for ri := 0; ri < nn; ri++ {
(*vals)[ri] = pt.AsEmer().SynValue(varNm, sendIndex1D, ri) // this will work with any variable -- slower, but necessary
}
return nil
}
// SendPathValues fills in values of given synapse variable name,
// for pathway into given receiving layer and neuron 1D index,
// for all sending neurons in this layer,
// into given float32 slice (only resized if not big enough).
// pathType is the string representation of the path type -- used if non-empty,
// useful when there are multiple pathways between two layers.
// Returns error on invalid var name.
// If the sending neuron is not connected to the given receiving layer or neuron
// then the value is set to math32.NaN().
// Returns error on invalid var name or lack of recv path (vals always set to nan on path err).
func (ly *Layer) SendPathValues(vals *[]float32, varNm string, recvLay emer.Layer, recvIndex1D int, pathType string) error {
var err error
nn := int(ly.NNeurons)
if *vals == nil || cap(*vals) < nn {
*vals = make([]float32, nn)
} else if len(*vals) < nn {
*vals = (*vals)[0:nn]
}
nan := math32.NaN()
for i := 0; i < nn; i++ {
(*vals)[i] = nan
}
if recvLay == nil {
return fmt.Errorf("receiving layer is nil")
}
rlay := recvLay.AsEmer()
var pt emer.Path
if pathType != "" {
pt, err = rlay.RecvPathBySendNameType(ly.Name, pathType)
if pt == nil {
pt, err = rlay.RecvPathBySendName(ly.Name)
}
} else {
pt, err = rlay.RecvPathBySendName(ly.Name)
}
if pt == nil {
return err
}
if pt.AsEmer().Off {
return fmt.Errorf("pathway is off")
}
for si := 0; si < nn; si++ {
(*vals)[si] = pt.AsEmer().SynValue(varNm, si, recvIndex1D)
}
return nil
}
// VarRange returns the min / max values for given variable
// todo: support r. s. pathway values
// error occurs when variable name is not found.
func (ly *Layer) VarRange(varNm string) (min, max float32, err error) {
nn := ly.NNeurons
if nn == 0 {
return
}
vidx, err := ly.UnitVarIndex(varNm)
if err != nil {
return
}
nvar := vidx
v0 := Neurons.Value(int(ly.NeurStIndex), int(0), int(nvar))
min = v0
max = v0
for lni := uint32(1); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
vl := Neurons.Value(int(ni), int(0), int(nvar))
if vl < min {
min = vl
}
if vl > max {
max = vl
}
}
return
}
//////// Weights
// WriteWeightsJSON writes the weights from this layer from the receiver-side perspective
// in a JSON text format. We build in the indentation logic to make it much faster and
// more efficient.
func (ly *Layer) WriteWeightsJSON(w io.Writer, depth int) {
li := ly.Index
ly.MetaData = make(map[string]string)
ly.MetaData["ActMAvg"] = fmt.Sprintf("%g", LayerStates.Value(int(li), int(0), int(LayerActMAvg)))
ly.MetaData["ActPAvg"] = fmt.Sprintf("%g", LayerStates.Value(int(li), int(0), int(LayerActPAvg)))
ly.MetaData["GiMult"] = fmt.Sprintf("%g", LayerStates.Value(int(li), int(0), int(LayerGiMult)))
if ly.Params.IsLearnTrgAvg() {
ly.LayerBase.WriteWeightsJSONBase(w, depth, "ActAvg", "TrgAvg")
} else {
ly.LayerBase.WriteWeightsJSONBase(w, depth)
}
}
// SetWeights sets the weights for this layer from weights.Layer decoded values
func (ly *Layer) SetWeights(lw *weights.Layer) error {
if ly.Off {
return nil
}
li := ly.Index
ctx := ly.Network.Context()
if lw.MetaData != nil {
for di := uint32(0); di < ly.MaxData; di++ {
if am, ok := lw.MetaData["ActMAvg"]; ok {
pv, _ := strconv.ParseFloat(am, 32)
LayerStates.Set(float32(pv), int(li), int(di), int(LayerActMAvg))
}
if ap, ok := lw.MetaData["ActPAvg"]; ok {
pv, _ := strconv.ParseFloat(ap, 32)
LayerStates.Set(float32(pv), int(li), int(di), int(LayerActPAvg))
}
if gi, ok := lw.MetaData["GiMult"]; ok {
pv, _ := strconv.ParseFloat(gi, 32)
LayerStates.Set(float32(pv), int(li), int(di), int(LayerGiMult))
}
}
}
if lw.Units != nil {
if ta, ok := lw.Units["ActAvg"]; ok {
for lni := range ta {
if lni >= int(ly.NNeurons) {
break
}
ni := ly.NeurStIndex + uint32(lni)
NeuronAvgs.Set(ta[lni], int(ni), int(ActAvg))
}
}
if ta, ok := lw.Units["TrgAvg"]; ok {
for lni := range ta {
if lni >= int(ly.NNeurons) {
break
}
ni := ly.NeurStIndex + uint32(lni)
NeuronAvgs.Set(ta[lni], int(ni), int(TrgAvg))
}
}
}
var err error
if len(lw.Paths) == ly.NumRecvPaths() { // this is essential if multiple paths from same layer
for pi := range lw.Paths {
pw := &lw.Paths[pi]
pt := ly.RecvPaths[pi]
er := pt.SetWeights(pw)
if er != nil {
err = er
}
}
} else {
for pi := range lw.Paths {
pw := &lw.Paths[pi]
pt, _ := ly.RecvPathBySendName(pw.From)
if pt != nil {
er := pt.SetWeights(pw)
if er != nil {
err = er
}
}
}
}
ly.Params.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values
return err
}
// JsonToParams reformats JSON output into a suitable params display format
func JsonToParams(b []byte) string {
br := strings.Replace(string(b), `"`, ``, -1)
br = strings.Replace(br, ",\n", "", -1)
br = strings.Replace(br, "{\n", "{", -1)
br = strings.Replace(br, "} ", "}\n ", -1)
br = strings.Replace(br, "\n }", " }", -1)
br = strings.Replace(br, "\n }\n", " }", -1)
return br[1:] + "\n"
}
// TestValues returns a map of key vals for testing
// ctrKey is a key of counters to contextualize values.
func (ly *Layer) TestValues(ctrKey string, vals map[string]float32) {
for spi := uint32(0); spi < ly.NPools; spi++ {
for di := uint32(0); di < ly.MaxData; di++ {
pi := ly.Params.PoolIndex(spi)
key := fmt.Sprintf("%s Lay: %s\tPool: %d\tDi: %d", ctrKey, ly.Name, pi, di)
PoolTestValues(pi, di, key, vals)
}
}
}
//////// Lesion
// UnLesionNeurons unlesions (clears the Off flag) for all neurons in the layer
func (ly *Layer) UnLesionNeurons() { //types:add
nn := ly.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
for di := uint32(0); di < ly.MaxData; di++ {
NeuronClearFlag(NeuronOff, ni, di)
}
}
}
// LesionNeurons lesions (sets the Off flag) for the given proportion (0-1) of neurons
// in the layer, and returns the number of neurons lesioned. Logs an error if prop > 1,
// as an indication that a percent value might have been passed instead of a proportion.
func (ly *Layer) LesionNeurons(prop float32) int { //types:add
ly.UnLesionNeurons()
if prop > 1 {
log.Printf("LesionNeurons got a proportion > 1 -- must be 0-1 as *proportion* (not percent) of neurons to lesion: %v\n", prop)
return 0
}
nn := ly.NNeurons
if nn == 0 {
return 0
}
p := rand.Perm(int(nn))
nl := int(prop * float32(nn))
for lni := uint32(0); lni < uint32(nl); lni++ {
nip := uint32(p[lni])
ni := ly.NeurStIndex + nip
if NeuronIsOff(ni) {
continue
}
for di := uint32(0); di < ly.MaxData; di++ {
NeuronSetFlag(NeuronOff, ni, di)
}
}
return nl
}
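// The following is an illustrative sketch (not part of the generated API) of a
// typical lesion experiment: lesion a proportion (not a percent) of neurons in a
// layer, run testing, then restore. "Hidden" is a hypothetical layer name.
func lesionSketch(net *Network) int {
ly := net.LayerByName("Hidden") // hypothetical layer name
if ly == nil {
return 0
}
n := ly.LesionNeurons(0.25) // lesion ~25% of neurons; returns count lesioned
// ... run testing with the lesioned layer here ...
ly.UnLesionNeurons()
return n
}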
// MakeToolbar is the standard core GUI toolbar for the layer when edited.
func (ly *Layer) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(ly.Defaults).SetIcon(icons.Reset)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(ly.InitWeights).SetIcon(icons.Reset)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(ly.InitActs).SetIcon(icons.Reset)
})
tree.Add(p, func(w *core.Separator) {})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(ly.LesionNeurons).SetIcon(icons.Cut)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(ly.UnLesionNeurons).SetIcon(icons.Cut)
})
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"reflect"
"cogentcore.org/core/base/reflectx"
"github.com/emer/emergent/v2/params"
)
//gosl:start
// LayerIndexes contains index access into network global arrays for GPU.
type LayerIndexes struct {
// NPools is the total number of pools for this layer, including layer-wide.
NPools uint32 `edit:"-"`
// start of neurons for this layer in global array (same as Layer.NeurStIndex)
NeurSt uint32 `edit:"-"`
// number of neurons in layer
NNeurons uint32 `edit:"-"`
// start index into RecvPaths global array
RecvSt uint32 `edit:"-"`
// number of recv pathways
RecvN uint32 `edit:"-"`
// start index into SendPaths global array
SendSt uint32 `edit:"-"`
// number of send pathways
SendN uint32 `edit:"-"`
// starting neuron index in global Exts list of external input for this layer.
// Only for Input / Target / Compare layer types
ExtsSt uint32 `edit:"-"`
// layer shape Pools Y dimension -- 1 for 2D
ShpPlY int32 `edit:"-"`
// layer shape Pools X dimension -- 1 for 2D
ShpPlX int32 `edit:"-"`
// layer shape Units Y dimension
ShpUnY int32 `edit:"-"`
// layer shape Units X dimension
ShpUnX int32 `edit:"-"`
}
// LayerInhibIndexes contains indexes of layers for between-layer inhibition.
type LayerInhibIndexes struct {
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used
Index1 int32 `edit:"-"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used
Index2 int32 `edit:"-"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used
Index3 int32 `edit:"-"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used
Index4 int32 `edit:"-"`
}
// LayerParams contains all of the layer parameters.
// These values must remain constant over the course of computation.
// On the GPU, they are loaded into a read-only data storage buffer.
type LayerParams struct {
// Type is the functional type of layer, which determines the code path
// for specialized layer types, and is synchronized with [Layer.Type].
Type LayerTypes
// Index of this layer in [Layers] list.
Index uint32 `edit:"-"`
// MaxData is the maximum number of data parallel elements.
MaxData uint32 `display:"-"`
// PoolSt is the start of pools for this layer; first one is always the layer-wide pool.
PoolSt uint32 `display:"-"`
// Activation parameters and methods for computing activations
Acts ActParams `display:"add-fields"`
// Inhibition parameters and methods for computing layer-level inhibition
Inhib InhibParams `display:"add-fields"`
// LayInhib has indexes of layers that contribute between-layer inhibition
// to this layer. Set these indexes via BuildConfig LayInhibXName (X = 1, 2...).
LayInhib LayerInhibIndexes `display:"inline"`
// Learn has learning parameters and methods that operate at the neuron level.
Learn LearnNeuronParams `display:"add-fields"`
// Bursts has [BurstParams] that determine how the 5IB Burst activation
// is computed from CaP integrated spiking values in Super layers.
Bursts BurstParams `display:"inline"`
// CT has params for the CT corticothalamic layer and PTPred layer that
// generates predictions over the Pulvinar using context. Uses the CtxtGe
// excitatory input plus stronger NMDA channels to maintain context trace.
CT CTParams `display:"inline"`
// Pulvinar has parameters for how the plus-phase (outcome) state of Pulvinar
// thalamic relay cell neurons is computed from the corresponding driver
// neuron Burst activation (or CaP if not Super).
Pulvinar PulvinarParams `display:"inline"`
// DSMatrixParams has parameters for dorsal Matrix layers, for SPN / MSN
// direct and indirect pathways.
DSMatrix DSMatrixParams `display:"inline"`
// Striatum has params and indexes for striatum layers: DSMatrix, VSMatrix, DSPatch.
Striatum StriatumParams `display:"inline"`
// GP has params for GP (globus pallidus) of the BG layers.
GP GPParams `display:"inline"`
// IOParams has parameters for the IO inferior olive neurons,
// which compute a temporal offset error signal between CNiIO inhibitory
// predictions and excitatory sensory input, contingent on initial
// above-threshold efferent copy motor trigger input (modulatory).
IO IOParams `display:"inline"`
// Nuclear has parameters for learning in the cerebellum, according
// to the Nuclear model (not just nucleus neurons).
Nuclear NuclearParams `display:"inline"`
// LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory
// signal, driven by superior colliculus stimulus novelty, US input / absence,
// and OFC / ACC inhibition.
LDT LDTParams `display:"inline"`
// VTA has parameters for ventral tegmental area dopamine (DA) based on
// LHb PVDA (primary value -- at US time, computed at start of each trial
// and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned
// value (LV) activations, which update every cycle.
VTA VTAParams `display:"inline"`
// RWPred has parameters for reward prediction using a simple Rescorla-Wagner
// learning rule (i.e., PV learning in the Rubicon framework).
RWPred RWPredParams `display:"inline"`
// RWDa has parameters for reward prediction dopamine using a simple
// Rescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework).
RWDa RWDaParams `display:"inline"`
// TDInteg has parameters for temporal differences (TD) reward integration layer.
TDInteg TDIntegParams `display:"inline"`
// TDDa has parameters for dopamine (DA) signal as the temporal difference
// (TD) between the TDIntegLayer activations in the minus and plus phase.
TDDa TDDaParams `display:"inline"`
// Indexes has recv and send pathway array access info.
Indexes LayerIndexes `new-window:"+"`
}
// PoolIndex returns the global network index for pool with given
// pool (0 = layer pool, 1+ = subpools): just PoolSt + pi
func (ly *LayerParams) PoolIndex(pi uint32) uint32 {
return ly.PoolSt + pi
}
// HasPoolInhib returns true if the layer is using pool-level inhibition (implies 4D too).
// This is the proper check for using pool-level target average activations, for example.
func (ly *LayerParams) HasPoolInhib() bool {
return ly.Inhib.Pool.On.IsTrue()
}
//gosl:end
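// The following is an illustrative sketch (not part of the generated API) of
// PoolIndex usage above: pool 0 is always the layer-wide pool, and sub-pools of
// 4D layers start at 1, so the global index of sub-pool spi is simply PoolSt + spi.
func poolIndexSketch(ly *LayerParams, spi uint32) (layerPool, subPool uint32) {
return ly.PoolIndex(0), ly.PoolIndex(spi)
}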
// StyleClass implements the [params.Styler] interface for parameter setting,
// and must only be called after the network has been built, and is current,
// because it uses the global CurrentNetwork variable.
func (ly *LayerParams) StyleClass() string {
lay := CurrentNetwork.Layers[ly.Index]
return ly.Type.String() + " " + lay.Class
}
// StyleName implements the [params.Styler] interface for parameter setting,
// and must only be called after the network has been built, and is current,
// because it uses the global CurrentNetwork variable.
func (ly *LayerParams) StyleName() string {
lay := CurrentNetwork.Layers[ly.Index]
return lay.Name
}
func (ly *LayerParams) Update() {
ly.Acts.Update()
ly.Inhib.Update()
ly.Learn.Update()
ly.Bursts.Update()
ly.CT.Update()
ly.Pulvinar.Update()
ly.DSMatrix.Update()
ly.Striatum.Update()
ly.GP.Update()
ly.IO.Update()
ly.Nuclear.Update()
ly.LDT.Update()
ly.VTA.Update()
ly.RWPred.Update()
ly.RWDa.Update()
ly.TDInteg.Update()
ly.TDDa.Update()
}
func (ly *LayerParams) Defaults() {
ly.Acts.Defaults()
ly.Inhib.Defaults()
ly.Learn.Defaults()
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.0
ly.Inhib.Pool.Gi = 1.0
ly.Bursts.Defaults()
ly.CT.Defaults()
ly.Pulvinar.Defaults()
ly.DSMatrix.Defaults()
ly.Striatum.Defaults()
ly.GP.Defaults()
ly.IO.Defaults()
ly.Nuclear.Defaults()
ly.LDT.Defaults()
ly.VTA.Defaults()
ly.RWPred.Defaults()
ly.RWDa.Defaults()
ly.TDInteg.Defaults()
ly.TDDa.Defaults()
}
func (ly *LayerParams) ShouldDisplay(field string) bool {
switch field {
case "Bursts":
return ly.Type == SuperLayer
case "CT":
return ly.Type == CTLayer || ly.Type == PTPredLayer || ly.Type == BLALayer
case "Pulvinar":
return ly.Type == PulvinarLayer
case "DSMatrix":
return ly.Type == DSMatrixLayer
case "Striatum":
return ly.Type == VSMatrixLayer || ly.Type == DSMatrixLayer || ly.Type == DSPatchLayer
case "GP":
return ly.Type == GPLayer
case "IO":
return ly.Type == IOLayer
case "Nuclear":
return ly.IsNuclear()
case "LDT":
return ly.Type == LDTLayer
case "VTA":
return ly.Type == VTALayer
case "RWPred":
return ly.Type == RWPredLayer
case "RWDa":
return ly.Type == RWDaLayer
case "TDInteg":
return ly.Type == TDIntegLayer
case "TDDa":
return ly.Type == TDDaLayer
default:
return true
}
}
// ParamsString returns a listing of all parameters in the Layer and
// pathways within the layer. If nonDefault is true, only report those
// not at their default values.
func (ly *LayerParams) ParamsString(nonDefault bool) string {
return params.PrintStruct(ly, 1, func(path string, ft reflect.StructField, fv any) bool {
if ft.Tag.Get("display") == "-" {
return false
}
if nonDefault {
if def := ft.Tag.Get("default"); def != "" {
if reflectx.ValueIsDefault(reflect.ValueOf(fv), def) {
return false
}
} else {
if reflectx.NonPointerType(ft.Type).Kind() != reflect.Struct {
return false
}
}
}
return ly.ShouldDisplay(path)
},
func(path string, ft reflect.StructField, fv any) string {
if nonDefault {
if def := ft.Tag.Get("default"); def != "" {
return reflectx.ToString(fv) + " [" + def + "]"
}
}
return ""
})
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
//gosl:start
// LayerTypes enumerates all the different types of layers,
// for the different algorithm types supported.
// Class parameter styles automatically key off of these types.
type LayerTypes int32 //enums:enum
// note: we need to add the Layer extension to avoid naming
// conflicts between layer, pathway and other things.
// The layer types
const (
// Super is a superficial cortical layer (lamina 2-3-4)
// which does not receive direct input or targets.
// In more generic models, it should be used as a Hidden layer,
// and maps onto the Hidden type in LayerTypes.
SuperLayer LayerTypes = iota
// Input is a layer that receives direct external input
// in its Ext inputs. Biologically, it can be a primary
// sensory layer, or a thalamic layer.
InputLayer
// Target is a layer that receives direct external target inputs
// used for driving plus-phase learning.
// Simple target layers are generally not used in more biological
// models, which instead use predictive learning via Pulvinar
// or related mechanisms.
TargetLayer
// Compare is a layer that receives external comparison inputs,
// which drive statistics but do NOT drive activation
// or learning directly. It is rarely used in axon.
CompareLayer
//////// Deep
// CT are layer 6 corticothalamic projecting neurons,
// which drive "top down" predictions in Pulvinar layers.
// They maintain information over time via stronger NMDA
// channels and use maintained prior state information to
// generate predictions about current states forming on Super
// layers that then drive PT (5IB) bursting activity, which
// are the plus-phase drivers of Pulvinar activity.
CTLayer
// Pulvinar are thalamic relay cell neurons in the higher-order
// Pulvinar nucleus of the thalamus, and functionally isomorphic
// neurons in the MD thalamus, and potentially other areas.
// These cells alternately reflect predictions driven by CT pathways,
// and actual outcomes driven by 5IB Burst activity from corresponding
// PT or Super layer neurons that provide strong driving inputs.
PulvinarLayer
// TRNLayer is thalamic reticular nucleus layer for inhibitory competition
// within the thalamus.
TRNLayer
// PTMaintLayer implements the subset of pyramidal tract (PT)
// layer 5 intrinsic bursting (5IB) deep neurons that exhibit
// robust, stable maintenance of activity over the duration of a
// goal engaged window, modulated by basal ganglia (BG) disinhibitory
// gating, supported by strong MaintNMDA channels and recurrent excitation.
// The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input
// that feeds into the stronger, longer MaintNMDA channels,
// and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively
// modulates the strength of other inputs, such that only at the time of
// BG gating are these strong enough to drive sustained active maintenance.
// Use Act.Dend.ModGain to parameterize.
PTMaintLayer
// PTPredLayer implements the subset of pyramidal tract (PT)
// layer 5 intrinsic bursting (5IB) deep neurons that combine
// modulatory input from PTMaintLayer sustained maintenance and
// CTLayer dynamic predictive learning that helps to predict
// state changes during the period of active goal maintenance.
// This layer provides the primary input to VSPatch US-timing
// prediction layers, and other layers that require predictive dynamics.
PTPredLayer
//////// PCore Basal Ganglia (BG)
// DSMatrixLayer represents the matrisome spiny projection neurons
// (SPNs, MSNs) that are the main Go / No gating units in BG,
// and are modulated by phasic dopamine: D1 = Go, D2 = No.
// These are for dorsal striatum, which interact with matrisomes and
// receive PF (parafasciculus) feedback signals.
DSMatrixLayer
// VSMatrixLayer represents the matrisome spiny projection neurons
// (SPNs, MSNs) that are the main Go / No gating units in BG,
// and are modulated by phasic dopamine: D1 = Go, D2 = No.
// These are for ventral striatum, which drive goal-selection
// gating signals through the MD thalamus, and activate instinctive
// behaviors based on learned inputs projecting to various output
// pathways.
VSMatrixLayer
// DSPatchLayer represents the dorsolateral striosomal spiny neurons
// that modulate the activity of SNc dopamine to a given Pool.
DSPatchLayer
// STNLayer represents subthalamic nucleus neurons, with two subtypes:
// STNp are more strongly driven and get over bursting threshold, driving strong,
// rapid activation of the KCa channels, causing a long pause in firing, which
// creates a window during which GPe dynamics resolve Go vs. No balance.
// STNs are more weakly driven and thus more slowly activate KCa, resulting in
// a longer period of activation, during which the GPi is inhibited to prevent
// premature gating based only on MtxGo inhibition -- gating only occurs when
// GPePr signal has had a chance to integrate its MtxNo inputs.
STNLayer
// GPLayer represents a globus pallidus layer in the BG, including:
// GPePr, GPeAk (arkypallidal), and GPi / SNr.
// Has intrinsic activity.
GPLayer
// BGThalLayer represents a BG gated thalamic layer,
// which receives BG gating in the form of an
// inhibitory pathway from GPi. Located
// mainly in the Ventral thalamus: VA / VM / VL,
// and also parts of MD mediodorsal thalamus.
BGThalLayer
// VSGated represents explicit coding of VS gating status:
// JustGated and HasGated (since last US or failed predicted US),
// For visualization and / or motor action signaling.
VSGatedLayer
//////// Cerebellum (Nuclear)
// IOLayer represents a cerebellum inferior olive (IO) layer,
// which drive learning in associated cerebellar nuclei and Purkinje cells.
// Receives paired input from the CNiIOLayer inhibitory prediction neurons
// and specific sensory channels that are being predicted, and a modulatory
// input from the efferent copy of motor action to initiate it.
// GaP = integrated GeSyn, GaM = integrated GiSyn, GaD = offset GiSyn,
// TimeDiff = GaP - GaD, TimePeak = 1 if error spike.
IOLayer
// CNeLayer represents the cerebellar nuclei excitatory neurons,
// which have slow learning to maintain a target average firing rate.
CNeLayer
// CNiIOLayer represents the cerebellar nuclei inhibitory prediction
// neurons, which learn to predict the activity of a specific sensory input,
// and inhibit it in the corresponding CNeUpLayer
CNiIOLayer
// CNiUpLayer represents the cerebellar nuclei inhibitory upgoing
// output neurons, which learn from IOLayer error signals to predict
// specific sensory inputs based on motor commands, thereby cancelling
// the effects of self-generated motor commands.
CNiUpLayer
// note: IsNuclear depends on CNiUpLayer being last, and IOLayer being first.
//////// Rubicon
// BLALayer represents a basolateral amygdala layer
// which learns to associate arbitrary stimuli (CSs)
// with behaviorally salient outcomes (USs)
BLALayer
// CeMLayer represents a central nucleus of the amygdala layer.
CeMLayer
// VSPatchLayer represents a ventral striatum patch layer,
// which learns to represent the expected amount of dopamine reward
// and projects both directly with shunting inhibition to the VTA
// and indirectly via the LHb / RMTg to cancel phasic dopamine firing
// to expected rewards (i.e., reward prediction error).
VSPatchLayer
// LHbLayer represents the lateral habenula, which drives dipping
// in the VTA. It tracks the Global LHb values for
// visualization purposes -- updated by VTALayer.
LHbLayer
// DrivesLayer represents the Drives in the Rubicon framework.
// It tracks the Global Drives values for
// visualization and predictive learning purposes.
DrivesLayer
// UrgencyLayer represents the Urgency factor in Rubicon framework.
// It tracks the Global Urgency.Urge value for
// visualization and predictive learning purposes.
UrgencyLayer
// USLayer represents a US unconditioned stimulus layer (USpos or USneg).
// It tracks the Global USpos or USneg, for visualization
// and predictive learning purposes. Actual US inputs are set in Rubicon.
USLayer
// PVLayer represents a PV primary value layer (PVpos or PVneg) representing
// the total primary value as a function of US inputs, drives, and effort.
// It tracks the Global VTA.PVpos, PVneg values for
// visualization and predictive learning purposes.
PVLayer
// LDTLayer represents the laterodorsal tegmentum layer, which
// is the primary limbic ACh (acetylcholine) driver to other ACh:
// BG cholinergic interneurons (CIN) and nucleus basalis ACh areas.
// The phasic ACh release signals reward salient inputs from CS, US
// and US omission, and it drives widespread disinhibition of BG gating
// and VTA DA firing.
// It receives excitation from superior colliculus which computes
// a temporal derivative (stimulus specific adaptation, SSA)
// of sensory inputs, and inhibitory input from OFC, ACC driving
// suppression of distracting inputs during goal-engaged states.
LDTLayer
// VTALayer represents the ventral tegmental area, which releases
// dopamine. It computes final DA value from Rubicon-computed
// LHb PVDA (primary value DA), updated at start of each trial from
// updated US, Effort, etc state, and cycle-by-cycle LV learned value
// state reflecting CS inputs, in the Amygdala (CeM).
// Its activity reflects this DA level, which is effectively broadcast
// via Global state values to all layers.
VTALayer
//////// RL
// RewLayer represents positive (first unit) or negative (second unit)
// reward values, showing spiking rates for each, and Act always represents
// the signed value.
RewLayer
// RWPredLayer computes reward prediction for a simple Rescorla-Wagner
// learning dynamic (i.e., PV learning in the Rubicon framework).
// Activity is computed as linear function of excitatory conductance.
// The first unit in the layer represents positive reward, second negative.
// Use with RWPath which does simple delta-rule learning on minus-plus.
RWPredLayer
// RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner
// learning dynamic (i.e., PV learning in the Rubicon framework).
// It computes difference between r(t) and RWPred values.
// r(t) is accessed directly from a Rew layer -- if no external input then no
// DA is computed -- critical for effective use of RW only for PV cases.
// RWPred prediction is also accessed directly from Rew layer to avoid any issues.
RWDaLayer
// TDPredLayer is the temporal differences reward prediction layer.
// It represents estimated value V(t) in the minus phase, and computes
// estimated V(t+1) based on its learned weights in plus phase,
// using the TDPredPath pathway type for DA modulated learning.
// The first unit in the layer represents positive reward, second negative.
TDPredLayer
// TDIntegLayer is the temporal differences reward integration layer.
// It represents estimated value V(t) from prior time step in the minus phase,
// and estimated discount * V(t+1) + r(t) in the plus phase.
// It gets Rew, PrevPred from Context.NeuroMod, and Special
// LayerValues from TDPredLayer.
// The first unit in the layer represents positive reward, second negative.
TDIntegLayer
// TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD)
// between the TDIntegLayer activations in the minus and plus phase.
// These are retrieved from Special LayerValues.
TDDaLayer
)
// IsExtLayerType returns true if the layer type deals with external input:
// Input, Target, Compare
func IsExtLayerType(lt LayerTypes) bool {
if lt == InputLayer || lt == TargetLayer || lt == CompareLayer || lt == RewLayer {
return true
}
return false
}
//gosl:end
// IsExt returns true if the layer type deals with external input:
// Input, Target, Compare
func (lt LayerTypes) IsExt() bool {
if lt == InputLayer || lt == TargetLayer || lt == CompareLayer || lt == RewLayer {
return true
}
return false
}
// Code generated by "goal build"; DO NOT EDIT.
//line learn-layer.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import "cogentcore.org/core/math32"
//gosl:start
// DTrgSubMean subtracts the mean from DTrgAvg values.
// Called by TrgAvgFromD
func (ly *LayerParams) DTrgSubMean(ctx *Context) {
submean := ly.Learn.TrgAvgAct.SubMean
if submean == 0 {
return
}
if ly.HasPoolInhib() && ly.Learn.TrgAvgAct.Pool.IsTrue() {
np := ly.Indexes.NPools
for spi := uint32(1); spi < np; spi++ {
pi := ly.PoolIndex(spi)
nsi := PoolIxs.Value(int(pi), int(PoolNeurSt))
nei := PoolIxs.Value(int(pi), int(PoolNeurEd))
nn := 0
avg := float32(0)
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
avg += NeuronAvgs.Value(int(ni), int(DTrgAvg))
nn++
}
if nn == 0 {
continue
}
avg /= float32(nn)
avg *= submean
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
NeuronAvgs.SetSub(avg, int(ni), int(DTrgAvg))
}
}
} else {
nn := 0
avg := float32(0)
tn := ly.Indexes.NNeurons
for lni := uint32(0); lni < tn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
avg += NeuronAvgs.Value(int(ni), int(DTrgAvg))
nn++
}
if nn == 0 {
return
}
avg /= float32(nn)
avg *= submean
for lni := uint32(0); lni < tn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
NeuronAvgs.SetSub(avg, int(ni), int(DTrgAvg))
}
}
}
// TrgAvgFromD updates TrgAvg from DTrgAvg, called in PlusPhasePost.
func (ly *LayerParams) TrgAvgFromD(ctx *Context) {
lr := ly.LearnTrgAvgErrLRate()
if lr == 0 {
return
}
ly.DTrgSubMean(ctx)
nn := ly.Indexes.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
ntrg := NeuronAvgs.Value(int(ni), int(TrgAvg)) + NeuronAvgs.Value(int(ni), int(DTrgAvg))
ntrg = ly.Learn.TrgAvgAct.TrgRange.ClampValue(ntrg)
NeuronAvgs.Set(ntrg, int(ni), int(TrgAvg))
NeuronAvgs.Set(0.0, int(ni), int(DTrgAvg))
}
}
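// The following is an illustrative sketch (not part of the generated API or GPU
// code) of the per-neuron update performed by TrgAvgFromD above: the accumulated
// delta (already mean-subtracted by DTrgSubMean) is added to TrgAvg, the result
// is clamped to the target range, and the delta is reset to zero.
func trgAvgStepSketch(trgAvg, dTrgAvg, rangeMin, rangeMax float32) (newTrg, newDelta float32) {
newTrg = trgAvg + dTrgAvg
if newTrg < rangeMin {
newTrg = rangeMin
} else if newTrg > rangeMax {
newTrg = rangeMax
}
return newTrg, 0
}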
// WtFromDWtLayer does weight update at the layer level.
// does NOT call main pathway-level WtFromDWt method.
// in base, only calls TrgAvgFromD
func (ly *LayerParams) WtFromDWtLayer(ctx *Context) {
ly.TrgAvgFromD(ctx)
}
// DWtSubMean subtracts the mean DWt for each recv neuron.
func (ly *LayerParams) DWtSubMean(ctx *Context, ri uint32) {
if ly.Type == CNeLayer {
ly.NuclearDWtNeuron(ctx, ri)
}
lni := ri - ly.Indexes.NeurSt
rn := ly.Indexes.RecvN
for pi := uint32(0); pi < rn; pi++ {
pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi))
Paths[pti].DWtSubMean(ctx, pti, ri, lni)
}
}
//////// SlowAdapt
// SlowAdaptLayer is the layer-level slow adaptation functions.
// Calls AdaptInhib and AvgDifFromTrgAvg for Synaptic Scaling.
// Does NOT call pathway-level methods.
func (ly *LayerParams) SlowAdaptLayer(ctx *Context) {
ly.AvgDifFromTrgAvg(ctx)
}
// AdaptGi adapts inhibition if enabled.
func (ly *LayerParams) AdaptGi(ctx *Context) {
if ly.Inhib.ActAvg.AdaptGi.IsFalse() || ly.IsInput() {
return
}
// note: this is happening redundantly across all ndata based on shared LayerActMAvg values.
for di := uint32(0); di < ctx.NData; di++ {
giMult := LayerStates.Value(int(ly.Index), int(di), int(LayerGiMult))
avg := LayerStates.Value(int(ly.Index), int(di), int(LayerActMAvg))
ly.Inhib.ActAvg.Adapt(&giMult, avg)
LayerStates.Set(giMult, int(ly.Index), int(di), int(LayerGiMult))
}
}
// AvgDifFromTrgAvg updates neuron-level AvgDif values from AvgPct - TrgAvg
// which is then used for synaptic scaling of LWt values in Path SynScale.
func (ly *LayerParams) AvgDifFromTrgAvg(ctx *Context) {
sp := uint32(0)
if ly.Indexes.NPools > 1 {
sp = 1
}
np := ly.Indexes.NPools
for spi := sp; spi < np; spi++ {
pi := ly.PoolIndex(spi)
nsi := PoolIxs.Value(int(pi), int(PoolNeurSt))
nei := PoolIxs.Value(int(pi), int(PoolNeurEd))
plavg := float32(0)
nn := 0
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
plavg += NeuronAvgs.Value(int(ni), int(ActAvg))
nn++
}
if nn == 0 {
continue
}
plavg /= float32(nn)
if plavg < 0.0001 { // gets unstable below here
continue
}
PoolAvgDifInit(pi, 0)
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
apct := NeuronAvgs.Value(int(ni), int(ActAvg)) / plavg
adif := apct - NeuronAvgs.Value(int(ni), int(TrgAvg))
NeuronAvgs.Set(apct, int(ni), int(AvgPct))
NeuronAvgs.Set(adif, int(ni), int(AvgDif))
PoolAvgDifUpdate(pi, 0, math32.Abs(adif))
}
PoolAvgDifCalc(pi, 0)
for di := uint32(1); di < ctx.NData; di++ { // copy to other datas
Pools.Set(Pools.Value(int(pi), int(0), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg))), int(pi), int(di), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))
Pools.Set(Pools.Value(int(pi), int(0), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Max))), int(pi), int(di), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))
}
}
if sp == 1 { // update layer pool
lpi := ly.PoolIndex(0)
PoolAvgDifInit(lpi, 0)
nsi := PoolIxs.Value(int(lpi), int(PoolNeurSt))
nei := PoolIxs.Value(int(lpi), int(PoolNeurEd))
for lni := nsi; lni < nei; lni++ {
ni := ly.Indexes.NeurSt + uint32(lni)
if NeuronIsOff(ni) {
continue
}
PoolAvgDifUpdate(lpi, 0, math32.Abs(NeuronAvgs.Value(int(ni), int(AvgDif))))
}
PoolAvgDifCalc(lpi, 0)
for di := uint32(1); di < ctx.NData; di++ { // copy to other datas
Pools.Set(Pools.Value(int(lpi), int(0), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg))), int(lpi), int(di), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))
Pools.Set(Pools.Value(int(lpi), int(0), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Max))), int(lpi), int(di), int(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))
}
}
}
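// The following is an illustrative sketch (not part of the generated API or GPU
// code) of the per-neuron computation in AvgDifFromTrgAvg above: AvgPct is the
// neuron's ActAvg relative to its pool average, and AvgDif is how far that is
// from the neuron's target average; AvgDif then drives synaptic scaling of LWt
// values in Path SynScale.
func avgDifSketch(actAvg, poolAvg, trgAvg float32) (avgPct, avgDif float32) {
avgPct = actAvg / poolAvg
avgDif = avgPct - trgAvg
return avgPct, avgDif
}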
// SlowAdaptNeuron does path & synapse level slow adaptation on SWt and
// overall synaptic scaling, per each receiving neuron ri.
func (ly *LayerParams) SlowAdaptNeuron(ctx *Context, ri uint32) {
lni := ri - ly.Indexes.NeurSt
rn := ly.Indexes.RecvN
for pi := uint32(0); pi < rn; pi++ {
pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi))
Paths[pti].SlowAdapt(ctx, ly, pti, ri, lni)
}
}
//gosl:end
// LRateMod sets the LRate modulation parameter for Paths, which is
// for dynamic modulation of learning rate (see also LRateSched).
// Updates the effective learning rate factor accordingly.
func (ly *Layer) LRateMod(mod float32) {
for _, pj := range ly.RecvPaths {
// if pj.Off { // keep all sync'd
//
// continue
// }
pj.LRateMod(mod)
}
}
// LRateSched sets the schedule-based learning rate multiplier.
// See also LRateMod.
// Updates the effective learning rate factor accordingly.
func (ly *Layer) LRateSched(sched float32) {
for _, pj := range ly.RecvPaths {
// if pj.Off { // keep all sync'd
//
// continue
// }
pj.LRateSched(sched)
}
}
// SetSubMean sets the SubMean parameters in this layer and its receiving pathways.
// trgAvg is for Learn.TrgAvgAct.SubMean;
// path is for the pathways' Learn.DWt.SubMean.
// In both cases, it is generally best to have both parameters set to 0
// at the start of learning.
func (ly *Layer) SetSubMean(trgAvg, path float32) {
ly.Params.Learn.TrgAvgAct.SubMean = trgAvg
for _, pj := range ly.RecvPaths {
// if pj.Off { // keep all sync'd
//
// continue
// }
pj.Params.Learn.DWt.SubMean = path
}
}
// Code generated by "goal build"; DO NOT EDIT.
//line learn-net.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
// DWt computes the weight change (learning) based on current
// running-average activation values. Copies synapses back from GPU,
// for case where viewing the synapses.
func (nt *Network) DWt() {
nix := nt.NetIxs()
ctx := nt.Context()
sd := int(nix.NSyns * ctx.NData)
RunDWtSyn(sd)
RunDWtFromDiSyn(int(nix.NSyns))
RunDoneSynapsesTrace()
}
// WtFromDWt updates the weights from delta-weight changes,
// after having done DWt previously.
// Also does SlowUpdate.
func (nt *Network) WtFromDWt() {
nix := nt.NetIxs()
RunWtFromDWtLayer(int(nix.NLayers))
RunDWtSubMeanNeuron(int(nix.NNeurons))
RunWtFromDWtSyn(int(nix.NSyns))
nt.SlowUpdate()
RunDoneSynapses()
}
// DWtToWt computes the weight change (learning) based on current
// running-average activation values, and then WtFromDWt,
// without syncing any synapse-level state.
// This should be used when not viewing the weights.
// Also does SlowUpdate.
func (nt *Network) DWtToWt() {
nix := nt.NetIxs()
ctx := nt.Context()
sd := int(nix.NSyns * ctx.NData)
RunDWtSyn(sd)
RunDWtFromDiSyn(int(nix.NSyns))
RunWtFromDWtLayer(int(nix.NLayers))
RunDWtSubMeanNeuron(int(nix.NNeurons))
RunWtFromDWtSyn(int(nix.NSyns))
nt.SlowUpdate()
RunDone()
}
// SlowUpdate does ctx.SlowInc() and calls SlowAdapt at SlowInterval
// and AdaptGi at AdaptGiInterval.
func (nt *Network) SlowUpdate() {
ctx := nt.Context()
slow, adaptgi := ctx.SlowInc()
if slow {
nt.SlowAdapt()
}
if adaptgi {
nt.AdaptGi()
}
}
// SlowAdapt runs slow adaptation functions associated with sleep,
// including synaptic scaling associated with overall neural activity.
func (nt *Network) SlowAdapt() {
nix := nt.NetIxs()
RunSlowAdaptLayer(int(nix.NLayers))
RunSlowAdaptNeuron(int(nix.NNeurons))
}
// AdaptGi does adapting inhibition at a slower interval.
func (nt *Network) AdaptGi() {
nix := nt.NetIxs()
RunAdaptGiLayer(int(nix.NLayers))
}
// LRateMod sets the LRate modulation parameter for Paths, which is
// for dynamic modulation of learning rate (see also LRateSched).
// Updates the effective learning rate factor accordingly.
// Must call ToGPUParams() after once done changing all params.
func (nt *Network) LRateMod(mod float32) {
for _, ly := range nt.Layers {
// if ly.Off { // keep all sync'd
//
// continue
// }
ly.LRateMod(mod)
}
}
// LRateSched sets the schedule-based learning rate multiplier.
// See also LRateMod.
// Updates the effective learning rate factor accordingly.
// Must call ToGPUParams() after once done changing all params.
func (nt *Network) LRateSched(sched float32) {
for _, ly := range nt.Layers {
// if ly.Off { // keep all sync'd
//
// continue
// }
ly.LRateSched(sched)
}
}
// SetSubMean sets the SubMean parameters in all the layers in the network.
// trgAvg is for Learn.TrgAvgAct.SubMean;
// path is for the pathways' Learn.DWt.SubMean.
// In both cases, it is generally best to have both parameters set to 0
// at the start of learning.
func (nt *Network) SetSubMean(trgAvg, path float32) {
for _, ly := range nt.Layers {
// if ly.Off { // keep all sync'd
//
// continue
// }
ly.SetSubMean(trgAvg, path)
}
}
//////// Methods used in MPI computation, which don't depend on MPI specifically
// CollectDWts writes all of the synaptic DWt values to the given dwts slice.
// If dwts is nil, it is allocated to the required size, and the method returns
// true so that the actual length of dwts can be passed next time around.
// Used for MPI sharing of weight changes across processors.
// This syncs Layers and Synapses from the GPU first (a no-op if not using GPU).
func (nt *Network) CollectDWts(dwts *[]float32) bool {
RunGPUSync()
RunDoneLayersSynapses()
idx := 0
made := false
if *dwts == nil {
nwts := 0
for _, ly := range nt.Layers {
nwts += 5 // ActAvgValues
nwts += int(ly.NNeurons) // ActAvg
if ly.Params.IsLearnTrgAvg() {
nwts += int(ly.NNeurons)
}
for _, pj := range ly.SendPaths {
nwts += int(pj.NSyns) + 3 // Scale, AvgAvg, MaxAvg
}
}
*dwts = make([]float32, nwts)
made = true
}
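// Packing layout per layer: 5 layer-level running-average values, then ActAvg per neuron,
// then (if IsLearnTrgAvg) DTrgAvg per neuron, then DWt per synapse for each sending pathway.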
for li, ly := range nt.Layers {
nn := ly.NNeurons
(*dwts)[idx+0] = LayerStates.Value(int(li), int(0), int(LayerActMAvg))
(*dwts)[idx+1] = LayerStates.Value(int(li), int(0), int(LayerActPAvg))
(*dwts)[idx+2] = LayerStates.Value(int(li), int(0), int(LayerAvgMaxGeM))
(*dwts)[idx+3] = LayerStates.Value(int(li), int(0), int(LayerAvgMaxGiM))
(*dwts)[idx+4] = LayerStates.Value(int(li), int(0), int(LayerGiMult))
idx += 5
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
(*dwts)[idx+int(lni)] = NeuronAvgs.Value(int(ni), int(ActAvg))
}
idx += int(nn)
if ly.Params.IsLearnTrgAvg() {
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
(*dwts)[idx+int(lni)] = NeuronAvgs.Value(int(ni), int(DTrgAvg))
}
idx += int(nn)
}
for _, pj := range ly.SendPaths {
for lni := range pj.SendCon {
scon := pj.SendCon[lni]
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
syni := pj.SynStIndex + syi
(*dwts)[idx+int(syi)] = Synapses.Value(int(syni), int(DWt))
// if syni < 100 {
// fmt.Printf("%d: %d = %g\n", syni, syi, (*dwts)[idx+int(syi)])
// }
}
}
idx += int(pj.NSyns)
}
}
return made
}
// SetDWts sets the DWt weight changes from the given array of floats, which must be the correct size.
// navg is the number of processors aggregated in these dwts; some variables need to be
// averaged instead of summed (e.g., ActAvg).
// This syncs Layers and Synapses to the GPU after (a no-op if not using GPU).
func (nt *Network) SetDWts(dwts []float32, navg int) {
idx := 0
davg := 1 / float32(navg)
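// The layer-level running averages and per-neuron ActAvg are scaled by davg to average
// across processors; DTrgAvg and DWt are summed quantities and are set directly.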
for li, ly := range nt.Layers {
nn := ly.NNeurons
LayerStates.Set(davg*dwts[idx+0], int(li), int(0), int(LayerActMAvg))
LayerStates.Set(davg*dwts[idx+1], int(li), int(0), int(LayerActPAvg))
LayerStates.Set(davg*dwts[idx+2], int(li), int(0), int(LayerAvgMaxGeM))
LayerStates.Set(davg*dwts[idx+3], int(li), int(0), int(LayerAvgMaxGiM))
LayerStates.Set(davg*dwts[idx+4], int(li), int(0), int(LayerGiMult))
idx += 5
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
NeuronAvgs.Set(davg*dwts[idx+int(lni)], int(ni), int(ActAvg))
}
idx += int(nn)
if ly.Params.IsLearnTrgAvg() {
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
NeuronAvgs.Set(dwts[idx+int(lni)], int(ni), int(DTrgAvg))
}
idx += int(nn)
}
for _, pj := range ly.SendPaths {
for lni := range pj.SendCon {
scon := pj.SendCon[lni]
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
syni := pj.SynStIndex + syi
Synapses.Set(dwts[idx+int(syi)], int(syni), int(DWt))
// if syni < 100 {
// fmt.Printf("%d: %d = %g = %g\n", syni, syi, dwts[idx+int(syi)], Synapses[syni, DWt])
// }
}
}
idx += int(pj.NSyns)
}
}
ToGPULayersSynapses()
RunGPUSync()
RunDone()
}
//gosl:start
// DWtSyn is the kernel over Synapses * Data to
// compute weight changes (learning).
func DWtSyn(i uint32) { //gosl:kernel
ctx := GetCtx(0)
syni := ctx.ItemIndex(i)
if syni >= NetworkIxs[0].NSyns {
return
}
di := ctx.DataIndex(i)
pti := SynapseIxs.Value(int(syni), int(SynPathIndex))
si := SynapseIxs.Value(int(syni), int(SynSendIndex))
ri := SynapseIxs.Value(int(syni), int(SynRecvIndex))
Paths[pti].DWtSyn(ctx, &Layers[Paths[pti].Indexes.RecvLayer], syni, si, ri, di)
}
// DWtFromDiSyn is the kernel over Synapses (not * Data) to
// integrate DWt over Di data parallel values.
func DWtFromDiSyn(syni uint32) { //gosl:kernel
ctx := GetCtx(0)
if syni >= NetworkIxs[0].NSyns {
return
}
pti := SynapseIxs.Value(int(syni), int(SynPathIndex))
Paths[pti].DWtFromDi(ctx, syni)
}
// WtFromDWtLayer is the kernel over Layers for layer-level Wt update.
// Does TrgAvg updating.
func WtFromDWtLayer(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].WtFromDWtLayer(ctx)
}
// DWtSubMeanNeuron is the kernel over receiving Neurons to
// compute DWt - mean(DWt) for each recv neuron.
func DWtSubMeanNeuron(ni uint32) { //gosl:kernel
ctx := GetCtx(0)
if ni >= NetworkIxs[0].NNeurons {
return
}
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].DWtSubMean(ctx, ni)
}
// WtFromDWtSyn is the kernel over Synapses (not * Data) to
// compute Wt from DWt weight changes.
func WtFromDWtSyn(syni uint32) { //gosl:kernel
ctx := GetCtx(0)
if syni >= NetworkIxs[0].NSyns {
return
}
pti := SynapseIxs.Value(int(syni), int(SynPathIndex))
Paths[pti].WtFromDWtSyn(ctx, syni)
}
// SlowAdaptLayer is the kernel over Layers (not * Data) to
// run slow adaptation functions.
// Calls AvgDifFromTrgAvg for Synaptic Scaling.
func SlowAdaptLayer(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].SlowAdaptLayer(ctx)
}
// SlowAdaptNeuron is the kernel over receiving Neurons to
// compute slow adaptation in receiving pathways.
func SlowAdaptNeuron(ni uint32) { //gosl:kernel
ctx := GetCtx(0)
if ni >= NetworkIxs[0].NNeurons {
return
}
li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
Layers[li].SlowAdaptNeuron(ctx, ni)
}
// AdaptGiLayer is the kernel over Layers (not * Data) to
// run the adapting inhibition function.
func AdaptGiLayer(li uint32) { //gosl:kernel
ctx := GetCtx(0)
if li >= NetworkIxs[0].NLayers {
return
}
Layers[li].AdaptGi(ctx)
}
//gosl:end
// Code generated by "goal build"; DO NOT EDIT.
//line learn-path.goal:1
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
// "fmt"
"cogentcore.org/core/math32"
"github.com/emer/axon/v2/fsfffb"
)
//gosl:start
// DWtSyn is the overall entry point for weight change (learning) at given synapse.
// It selects the appropriate function based on the pathway type.
// rlay is the receiving layer's parameters.
func (pt *PathParams) DWtSyn(ctx *Context, rlay *LayerParams, syni, si, ri, di uint32) {
if pt.Learn.Learn == 0 {
return
}
isTarget := rlay.IsTarget()
spi := NeuronIxs.Value(int(ri), int(NrnSubPool))
pi := rlay.PoolIndex(spi)
lpi := rlay.PoolIndex(0)
switch pt.Type {
case CTCtxtPath:
pt.DWtSynCTCtxt(ctx, syni, si, ri, lpi, pi, di)
case VSMatrixPath:
pt.DWtSynVSMatrix(ctx, syni, si, ri, lpi, pi, di)
case DSMatrixPath:
pt.DWtSynDSMatrix(ctx, syni, si, ri, lpi, pi, di)
case VSPatchPath:
pt.DWtSynVSPatch(ctx, syni, si, ri, lpi, pi, di)
case DSPatchPath:
pt.DWtSynDSPatch(ctx, syni, si, ri, lpi, pi, di)
case CNIOPath:
pt.DWtCNIO(ctx, rlay, syni, si, ri, lpi, pi, di)
case CNeUpPath:
pt.DWtCNeUp(ctx, rlay, syni, si, ri, lpi, pi, di)
case RWPath:
pt.DWtSynRWPred(ctx, syni, si, ri, lpi, pi, di)
case TDPredPath:
pt.DWtSynTDPred(ctx, syni, si, ri, lpi, pi, di)
case BLAPath:
pt.DWtSynBLA(ctx, syni, si, ri, lpi, pi, di)
case HipPath:
pt.DWtSynHip(ctx, syni, si, ri, lpi, pi, di, isTarget) // by default this is the same as DWtSynCortex (w/ unused Hebb component in the algorithm) except that it uses WtFromDWtSynNoLimits
default:
if pt.Learn.Hebb.On.IsTrue() {
pt.DWtSynHebb(ctx, syni, si, ri, lpi, pi, di)
} else if isTarget {
pt.DWtSynTarget(ctx, syni, si, ri, lpi, pi, di)
} else {
pt.DWtSynCortex(ctx, rlay, syni, si, ri, lpi, pi, di)
}
}
}
// SynCa gets the synaptic calcium P (potentiation) and D (depression)
// values, using an optimized integration of neuron-level [CaBins] values,
// and weight factors to capture the different CaP vs. CaD time constants.
func (pt *PathParams) SynCa(ctx *Context, si, ri, di uint32, syCaP, syCaD *float32) {
edcyc := ctx.CyclesTotal
stcyc := edcyc - (ctx.MinusCycles + ctx.PlusCycles)
nbins := (edcyc - stcyc) / CaBinCycles
cadSt := GvCaBinWts + GlobalScalarVars(nbins)
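// CaP bin weights occupy GvCaBinWts + [0..nbins), and the CaD bin weights follow at cadSt.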
b0 := CaBinForCycle(stcyc)
// T0
r0 := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(b0)))
s0 := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(b0)))
sp := r0 * s0
cp := sp * GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(0)), int(0))
cd := sp * GlobalScalars.Value(int(cadSt+GlobalScalarVars(0)), int(0))
syn20 := pt.Learn.DWt.SynCa20.IsTrue()
for i := int32(1); i < nbins; i++ {
bi := CaBinForCycle(stcyc + i*CaBinCycles)
rt := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(bi)))
st := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(bi)))
sp := float32(0)
if syn20 {
bm := CaBinForCycle(stcyc + (i-1)*CaBinCycles)
rt1 := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(bm)))
st1 := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(bm)))
sp = 0.25 * (rt + rt1) * (st + st1)
} else {
sp = rt * st
}
cp += sp * GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(i)), int(0))
cd += sp * GlobalScalars.Value(int(cadSt+GlobalScalarVars(i)), int(0))
}
*syCaP = pt.Learn.DWt.CaPScale * cp
*syCaD = cd
}
// SynCaTotal gets the total synaptic calcium co-product from the
// given ending cycle (total elapsed cycles) and number of cycles prior,
// using an optimized integration of neuron-level [CaBins] values.
func (pt *PathParams) SynCaTotal(ctx *Context, si, ri, di uint32, edcyc, ncyc int32) float32 {
nbins := ncyc / CaBinCycles
stcyc := edcyc - ncyc
sum := float32(0)
for i := range nbins {
bi := CaBinForCycle(stcyc + i*CaBinCycles)
rc := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(bi)))
sc := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(bi)))
sum += rc * sc
}
return sum * (8.0 / float32(nbins)) // original 150/50 weights sum to 8
}
// IMPORTANT: all DWt routines MUST set DiDWt to _something_, otherwise the
// previous value will persist! i.e., set it to 0 if no learning.
// DWtSynSoftBound does the standard soft weight bounding for given
// dwt weight change value. SoftBound must be done in the DWt step
// and not later, because it is learning-rule specific and enters
// into the zero-sum computation.
func (pt *PathParams) DWtSynSoftBound(ctx *Context, syni, di uint32, dwt float32) {
if dwt == 0 {
SynapseTraces.Set(0.0, int(syni), int(di), int(DiDWt))
} else {
lwt := Synapses.Value(int(syni), int(LWt)) // linear weight
edw := dwt
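// soft bounding: positive changes are scaled by the remaining headroom (1 - lwt),
// negative changes by lwt, keeping the linear weight within the [0, 1] range.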
if edw > 0 {
edw *= (1 - lwt)
} else {
edw *= lwt
}
SynapseTraces.Set(pt.Learn.LRate.Eff*edw, int(syni), int(di), int(DiDWt))
}
}
// DWtSynCortex computes the weight change (learning) at given synapse, using the
// kinase error-driven learning rule for cortical neurons. The error delta is
// based on the receiving neuron's [LearnCaP] - [LearnCaD], multiplied by a separate
// synaptic activation credit assignment factor computed from synaptic co-product CaD values.
func (pt *PathParams) DWtSynCortex(ctx *Context, rlay *LayerParams, syni, si, ri, lpi, pi, di uint32) {
learnNow := int32(Neurons.Value(int(ri), int(di), int(LearnNow)))
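// only learn if LearnNow falls within the current theta window (the last ThetaCycles cycles);
// otherwise DTr and DiDWt are zeroed so that no stale values persist.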
if learnNow-(ctx.CyclesTotal-ctx.ThetaCycles) < 0 { // not in this time window
SynapseTraces.Set(0.0, int(syni), int(di), int(DTr))
SynapseTraces.Set(0.0, int(syni), int(di), int(DiDWt))
return
}
syCa := pt.SynCaTotal(ctx, si, ri, di, learnNow, rlay.Learn.Timing.SynCaCycles)
// integrate synaptic trace over time: this is actually beneficial in certain cases,
// in addition to the ETrLearn factor.
SynapseTraces.Set(syCa, int(syni), int(di), int(DTr))
tr := pt.Learn.DWt.SynTrace(SynapseTraces.Value(int(syni), int(di), int(Tr)), syCa)
SynapseTraces.Set(tr, int(syni), int(di), int(Tr))
dwt := float32(0)
if syCa > pt.Learn.DWt.LearnThr { // todo: eliminate?
dwt = tr * Neurons.Value(int(ri), int(di), int(RLRate)) * Neurons.Value(int(ri), int(di), int(LearnDiff)) * Neurons.Value(int(ri), int(di), int(ETrLearn))
}
pt.DWtSynSoftBound(ctx, syni, di, dwt)
}
// DWtSynTarget computes the weight change (learning) at given synapse,
// for a target layer (i.e., Pulvinar in predictive error-driven learning).
func (pt *PathParams) DWtSynTarget(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
var caP, caD float32
pt.SynCa(ctx, si, ri, di, &caP, &caD)
// caP := Neurons[ri, di, CaP] // significantly worse!
// caD := Neurons[ri, di, CaD]
SynapseTraces.Set(caD, int(syni), int(di), int(DTr))
tr := pt.Learn.DWt.SynTrace(SynapseTraces.Value(int(syni), int(di), int(Tr)), caD)
SynapseTraces.Set(tr, int(syni), int(di), int(Tr))
dwt := Neurons.Value(int(ri), int(di), int(RLRate)) * (caP - caD)
pt.DWtSynSoftBound(ctx, syni, di, dwt)
}
// DWtSynCTCtxt computes the weight change (learning) at given synapse, using the
// kinase error-driven learning rule for cortical neurons, for CT context paths.
// The error delta is based on the receiving neuron's [LearnCaP] - [LearnCaD],
// multiplied by a separate synaptic activation credit assignment factor computed
// from synaptic co-product CaD values.
func (pt *PathParams) DWtSynCTCtxt(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
syn := Neurons.Value(int(si), int(di), int(BurstPrv)) // previous burst, not synCa
SynapseTraces.Set(syn, int(syni), int(di), int(DTr))
tr := pt.Learn.DWt.SynTrace(SynapseTraces.Value(int(syni), int(di), int(Tr)), syn)
SynapseTraces.Set(tr, int(syni), int(di), int(Tr))
// note: not including RLRate here!
dwt := tr * (Neurons.Value(int(ri), int(di), int(LearnCaP)) - Neurons.Value(int(ri), int(di), int(LearnCaD))) * Neurons.Value(int(ri), int(di), int(ETrLearn))
pt.DWtSynSoftBound(ctx, syni, di, dwt)
}
// DWtSynHebb computes the weight change (learning) at given synapse for cortex.
// Uses synaptically integrated spiking, computed at the Theta cycle interval.
// This is the trace version for hidden units, and uses syn CaP - CaD for targets.
func (pt *PathParams) DWtSynHebb(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
rLearnCaP := Neurons.Value(int(ri), int(di), int(LearnCaP))
sNrnCap := Neurons.Value(int(si), int(di), int(LearnCaP))
lwt := Synapses.Value(int(syni), int(LWt)) // linear weight
hebb := rLearnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt)
// not: Neurons[ri, di, RLRate]*
SynapseTraces.Set(pt.Learn.LRate.Eff*hebb, int(syni), int(di), int(DiDWt))
}
// DWtSynHip computes the weight change (learning) at given synapse for cortex + Hip (CPCA Hebb learning).
// Uses synaptically integrated spiking, computed at the Theta cycle interval.
// This is the trace version for hidden units, and uses syn CaP - CaD for targets.
// Adds proportional CPCA learning rule for hip-specific paths
func (pt *PathParams) DWtSynHip(ctx *Context, syni, si, ri, lpi, pi, di uint32, isTarget bool) {
// todo:
// var syCaP, syCaD float32
// pt.SynCa(ctx, si, ri, di, &syCaP, &syCaD)
//
// syn := syCaD // synaptic activity co-product factor.
// // integrate synaptic trace over time: this is actually beneficial in certain cases,
// // in addition to the ETrLearn factor.
// SynapseTraces[syni, di, DTr] = syn
// tr := pt.Learn.DWt.SynTrace(SynapseTraces[syni, di, Tr], syn)
// SynapseTraces[syni, di, Tr] = tr
//
// // error-driven learning part
// rLearnCaP := Neurons[ri, di, LearnCaP]
// rLearnCaD := Neurons[ri, di, LearnCaD]
// err := float32(0)
//
// if isTarget {
// err = syCaP - syCaD // for target layers, syn Ca drives error signal directly
// } else {
//
// err = tr * (rLearnCaP - rLearnCaD) * Neurons[ri, di, ETrLearn]
// }
//
// // softbound immediately -- enters into zero sum.
// // also other types might not use, so need to do this per learning rule
// lwt := Synapses[syni, LWt] // linear weight
//
// if err > 0 {
// err *= (1 - lwt)
// } else {
//
// err *= lwt
// }
//
// // hebbian-learning part
// sNrnCap := Neurons[si, di, LearnCaP]
// savg := 0.5 + pt.Hip.SAvgCor*(pt.Hip.SNominal-0.5)
// savg = 0.5 / math32.Max(pt.Hip.SAvgThr, savg) // keep this Sending Average Correction term within bounds (SAvgThr)
// hebb := rLearnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt)
//
// // setting delta weight (note: impossible to be CTCtxtPath)
// dwt := Neurons[ri, di, RLRate] * pt.Learn.LRate.Eff * (pt.Hip.Hebb*hebb + pt.Hip.Err*err)
// SynapseTraces[syni, di, DiDWt] = dwt
}
// DWtSynBLA computes the weight change (learning) at given synapse for BLAPath type.
// Like the BG Matrix learning rule, a synaptic tag "trace" is established at CS onset (ACh)
// and learning at US / extinction is a function of trace * delta from US activity
// (temporal difference), which limits learning.
func (pt *PathParams) DWtSynBLA(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
ach := GlobalScalars.Value(int(GvACh), int(di))
dwt := float32(0)
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 { // learn and reset
ract := Neurons.Value(int(ri), int(di), int(CaD))
if ract < pt.Learn.DWt.LearnThr {
ract = 0
}
tr := SynapseTraces.Value(int(syni), int(di), int(Tr))
ustr := pt.BLA.USTrace
tr = ustr*Neurons.Value(int(si), int(di), int(Burst)) + (1.0-ustr)*tr
delta := Neurons.Value(int(ri), int(di), int(CaP)) - Neurons.Value(int(ri), int(di), int(CaDPrev))
if delta < 0 { // neg delta learns slower in Acq, not Ext
delta *= pt.BLA.NegDeltaLRate
}
dwt = Neurons.Value(int(ri), int(di), int(RLRate)) * tr * delta * ract
SynapseTraces.Set(0.0, int(syni), int(di), int(Tr))
} else if ach > pt.BLA.AChThr {
// note: the former NonUSLRate parameter is not used -- the Trace update Tau replaces it, elegantly.
dtr := ach * Neurons.Value(int(si), int(di), int(Burst))
SynapseTraces.Set(dtr, int(syni), int(di), int(DTr))
tr := pt.Learn.DWt.SynTrace(SynapseTraces.Value(int(syni), int(di), int(Tr)), dtr)
SynapseTraces.Set(tr, int(syni), int(di), int(Tr))
} else {
SynapseTraces.Set(0.0, int(syni), int(di), int(DTr))
}
pt.DWtSynSoftBound(ctx, syni, di, dwt)
}
// DWtSynRWPred computes the weight change (learning) at given synapse,
// for the RWPath type.
func (pt *PathParams) DWtSynRWPred(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
// todo: move all of this into rn.RLRate
lda := GlobalScalars.Value(int(GvDA), int(di))
da := lda
lr := pt.Learn.LRate.Eff
eff_lr := lr
if NeuronIxs.Value(int(ri), int(NrnNeurIndex)) == 0 {
if Neurons.Value(int(ri), int(di), int(Ge)) > Neurons.Value(int(ri), int(di), int(Act)) && da > 0 { // clipped at top, saturate up
da = 0
}
if Neurons.Value(int(ri), int(di), int(Ge)) < Neurons.Value(int(ri), int(di), int(Act)) && da < 0 { // clipped at bottom, saturate down
da = 0
}
if da < 0 {
eff_lr *= pt.RLPred.OppSignLRate
}
} else {
eff_lr = -eff_lr // negative case
if Neurons.Value(int(ri), int(di), int(Ge)) > Neurons.Value(int(ri), int(di), int(Act)) && da < 0 { // clipped at top, saturate up
da = 0
}
if Neurons.Value(int(ri), int(di), int(Ge)) < Neurons.Value(int(ri), int(di), int(Act)) && da > 0 { // clipped at bottom, saturate down
da = 0
}
if da >= 0 {
eff_lr *= pt.RLPred.OppSignLRate
}
}
dwt := da * Neurons.Value(int(si), int(di), int(CaP)) // no recv unit activation
SynapseTraces.Set(eff_lr*dwt, int(syni), int(di), int(DiDWt))
}
// DWtSynTDPred computes the weight change (learning) at given synapse,
// for the TDPredPath type
func (pt *PathParams) DWtSynTDPred(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
// todo: move all of this into rn.RLRate
lda := GlobalScalars.Value(int(GvDA), int(di))
da := lda
lr := pt.Learn.LRate.Eff
eff_lr := lr
ni := NeuronIxs.Value(int(ri), int(NrnNeurIndex))
if ni == 0 {
if da < 0 {
eff_lr *= pt.RLPred.OppSignLRate
}
} else {
eff_lr = -eff_lr
if da >= 0 {
eff_lr *= pt.RLPred.OppSignLRate
}
}
dwt := da * Neurons.Value(int(si), int(di), int(CaDPrev)) // no recv unit activation, prior trial act
SynapseTraces.Set(eff_lr*dwt, int(syni), int(di), int(DiDWt))
}
// DWtSynVSMatrix computes the weight change (learning) at given synapse,
// for the VSMatrixPath type.
func (pt *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
// note: rn.RLRate already has BurstGain * ACh * DA * (D1 vs. D2 sign reversal) factored in.
hasRew := GlobalScalars.Value(int(GvHasRew), int(di)) > 0
ach := GlobalScalars.Value(int(GvACh), int(di))
if !hasRew && ach < 0.1 {
SynapseTraces.Set(0.0, int(syni), int(di), int(DTr))
return
}
rlr := Neurons.Value(int(ri), int(di), int(RLRate))
rplus := Neurons.Value(int(ri), int(di), int(CaP))
rminus := Neurons.Value(int(ri), int(di), int(CaD))
sact := Neurons.Value(int(si), int(di), int(CaD))
dtr := ach * (pt.VSMatrix.Delta * sact * (rplus - rminus))
if rminus > pt.Learn.DWt.LearnThr { // key: prevents learning if < threshold
dtr += ach * (pt.VSMatrix.Credit * sact * rminus)
}
dwt := float32(0)
if hasRew {
tr := SynapseTraces.Value(int(syni), int(di), int(Tr))
if pt.VSMatrix.RewActLearn.IsTrue() {
tr += (1 - GlobalScalars.Value(int(GvGoalMaint), int(di))) * dtr
}
dtr = 0
dwt = rlr * pt.Learn.LRate.Eff * tr
SynapseTraces.Set(0.0, int(syni), int(di), int(Tr))
} else {
dtr *= rlr
SynapseTraces.SetAdd(dtr, int(syni), int(di), int(Tr))
}
SynapseTraces.Set(dwt, int(syni), int(di), int(DiDWt))
SynapseTraces.Set(dtr, int(syni), int(di), int(DTr))
}
// DWtSynDSMatrix computes the weight change (learning) at given synapse,
// for the DSMatrixPath type.
func (pt *PathParams) DWtSynDSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
// note: rn.RLRate already has ACh * DA * (D1 vs. D2 sign reversal) factored in,
// at time of reward, and otherwise is just the sig deriv mod.
rlr := Neurons.Value(int(ri), int(di), int(RLRate))
dwt := float32(0)
dtr := float32(0)
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 { // US time -- use DA and current recv activity
tr := SynapseTraces.Value(int(syni), int(di), int(Tr))
dwt = rlr * pt.Learn.LRate.Eff * tr
SynapseTraces.Set(0.0, int(syni), int(di), int(Tr))
} else {
// pfmod := Pools[pi, di, fsfffb.ModAct]
pfmod := Neurons.Value(int(ri), int(di), int(GModSyn)) // syn value is always better
patchDAD1 := Pools.Value(int(pi), int(di), int(fsfffb.DAD1))
patchDAD2 := pt.DSMatrix.D2Scale * Pools.Value(int(pi), int(di), int(fsfffb.DAD2))
rplus := Neurons.Value(int(ri), int(di), int(CaP))
rminus := Neurons.Value(int(ri), int(di), int(CaD))
sact := Neurons.Value(int(si), int(di), int(CaD))
dtr = rlr * (pt.DSMatrix.Delta * sact * (rplus - rminus)) // always delta
if rminus > pt.Learn.DWt.LearnThr { // key: prevents learning if < threshold
act := pt.DSMatrix.Credit * rlr * sact * rminus // rlr is sig deriv -- todo: CaSyn??
dtr += (1.0 - pt.DSMatrix.PatchDA) * pfmod * act // std credit
if pfmod > pt.Learn.DWt.LearnThr { // we were active in output
// D1 dopamine discounts to the extent we are the correct action at this time: shunting
// if reward is positive at end, this doesn't overtrain; if reward is negative because
// _other_ actions were bad, this insulates the correct one.
// if reward is negative because this action is bad, patchDAD2 adds to get more blame.
dtr += pfmod * pt.DSMatrix.PatchDA * ((1.0 - patchDAD1) + patchDAD2) * act
} else { // not active; we have no role in the outcome
// if the actual outcome is good, it is good for us to stay off
// but if it is bad, then we should actually turn on.
// so the sign should flip.
// how does patch factor into that? If it thinks this is good,
// but it wasn't activated, then go up, and vice-versa..
// note: despite similarities with active case above, neither eq works
// as well as the one eq: modulation by PF = much better learning
dtr += pt.DSMatrix.OffTrace * pt.DSMatrix.PatchDA * (patchDAD2 - patchDAD1) * act
}
}
SynapseTraces.SetAdd(dtr, int(syni), int(di), int(Tr))
}
SynapseTraces.Set(dwt, int(syni), int(di), int(DiDWt))
SynapseTraces.Set(dtr, int(syni), int(di), int(DTr))
}
// DWtSynVSPatch computes the weight change (learning) at given synapse,
// for the VSPatchPath type.
func (pt *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
ract := Neurons.Value(int(ri), int(di), int(CaDPrev)) // t-1
if ract < pt.Learn.DWt.LearnThr {
ract = 0
}
// note: rn.RLRate already has ACh * DA * (D1 vs. D2 sign reversal) factored in.
// and also the logic that non-positive DA leads to weight decreases.
sact := Neurons.Value(int(si), int(di), int(CaDPrev)) // t-1
dwt := Neurons.Value(int(ri), int(di), int(RLRate)) * pt.Learn.LRate.Eff * sact * ract
SynapseTraces.Set(dwt, int(syni), int(di), int(DiDWt))
}
// DWtSynDSPatch computes the weight change (learning) at given synapse,
// for the DSPatchPath type. Conditioned on PF modulatory inputs.
func (pt *PathParams) DWtSynDSPatch(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
ract := Neurons.Value(int(ri), int(di), int(CaD))
if ract < pt.Learn.DWt.LearnThr {
ract = 0
}
// note: rn.RLRate already has ACh * DA * (D1 vs. D2 sign reversal) factored in,
// at time of reward; otherwise is just sig deriv.
rlr := Neurons.Value(int(ri), int(di), int(RLRate))
dwt := float32(0)
dtr := float32(0)
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 { // US time -- use DA * tr
tr := SynapseTraces.Value(int(syni), int(di), int(Tr))
dwt = rlr * pt.Learn.LRate.Eff * tr
SynapseTraces.Set(0.0, int(syni), int(di), int(Tr))
} else {
pfmod := Neurons.Value(int(ri), int(di), int(GModSyn)) // so much better! todo: why!?
// pfmod := Pools[pi, di, fsfffb.ModAct]
sact := Neurons.Value(int(si), int(di), int(CaD)) // todo: use CaSyn instead of sact * ract? But BG is transient, so no?
dtr = pfmod * rlr * sact * ract // rlr is just sig deriv
SynapseTraces.SetAdd(dtr, int(syni), int(di), int(Tr))
}
SynapseTraces.Set(dwt, int(syni), int(di), int(DiDWt))
SynapseTraces.Set(dtr, int(syni), int(di), int(DTr))
}
// DWtCNIO computes the weight change (learning) at given synapse,
// for cerebellar neurons that learn from IO LearnNow signals.
func (pt *PathParams) DWtCNIO(ctx *Context, rlay *LayerParams, syni, si, ri, lpi, pi, di uint32) {
learnNow := int32(Neurons.Value(int(ri), int(di), int(LearnNow)))
if learnNow-(ctx.CyclesTotal-ctx.ThetaCycles) < 0 { // not in this time window
SynapseTraces.Set(0.0, int(syni), int(di), int(DTr))
SynapseTraces.Set(0.0, int(syni), int(di), int(DiDWt))
return
}
stcyc := learnNow - rlay.Nuclear.SendTimeOff
nbins := rlay.Nuclear.SendTimeBins
sact := float32(0)
for i := range nbins {
bi := CaBinForCycle(stcyc + i*CaBinCycles)
sact += Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(bi)))
}
// todo: rlrate? Neurons[ri, di, RLRate]
dwt := sact
if Neurons.Value(int(ri), int(di), int(TimePeak)) == 0 { // means that we got to end of cycle with no err: decay
aerr := rlay.Nuclear.ActTarget - Neurons.Value(int(ri), int(di), int(CaD))
dwt = sact * aerr * rlay.Nuclear.Decay
}
// todo: softbound?
pt.DWtSynSoftBound(ctx, syni, di, dwt)
// SynapseTraces[syni, di, DiDWt] = pt.Learn.LRate.Eff * dwt
}
// DWtCNeUp computes the weight change (learning) at given synapse,
// for [CNeUpPath] pathways. We assume that the actual inhib input
// is correct (it will be eventually), and adapt inhib to get closer
// to target baseline activity (cancellation), leaving the excite
// input alone because it is ground truth. If too active, increase inhib;
// if below, decrease inhib. This is only used on the inhib pathway.
func (pt *PathParams) DWtCNeUp(ctx *Context, rlay *LayerParams, syni, si, ri, lpi, pi, di uint32) {
learnNow := int32(Neurons.Value(int(ri), int(di), int(LearnNow)))
timePeak := Neurons.Value(int(ri), int(di), int(TimePeak))
if learnNow-(ctx.CyclesTotal-ctx.ThetaCycles) < 0 || timePeak == 0 { // no learn at baseline
SynapseTraces.Set(0.0, int(syni), int(di), int(DTr))
SynapseTraces.Set(0.0, int(syni), int(di), int(DiDWt))
return
}
bi := CaBinForCycle(learnNow - rlay.Nuclear.SendTimeOff)
sact := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(bi))) // sending activity
// todo: rlrate? Neurons[ri, di, RLRate]
aerr := rlay.Nuclear.ActTarget - Neurons.Value(int(ri), int(di), int(CaP)) // shorter time window here
dwt := -sact * aerr // opposite sign because inhibitory
// todo: softbound?
SynapseTraces.Set(pt.Learn.LRate.Eff*dwt, int(syni), int(di), int(DiDWt))
}
//////// WtFromDWt
// DWtFromDi updates DWt from data parallel DiDWt values
func (pt *PathParams) DWtFromDi(ctx *Context, syni uint32) {
dwt := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
dwt += SynapseTraces.Value(int(syni), int(di), int(DiDWt))
}
Synapses.SetAdd(dwt, int(syni), int(DWt))
}
// DWtSubMean subtracts the mean for given recv neuron ri,
// for pathways that have SubMean > 0.
// This is called on *receiving* pathways, prior to WtFromDWt.
func (pt *PathParams) DWtSubMean(ctx *Context, pti, ri, lni uint32) {
if pt.Learn.Learn.IsFalse() {
return
}
sm := pt.Learn.DWt.SubMean
if sm == 0 { // note default is now 0, so don't exclude Target layers, which should be 0
return
}
cni := pt.Indexes.RecvConSt + lni
synn := PathRecvCon.Value(int(cni), int(Nitems))
if synn < 1 {
return
}
synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff))
sumDWt := float32(0)
nnz := 0 // non-zero
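// compute the mean DWt over synapses with non-zero changes, then subtract sm * mean
// from each of those, making the changes (partially) zero-sum for this recv neuron.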
for ci := uint32(0); ci < synn; ci++ {
syni := RecvSynIxs.Value(int(synst + ci))
dw := Synapses.Value(int(syni), int(DWt))
if dw != 0 {
sumDWt += dw
nnz++
}
}
if nnz <= 1 {
return
}
sumDWt /= float32(nnz)
for ci := uint32(0); ci < synn; ci++ {
syni := RecvSynIxs.Value(int(synst + ci))
if Synapses.Value(int(syni), int(DWt)) != 0 {
Synapses.SetAdd(-sm*sumDWt, int(syni), int(DWt))
}
}
}
// WtFromDWtSyn is the overall entry point for updating weights from weight changes.
func (pt *PathParams) WtFromDWtSyn(ctx *Context, syni uint32) {
switch pt.Type {
case RWPath:
pt.WtFromDWtSynNoLimits(ctx, syni)
case TDPredPath:
pt.WtFromDWtSynNoLimits(ctx, syni)
case BLAPath:
pt.WtFromDWtSynNoLimits(ctx, syni)
case HipPath:
pt.WtFromDWtSynNoLimits(ctx, syni)
default:
pt.WtFromDWtSynCortex(ctx, syni)
}
}
// WtFromDWtSynCortex updates weights from dwt changes
func (pt *PathParams) WtFromDWtSynCortex(ctx *Context, syni uint32) {
dwt := Synapses.Value(int(syni), int(DWt))
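// accumulate DWt into DSWt, which drives the slow structural SWt adaptation in SWtFromWt.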
Synapses.SetAdd(dwt, int(syni), int(DSWt))
wt := Synapses.Value(int(syni), int(Wt))
lwt := Synapses.Value(int(syni), int(LWt))
pt.SWts.WtFromDWt(&wt, &lwt, dwt, Synapses.Value(int(syni), int(SWt)))
Synapses.Set(0.0, int(syni), int(DWt))
Synapses.Set(wt, int(syni), int(Wt))
Synapses.Set(lwt, int(syni), int(LWt))
// pj.Com.Fail(&sy.Wt, sy.SWt) // skipping for now -- not useful actually
}
// WtFromDWtSynNoLimits does the weight update without applying limits.
func (pt *PathParams) WtFromDWtSynNoLimits(ctx *Context, syni uint32) {
dwt := Synapses.Value(int(syni), int(DWt))
if dwt == 0 {
return
}
Synapses.SetAdd(dwt, int(syni), int(Wt))
if Synapses.Value(int(syni), int(Wt)) < 0 {
Synapses.Set(0.0, int(syni), int(Wt))
}
Synapses.Set(Synapses.Value(int(syni), int(Wt)), int(syni), int(LWt))
Synapses.Set(0.0, int(syni), int(DWt))
}
// SlowAdapt does the slow adaptation: SWt learning and SynScale
func (pt *PathParams) SlowAdapt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) {
pt.SWtFromWt(ctx, rlay, pti, ri, lni)
pt.SynScale(ctx, rlay, pti, ri, lni)
}
// SWtFromWt updates structural, slowly adapting SWt value based on
// accumulated DSWt values, which are zero-summed with additional soft bounding
// relative to SWt limits.
func (pt *PathParams) SWtFromWt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) {
if pt.Learn.Learn.IsFalse() || pt.SWts.Adapt.On.IsFalse() {
return
}
if rlay.IsTarget() {
return
}
mx := pt.SWts.Limit.Max
mn := pt.SWts.Limit.Min
lr := pt.SWts.Adapt.LRate
cni := pt.Indexes.RecvConSt + lni
synn := PathRecvCon.Value(int(cni), int(Nitems))
synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff))
avgDWt := float32(0)
avgWt := float32(0)
for ci := uint32(0); ci < synn; ci++ {
syni := RecvSynIxs.Value(int(synst + ci))
swt := Synapses.Value(int(syni), int(SWt))
// softbound for SWt
if Synapses.Value(int(syni), int(DSWt)) >= 0 {
Synapses.SetMul((mx - swt), int(syni), int(DSWt))
} else {
Synapses.SetMul((swt - mn), int(syni), int(DSWt))
}
avgDWt += Synapses.Value(int(syni), int(DSWt))
avgWt += Synapses.Value(int(syni), int(Wt))
}
avgDWt /= float32(synn)
avgWt /= float32(synn)
hiDk := math32.Clamp(pt.SWts.Adapt.HiMeanDecay*(avgWt-pt.SWts.Adapt.HiMeanThr), 0.0, pt.SWts.Adapt.HiMeanDecay)
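// hiDk decays LWt in proportion to how far the average Wt exceeds HiMeanThr,
// clamped to be at most HiMeanDecay.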
avgDWt *= pt.SWts.Adapt.SubMean
for ci := uint32(0); ci < synn; ci++ {
syni := RecvSynIxs.Value(int(synst + ci))
Synapses.SetAdd(lr*(Synapses.Value(int(syni), int(DSWt))-avgDWt), int(syni), int(SWt))
swt := Synapses.Value(int(syni), int(SWt))
Synapses.Set(0.0, int(syni), int(DSWt))
wt := Synapses.Value(int(syni), int(Wt))
lwt := pt.SWts.LWtFromWts(wt, swt)
lwt -= hiDk * lwt
Synapses.Set(lwt, int(syni), int(LWt))
Synapses.Set(pt.SWts.WtValue(swt, lwt), int(syni), int(Wt))
}
}
// SynScale performs synaptic scaling based on running average activation vs. targets.
// Layer-level AvgDifFromTrgAvg function must be called first.
func (pt *PathParams) SynScale(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) {
if pt.Learn.Learn.IsFalse() || pt.IsInhib() {
return
}
if !rlay.IsLearnTrgAvg() {
return
}
lr := rlay.Learn.TrgAvgAct.SynScaleRate
cni := pt.Indexes.RecvConSt + lni
synn := PathRecvCon.Value(int(cni), int(Nitems))
synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff))
adif := -lr * NeuronAvgs.Value(int(ri), int(AvgDif))
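// adif is the rate-scaled negative of [AvgDif]: neurons above their target average
// activity get weight decreases and those below get increases.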
for ci := uint32(0); ci < synn; ci++ {
syni := RecvSynIxs.Value(int(synst + ci))
lwt := Synapses.Value(int(syni), int(LWt))
swt := Synapses.Value(int(syni), int(SWt))
if adif >= 0 { // key to have soft bounding on lwt here!
Synapses.SetAdd((1-lwt)*adif*swt, int(syni), int(LWt))
} else {
Synapses.SetAdd(lwt*adif*swt, int(syni), int(LWt))
}
Synapses.Set(pt.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt))
}
}
//gosl:end
// LRateMod sets the LRate modulation parameter for Paths, which is
// for dynamic modulation of learning rate (see also LRateSched).
// Updates the effective learning rate factor accordingly.
func (pt *Path) LRateMod(mod float32) {
pt.Params.Learn.LRate.Mod = mod
pt.Params.Learn.LRate.Update()
}
// LRateSched sets the schedule-based learning rate multiplier.
// See also LRateMod.
// Updates the effective learning rate factor accordingly.
func (pt *Path) LRateSched(sched float32) {
pt.Params.Learn.LRate.Sched = sched
pt.Params.Learn.LRate.Update()
}
// Code generated by "goal build"; DO NOT EDIT.
//line learn.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/gosl/slbool"
"github.com/emer/axon/v2/chans"
"github.com/emer/axon/v2/kinase"
)
//////// learn.go contains the learning params and functions for axon
//gosl:start
//gosl:import "github.com/emer/axon/v2/kinase"
// LearnCaParams parameterizes the neuron-level calcium signals driving learning:
// LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or
// use the more complex and dynamic VGCC channel directly.
// LearnCa is then integrated in a cascading manner at multiple time scales:
// CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
type LearnCaParams struct {
// Norm is the denominator used for normalizing [LearnCa], so the
// max is roughly 1 to 1.5, which works best in terms of previous
// standard learning rules, and overall learning performance.
Norm float32 `default:"80"`
// SpikeVGCC uses spikes to generate VGCC instead of actual VGCC current.
// See SpikeVGCCa for calcium contribution from each spike.
SpikeVGCC slbool.Bool `default:"true"`
// SpikeVgccCa is the multiplier on spike for computing Ca contribution
// to [LearnCa], in SpikeVGCC mode.
SpikeVgccCa float32 `default:"35"`
// VgccTau is the time constant of decay for VgccCa calcium.
// It is highly transient around spikes, so decay and diffusion
// factors are more important than for long-lasting NMDA factor.
// VgccCa is integrated separately in [VgccCaInt] prior to adding
// into NMDA Ca in [LearnCa].
VgccTau float32 `default:"10"`
// PosBias is a multiplier on [LearnCaP] in computing [CaDiff] that drives learning.
// In some rare cases this can be useful in adjusting overall weight dynamics.
PosBias float32 `default:"1"`
// ETraceTau is the time constant for integrating an eligibility trace factor,
// which computes an exponential integrator of local neuron-wise error gradients.
ETraceTau float32 `default:"4"`
// ETraceScale multiplies the contribution of the ETrace to learning, determining
// the strength of its effect. This is definitely beneficial in cases that can
// benefit from longer traces, such as the deep music sim.
// Where beneficial, 0.1 or so is a useful value.
ETraceScale float32
pad float32
// Dt are time constants for integrating [LearnCa] across
// M, P and D cascading levels.
Dt kinase.CaDtParams `display:"inline"`
// VgccDt rate = 1 / tau
VgccDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
// ETraceDt rate = 1 / tau
ETraceDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
// NormInv = 1 / Norm
NormInv float32 `display:"-" json:"-" xml:"-" edit:"-"`
pad2 float32
}
func (lc *LearnCaParams) Defaults() {
lc.Norm = 80
lc.SpikeVGCC.SetBool(true)
lc.SpikeVgccCa = 35
lc.VgccTau = 10
lc.PosBias = 1
lc.ETraceTau = 4
lc.ETraceScale = 0
lc.Dt.Defaults()
lc.Dt.MTau = 2 // 2 > 5 in deepfsa, significant effect
lc.Update()
}
func (lc *LearnCaParams) Update() {
lc.Dt.Update()
lc.VgccDt = 1 / lc.VgccTau
lc.ETraceDt = 1 / lc.ETraceTau
lc.NormInv = 1 / lc.Norm
}
func (lc *LearnCaParams) ShouldDisplay(field string) bool {
switch field {
case "SpikeVgccCa":
return lc.SpikeVGCC.IsTrue()
default:
return true
}
}
// VgccCaFromSpike updates the simulated VGCC calcium [VgccCa] from spiking, if that option
// is selected, and performs time-integration of VgccCa into [VgccCaInt].
func (lc *LearnCaParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
if lc.SpikeVGCC.IsTrue() {
Neurons.Set(lc.SpikeVgccCa*Neurons.Value(int(ni), int(di), int(Spike)), int(ni), int(di), int(VgccCa))
}
Neurons.SetAdd(Neurons.Value(int(ni), int(di), int(VgccCa))-lc.VgccDt*Neurons.Value(int(ni), int(di), int(VgccCaInt)), int(ni), int(di), int(VgccCaInt))
// Dt only affects decay, not rise time
}
// LearnCas updates the LearnCa value and its cascaded values, based on NMDA and VGCC Ca.
// It first calls VgccCaFromSpike to update the spike-driven version of that variable
// and perform its time-integration.
func (lc *LearnCaParams) LearnCas(ctx *Context, ni, di uint32) {
lc.VgccCaFromSpike(ctx, ni, di)
Neurons.Set(lc.NormInv*(Neurons.Value(int(ni), int(di), int(NmdaCa))+Neurons.Value(int(ni), int(di), int(VgccCaInt))), int(ni), int(di), int(LearnCa))
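// cascaded integration: LearnCa -> LearnCaM -> LearnCaP -> LearnCaD at the M, P, D time
// constants; CaDiff = PosBias*LearnCaP - LearnCaD is the resulting error-gradient signal.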
Neurons.SetAdd(lc.Dt.MDt*(Neurons.Value(int(ni), int(di), int(LearnCa))-Neurons.Value(int(ni), int(di), int(LearnCaM))), int(ni), int(di), int(LearnCaM))
Neurons.SetAdd(lc.Dt.PDt*(Neurons.Value(int(ni), int(di), int(LearnCaM))-Neurons.Value(int(ni), int(di), int(LearnCaP))), int(ni), int(di), int(LearnCaP))
Neurons.SetAdd(lc.Dt.DDt*(Neurons.Value(int(ni), int(di), int(LearnCaP))-Neurons.Value(int(ni), int(di), int(LearnCaD))), int(ni), int(di), int(LearnCaD))
Neurons.Set(lc.PosBias*Neurons.Value(int(ni), int(di), int(LearnCaP))-Neurons.Value(int(ni), int(di), int(LearnCaD)), int(ni), int(di), int(CaDiff))
}
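// ETrace integrates the eligibility trace [ETrace] from the change in CaD relative to the
// previous trial ([CaDPrev]), and sets the learning multiplier
// [ETrLearn] = 1 + ETraceScale * [ETrace], clamped to be >= 0.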
func (lc *LearnCaParams) ETrace(ctx *Context, ni, di uint32, cad float32) {
tr := cad - Neurons.Value(int(ni), int(di), int(CaDPrev))
et := Neurons.Value(int(ni), int(di), int(ETrace))
et += lc.ETraceDt * (tr - et)
etLrn := 1 + lc.ETraceScale*et
if etLrn < 0 {
etLrn = 0
}
Neurons.Set(et, int(ni), int(di), int(ETrace))
Neurons.Set(etLrn, int(ni), int(di), int(ETrLearn))
}
// LearnTimingParams parameterizes the timing of Ca-driven Kinase
// algorithm learning, based on detecting the first major peak of
// differential fast - slow activity associated with the start of
// the minus phases: [TimePeak]. Learning occurs a fixed number of
// Cycles (ms) offset from the peak.
type LearnTimingParams struct {
// SynCaCycles is the number of cycles over which to integrate the synaptic
// pre * post calcium trace, which provides the credit assignment factor.
// Must be a multiple of CaBinCycles (10). Used for all learning (timed or not).
SynCaCycles int32 `default:"160"`
// LearnThr is the threshold on CaD that must be reached in order to be
// eligible for learning. Applies to non-timing based learning too.
LearnThr float32 `default:"0.1"`
// On indicates whether to use the timing parameters to drive
// learning timing, or instead just learn at the end of the trial
// automatically.
On slbool.Bool
// Refractory makes new learning depend on dropping below the learning
// threshold. Applies only to timing based learning.
Refractory slbool.Bool
// Cycles is the number of cycles (ms) after the [TimePeak] before
// learning occurs, or the peak detection is reset to start anew.
Cycles int32 `default:"170"`
// TimeDiffTau is the time constant for integrating [TimeDiff] as the absolute value of
// CaDiff integrated over time to smooth out significant local bumps.
TimeDiffTau float32 `default:"4"`
// TimeDiffDt is 1 / TimeDiffTau.
TimeDiffDt float32 `display:"-"`
pad float32
}
func (lt *LearnTimingParams) Defaults() {
lt.SynCaCycles = 160
lt.LearnThr = 0.1
lt.Cycles = 170
lt.TimeDiffTau = 4
lt.Update()
}
func (lt *LearnTimingParams) Update() {
lt.TimeDiffDt = 1.0 / lt.TimeDiffTau
}
func (lt *LearnTimingParams) ShouldDisplay(field string) bool {
switch field {
case "On", "SynCaCycles", "LearnThr":
return true
default:
return lt.On.IsTrue()
}
}
// TimingReset resets [TimePeak] and [TimeCycle] to 0.
func (lt *LearnTimingParams) TimingReset(ctx *Context, ni, di uint32) {
Neurons.Set(0.0, int(ni), int(di), int(TimePeak))
Neurons.Set(0.0, int(ni), int(di), int(TimeCycle))
}
// LearnNow sets [LearnNow] to CyclesTotal and sets the current
// [LearnDiff] = [CaDiff].
func (lt *LearnTimingParams) LearnNow(ctx *Context, ni, di uint32) {
Neurons.Set(float32(ctx.CyclesTotal), int(ni), int(di), int(LearnNow))
Neurons.Set(Neurons.Value(int(ni), int(di), int(CaDiff)), int(ni), int(di), int(LearnDiff))
}
// LearnNowOff sets [LearnNow] and [LearnDiff] to 0.
func (lt *LearnTimingParams) LearnNowOff(ctx *Context, ni, di uint32) {
Neurons.Set(0.0, int(ni), int(di), int(LearnNow))
Neurons.Set(0.0, int(ni), int(di), int(LearnDiff))
}
// | ISI | Minus | Plus |
// |-----|------------------|---------|
// ^ learn
// LearnTrialEnd sets LearnNow at end of the ThetaCycles trial,
// for timing=off case.
func (lt *LearnTimingParams) LearnTrialEnd(ctx *Context, ni, di uint32) bool {
if ctx.Cycle == ctx.ThetaCycles-1 {
// todo: this is breaking the TestNDataLearn test, for DTr, Tr values :(
if Neurons.Value(int(ni), int(di), int(CaD)) > lt.LearnThr {
lt.LearnNow(ctx, ni, di)
return true
}
lt.LearnNowOff(ctx, ni, di)
}
return false
}
// LearnTiming determines whether it is time to learn, for given neuron.
// Returns true if learning was just triggered.
func (lt *LearnTimingParams) LearnTiming(ctx *Context, ni, di uint32) bool {
if lt.On.IsFalse() {
return lt.LearnTrialEnd(ctx, ni, di)
}
timeDiff := Neurons.Value(int(ni), int(di), int(TimeDiff))
gaDiff := Neurons.Value(int(ni), int(di), int(GaP)) - Neurons.Value(int(ni), int(di), int(GaD))
timeDiff += lt.TimeDiffDt * (math32.Abs(gaDiff) - timeDiff)
Neurons.Set(timeDiff, int(ni), int(di), int(TimeDiff))
lrnNow := int32(Neurons.Value(int(ni), int(di), int(LearnNow)))
peak := Neurons.Value(int(ni), int(di), int(TimePeak))
peakCyc := int32(Neurons.Value(int(ni), int(di), int(TimeCycle)))
if timeDiff > peak {
peak = timeDiff
peakCyc = ctx.CyclesTotal
Neurons.Set(peak, int(ni), int(di), int(TimePeak))
Neurons.Set(float32(peakCyc), int(ni), int(di), int(TimeCycle))
}
tcyc := ctx.CyclesTotal - peakCyc
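// tcyc is the number of cycles since the detected peak: once it reaches Cycles, peak
// detection is reset and learning is triggered if CaD is above LearnThr
// (subject to the Refractory condition).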
if tcyc >= lt.Cycles {
lt.TimingReset(ctx, ni, di)
if lt.Refractory.IsTrue() && lrnNow > 0 { // no learning once learned
if Neurons.Value(int(ni), int(di), int(CaD)) <= lt.LearnThr {
lt.LearnNowOff(ctx, ni, di)
}
return false
}
if Neurons.Value(int(ni), int(di), int(CaD)) > lt.LearnThr {
lt.LearnNow(ctx, ni, di)
return true
}
Neurons.Set(0.0, int(ni), int(di), int(LearnNow))
}
return false
}
//////// TrgAvgActParams
// TrgAvgActParams govern the target and actual long-term average activity in neurons.
// The target value is adapted by neuron-wise error and the difference in actual vs. target,
// which drives synaptic scaling at a slow timescale (Network.SlowInterval).
type TrgAvgActParams struct {
// GiBaseInit sets an initial [GiBase] value, as a proportion of TrgRange.Max - [TrgAvg].
// This gives neurons differences in intrinsic inhibition / leak as a starting bias.
// This is independent of using the target values to scale synaptic weights. Only used if > 0.
GiBaseInit float32 `default:"0"`
// RescaleOn is whether to use target average activity mechanism to rescale
// synaptic weights, so that activity tracks the target values.
RescaleOn slbool.Bool `default:"true"`
// ErrLRate is the learning rate for adjustments to [TrgAvg] value based on the
// neuron-level error signal. Population TrgAvg values are renormalized to
// a fixed overall average, in TrgRange. Generally, deviating from the default value
// of this parameter doesn't make much difference.
ErrLRate float32 `default:"0.02"`
// SynScaleRate is a rate parameter for how much to scale synaptic weights
// in proportion to the [AvgDif] between target and actual proportion activity.
// This determines the effective strength of the constraint, and larger models
// may need more than the weaker default value.
SynScaleRate float32 `default:"0.005,0.0002"`
// SubMean is the amount of the mean [TrgAvg] change to subtract when updating.
// 1 = full zero sum changes. 1 works best in general, but in some cases it
// may be better to start with 0 and then increase using network SetSubMean
// method at a later point.
SubMean float32 `default:"0,1"`
// Permute permutes the order of TrgAvg values within the layer; otherwise they are just
// assigned in order from highest to lowest for easy visualization.
// Generally must be true if any topographic weights are being used.
Permute slbool.Bool `default:"true"`
// Pool means use pool-level target values if pool-level inhibition and
// 4D pooled layers are present. If pool sizes are relatively small,
// then it may not be useful to distribute targets just within each pool.
Pool slbool.Bool
pad int32
// TrgRange is the range of target normalized average activations.
// Individual neuron [TrgAvg] values are assigned values within this range,
// and clamped within this range. This is a critical parameter and the default
// usually works best.
TrgRange minmax.F32 `default:"{'Min':0.5,'Max':2}"`
}
func (ta *TrgAvgActParams) Update() {
}
func (ta *TrgAvgActParams) Defaults() {
ta.RescaleOn.SetBool(true)
ta.ErrLRate = 0.02
ta.SynScaleRate = 0.005
ta.SubMean = 1 // 1 in general beneficial
ta.TrgRange.Set(0.5, 2)
ta.Permute.SetBool(true)
ta.Pool.SetBool(true)
ta.Update()
}
func (ta *TrgAvgActParams) ShouldDisplay(field string) bool {
switch field {
case "RescaleOn", "GiBaseInit":
return true
case "TrgRange":
return ta.RescaleOn.IsTrue() || ta.GiBaseInit > 0
default:
return ta.RescaleOn.IsTrue()
}
}
//////// RLRateParams
// RLRateParams are receiving neuron learning rate modulation parameters.
// Has two factors: the derivative of the sigmoid based on CaD
// activity levels, and the max-normalized phase-wise differences in activity
// (Diff): |CaP - CaD| / max(CaP, CaD).
type RLRateParams struct {
// On toggles use of learning rate modulation.
On slbool.Bool `default:"true"`
// SigmoidLinear uses a linear approximation of the sigmoid derivative: if act > .5: 1-act; else act.
// Otherwise the actual quadratic sigmoid derivative a*(1-a) is used.
// This can improve learning in some cases but is generally not beneficial.
SigmoidLinear slbool.Bool `default:"false"`
// SigmoidMin is the minimum learning rate multiplier for sigmoidal
// act (1-act) factor, which prevents lrate from going too low for extreme values.
// Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.
SigmoidMin float32 `default:"0.05,1"`
// Diff modulates learning rate as a function of max-normalized plus - minus
// differences, which reduces learning for more active neurons and emphasizes
// it for less active ones. This is typically essential.
// Diff = |CaP - CaD| / max(CaP, CaD).
Diff slbool.Bool
// SpikeThr is the threshold on Max(CaP, CaD) below which Min lrate applies.
// Must be > 0 to prevent div by zero.
SpikeThr float32 `default:"0.1"`
// DiffThr is the threshold on recv neuron error delta, i.e., |CaP - CaD|
// below which lrate is at Min value.
DiffThr float32 `default:"0.02"`
// Min is the minimum learning rate value when |CaP - CaD| Diff is below DiffThr.
Min float32 `default:"0.001"`
pad int32
}
func (rl *RLRateParams) Update() {
}
func (rl *RLRateParams) Defaults() {
rl.On.SetBool(true)
rl.SigmoidLinear.SetBool(false)
rl.SigmoidMin = 0.05
rl.Diff.SetBool(true)
rl.SpikeThr = 0.1
rl.DiffThr = 0.02
rl.Min = 0.001
rl.Update()
}
func (rl *RLRateParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
case "Diff", "SigmoidMin", "SigmoidLinear":
return rl.On.IsTrue()
default:
return rl.On.IsTrue() && rl.Diff.IsTrue()
}
}
// RLRateSigDeriv returns the sigmoid derivative learning rate
// factor as a function of spiking activity, with mid-range values having
// full learning and extreme values a reduced learning rate:
// deriv = 4*act*(1-act) or linear: if act > .5: 2*(1-act); else 2*act
// The activity should be CaP and the layer maximum is used
// to normalize that to a 0-1 range.
func (rl *RLRateParams) RLRateSigDeriv(act float32, laymax float32) float32 {
if rl.On.IsFalse() || laymax == 0 {
return 1.0
}
ca := min(act/laymax, 1.0)
var lr float32
if rl.SigmoidLinear.IsTrue() {
if ca < 0.5 {
lr = 2 * ca
} else {
lr = 2 * (1 - ca)
}
} else {
lr = 4.0 * ca * (1 - ca) // .5 * .5 = .25 = peak
}
if lr < rl.SigmoidMin {
lr = rl.SigmoidMin
}
return lr
}
// RLRateDiff returns the learning rate as a function of difference between
// CaP and CaD values, normalized by max(CaP, CaD)
func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
if rl.On.IsFalse() || rl.Diff.IsFalse() {
return 1.0
}
smax := math32.Max(scap, scad)
if smax > rl.SpikeThr { // avoid div by 0
dif := math32.Abs(scap - scad)
if dif < rl.DiffThr {
return rl.Min
}
return (dif / smax)
}
return rl.Min
}
// LearnNeuronParams manages learning-related parameters at the neuron level.
// This is mainly the running-average activations that drive learning.
type LearnNeuronParams struct {
// CaLearn parameterizes the neuron-level calcium signals driving learning:
// LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking
// or use the more complex and dynamic VGCC channel directly. LearnCa is then
// integrated in a cascading manner at multiple time scales:
// LearnCaM (as in calmodulin), LearnCaP (ltP, CaMKII, plus phase),
// LearnCaD (ltD, DAPK1, minus phase).
CaLearn LearnCaParams `display:"inline"`
// LearnTimingParams parameterizes the timing of Ca-driven Kinase
// algorithm learning, based on detecting the first major peak of
// differential fast - slow activity associated with the start of
// the minus phases: [TimePeak]. Learning occurs a fixed number of
// Cycles (ms) offset from the peak.
Timing LearnTimingParams `display:"inline"`
// CaSpike parameterizes the neuron-level spike-driven calcium signals:
// CaM (calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
// These values are used in various cases as a proxy for the activation (spiking)
// based learning signal.
CaSpike kinase.CaSpikeParams `display:"inline"`
// NMDA channel parameters used for learning, vs. the ones driving activation.
// This allows exploration of learning parameters independent of their effects
// on active maintenance contributions of NMDA, and may be supported by different
// receptor subtypes.
LearnNMDA chans.NMDAParams `display:"inline"`
// TrgAvgAct has the synaptic scaling parameters for regulating overall average
// activity compared to neuron's own target level.
TrgAvgAct TrgAvgActParams `display:"inline"`
// RLRate has the recv neuron learning rate modulation params: an additional
// error-based modulation of learning for receiver side:
// RLRate = |CaP - CaD| / Max(CaP, CaD)
RLRate RLRateParams `display:"inline"`
// NeuroMod parameterizes neuromodulation effects on learning rate and activity,
// as a function of layer-level DA and ACh values, which are updated from global
// Context values, and computed from reinforcement learning algorithms.
NeuroMod NeuroModParams `display:"inline"`
}
func (ln *LearnNeuronParams) Update() {
ln.CaLearn.Update()
ln.Timing.Update()
ln.CaSpike.Update()
ln.LearnNMDA.Update()
ln.TrgAvgAct.Update()
ln.RLRate.Update()
ln.NeuroMod.Update()
}
func (ln *LearnNeuronParams) Defaults() {
ln.CaLearn.Defaults()
ln.Timing.Defaults()
ln.CaSpike.Defaults()
ln.LearnNMDA.Defaults()
ln.LearnNMDA.ITau = 1
ln.LearnNMDA.Update()
ln.TrgAvgAct.Defaults()
ln.RLRate.Defaults()
ln.NeuroMod.Defaults()
}
// InitNeuronCa initializes the neuron-level calcium learning and spiking variables.
// Called by InitWeights (at start of learning).
func (ln *LearnNeuronParams) InitNeuronCa(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(GnmdaLrn))
Neurons.Set(0, int(ni), int(di), int(NmdaCa))
Neurons.Set(0, int(ni), int(di), int(VgccCa))
Neurons.Set(0, int(ni), int(di), int(VgccCaInt))
Neurons.Set(0, int(ni), int(di), int(LearnCa))
Neurons.Set(0, int(ni), int(di), int(CaM))
Neurons.Set(0, int(ni), int(di), int(CaP))
Neurons.Set(0, int(ni), int(di), int(CaD))
Neurons.Set(0, int(ni), int(di), int(CaDPrev))
Neurons.Set(0, int(ni), int(di), int(CaSyn))
Neurons.Set(0, int(ni), int(di), int(LearnCaM))
Neurons.Set(0, int(ni), int(di), int(LearnCaP))
Neurons.Set(0, int(ni), int(di), int(LearnCaD))
Neurons.Set(0, int(ni), int(di), int(CaDiff))
Neurons.Set(0, int(ni), int(di), int(GaM))
Neurons.Set(0, int(ni), int(di), int(GaP))
Neurons.Set(0, int(ni), int(di), int(GaD))
Neurons.Set(0.0, int(ni), int(di), int(TimeDiff))
Neurons.Set(0.0, int(ni), int(di), int(TimePeak))
Neurons.Set(0.0, int(ni), int(di), int(TimeCycle))
Neurons.Set(0, int(ni), int(di), int(LearnDiff))
Neurons.Set(0, int(ni), int(di), int(LearnNow))
}
// LearnNMDAFromRaw updates the separate NMDA conductance and calcium values
// based on GeTot = GeRaw + external ge conductance. These are the variables
// that drive learning; they can be the same as those driving activation, or different,
// to allow testing learning Ca effects independently of activation effects.
func (ln *LearnNeuronParams) LearnNMDAFromRaw(ctx *Context, ni, di uint32, geTot float32) {
geEff := max(geTot, 0.0)
vmd := Neurons.Value(int(ni), int(di), int(VmDend))
Neurons.Set(ln.LearnNMDA.NMDASyn(Neurons.Value(int(ni), int(di), int(GnmdaLrn)), geEff), int(ni), int(di), int(GnmdaLrn))
gnmda := ln.LearnNMDA.Gnmda(Neurons.Value(int(ni), int(di), int(GnmdaLrn)), vmd)
Neurons.Set(float32(gnmda*ln.LearnNMDA.CaFromV(vmd)), int(ni), int(di), int(NmdaCa))
}
// CaFromSpike updates all spike-driven calcium variables, including LearnCa and CaSpike.
// Computed after new activation for current cycle is updated.
func (ln *LearnNeuronParams) CaFromSpike(ctx *Context, ni, di uint32) {
caM := Neurons.Value(int(ni), int(di), int(CaM))
caP := Neurons.Value(int(ni), int(di), int(CaP))
caD := Neurons.Value(int(ni), int(di), int(CaD))
spike := Neurons.Value(int(ni), int(di), int(Spike))
ln.CaSpike.CaMFromSpike(spike, &caM, &caP, &caD)
Neurons.Set(caM, int(ni), int(di), int(CaM))
Neurons.Set(caP, int(ni), int(di), int(CaP))
Neurons.Set(caD, int(ni), int(di), int(CaD))
ga := Neurons.Value(int(ni), int(di), int(Ge)) + Neurons.Value(int(ni), int(di), int(Gi))
gaM := Neurons.Value(int(ni), int(di), int(GaM))
gaP := Neurons.Value(int(ni), int(di), int(GaP))
gaD := Neurons.Value(int(ni), int(di), int(GaD))
ln.CaSpike.Dt.FromCa(ga, &gaM, &gaP, &gaD)
Neurons.Set(gaM, int(ni), int(di), int(GaM))
Neurons.Set(gaP, int(ni), int(di), int(GaP))
Neurons.Set(gaD, int(ni), int(di), int(GaD))
caSyn := Neurons.Value(int(ni), int(di), int(CaSyn))
caSyn = ln.CaSpike.CaSynFromSpike(spike, caSyn)
Neurons.Set(caSyn, int(ni), int(di), int(CaSyn))
ln.CaLearn.LearnCas(ctx, ni, di)
}
//////// SWtParams
// SigFun is the sigmoid function for value w in 0-1 range, with gain and offset params
func SigFun(w, gain, off float32) float32 {
if w <= 0 {
return 0
}
if w >= 1 {
return 1
}
return (1 / (1 + math32.Pow((off*(1-w))/w, gain)))
}
// SigFun61 is the sigmoid function for value w in 0-1 range, with default gain = 6, offset = 1 params
func SigFun61(w float32) float32 {
if w <= 0 {
return 0
}
if w >= 1 {
return 1
}
pw := (1 - w) / w
return (1 / (1 + pw*pw*pw*pw*pw*pw))
}
// SigInvFun is the inverse of the sigmoid function
func SigInvFun(w, gain, off float32) float32 {
if w <= 0 {
return 0
}
if w >= 1 {
return 1
}
return 1.0 / (1.0 + math32.Pow((1.0-w)/w, 1/gain)/off)
}
// SigInvFun61 is the inverse of the sigmoid function, with default gain = 6, offset = 1 params
func SigInvFun61(w float32) float32 {
if w <= 0 {
return 0
}
if w >= 1 {
return 1
}
rval := 1.0 / (1.0 + math32.Pow((1.0-w)/w, 1.0/6.0))
return rval
}
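// sigFun61Example is an illustrative sketch (not part of the generated code)
// of the gain-6 contrast enhancement and its inverse: linear values above 0.5
// are pushed toward 1 (e.g., 0.6 -> ~0.92) and values below 0.5 toward 0,
// while SigInvFun61 recovers the original linear value.
func sigFun61Example() float32 {
lw := float32(0.6)
wt := SigFun61(lw) // ~0.92: contrast-enhanced
return SigInvFun61(wt) // ~0.6: inverse round-trips back to the linear value
}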
// SWtInitParams are parameters for the initial SWt (slow, structural weight) values.
type SWtInitParams struct {
// SPct is how much of the initial random weights to capture in the
// slow, structural SWt values, with the rest going into the online learning
// LWt values. 1 gives the strongest initial biasing effect, for larger
// models that need more structural support. 0.5 should work for most models
// where stronger constraints are not needed.
SPct float32 `min:"0" max:"1" default:"0,1,0.5"`
// Mean is the target mean weight value across receiving neuron's pathway.
// The mean SWt values are constrained to remain at this value.
// Some pathways may benefit from lower mean of .4.
Mean float32 `default:"0.5,0.4"`
// Var is the initial variance in weight values, prior to constraints.
Var float32 `default:"0.25"`
// Sym symmetrizes the initial weight values with those in reciprocal pathway.
// Typically true for bidirectional excitatory connections.
Sym slbool.Bool `default:"true"`
}
func (sp *SWtInitParams) Defaults() {
sp.SPct = 0.5
sp.Mean = 0.5
sp.Var = 0.25
sp.Sym.SetBool(true)
}
func (sp *SWtInitParams) Update() {
}
// SWtAdaptParams manages adaptation of the [SWt] (slow, structural weight) values.
type SWtAdaptParams struct {
// On enables adaptation of [SWt] values at a slower time scale. If false, SWt
// values are not updated, in which case it is generally good to set Init.SPct=0 too.
On slbool.Bool
// LRate is the learning rate multiplier on the accumulated [DWt] values
// (which already have fast LRate applied), to drive updating of [SWt]
// during slow outer loop updating. Lower values impose stronger constraints,
// for larger networks that need more structural support, e.g., 0.001 is better
// after 1,000 epochs in large models. 0.1 is fine for smaller models.
LRate float32 `default:"0.1,0.01,0.001,0.0002"`
// SubMean is the amount of the mean to subtract from [SWt] delta when updating,
// to impose a zero-sum constraint on overall structural weight strengths.
// Generally best to set to 1. There is a separate SubMean factor for [LWt].
SubMean float32 `default:"1"`
// HiMeanDecay specifies a decay factor applied across all [LWt] weights
// in proportion to the deviation of the average effective weight value [Wt]
// above the HiMeanThr threshold. This is applied at the slow learning interval
// and should be very slow, for counteracting a gradual accumulation in overall
// weights that can occur even with SubMean factors (which only operate on weights
// that are actually changing on the current trial).
HiMeanDecay float32 `default:"0.0008"`
// HiMeanThr is the threshold on the average effective weight value [Wt],
// above which the HiMeanDecay decay factor is applied, at the slow learning interval.
HiMeanThr float32 `default:"0.5"`
// SigGain is the gain of the sigmoidal contrast enhancement function
// used to transform learned, linear [LWt] values into [Wt] values.
// This is critical to offset the damping effect of exponential soft bounding,
// but some special cases with different learning rules may benefit by making
// this linear (1) instead.
SigGain float32 `default:"6"`
pad, pad1 float32
}
func (sp *SWtAdaptParams) Defaults() {
sp.On.SetBool(true)
sp.LRate = 0.1
sp.SubMean = 1
sp.HiMeanDecay = 0.0008
sp.HiMeanThr = 0.5
sp.SigGain = 6
sp.Update()
}
func (sp *SWtAdaptParams) Update() {
}
func (sp *SWtAdaptParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return sp.On.IsTrue()
}
}
// SWtParams manages structural, slowly adapting weight values [SWt],
// in terms of initialization and updating over course of learning.
// SWts impose initial and slowly adapting constraints on neuron connectivity
// to encourage differentiation of neuron representations and overall good behavior
// in terms of not hogging the representational space.
// The [TrgAvg] activity constraint is not enforced through SWt: it needs to be
// more dynamic and is supported by the regular learned weights [LWt].
type SWtParams struct {
// Init controls the initialization of [SWt] values.
Init SWtInitParams `display:"inline"`
// Adapt controls adaptation of [SWt] values in response to [LWt] learning.
Adapt SWtAdaptParams `display:"inline"`
// Limit limits the range of [SWt] values, so that they do not fully
// determine the effective overall weight value.
Limit minmax.F32 `default:"{'Min':0.2,'Max':0.8}" display:"inline"`
}
func (sp *SWtParams) Defaults() {
sp.Init.Defaults()
sp.Adapt.Defaults()
sp.Limit.Set(0.2, 0.8)
}
func (sp *SWtParams) Update() {
sp.Init.Update()
sp.Adapt.Update()
}
// WtValue returns the effective Wt value given the SWt and LWt values.
func (sp *SWtParams) WtValue(swt, lwt float32) float32 {
return swt * sp.SigmoidLWt(lwt)
}
// ClipSWt returns SWt value clipped to valid range
func (sp *SWtParams) ClipSWt(swt float32) float32 {
return sp.Limit.ClampValue(swt)
}
// ClipWt returns Wt value clipped to 0-1 range
func (sp *SWtParams) ClipWt(wt float32) float32 {
if wt > 1 {
return 1
}
if wt < 0 {
return 0
}
return wt
}
// SigmoidLWt returns sigmoidal contrast-enhanced weight from linear weight,
// centered at 1 and normed in range +/- 1 around that
// in preparation for multiplying times SWt
func (sp *SWtParams) SigmoidLWt(lw float32) float32 {
var wt float32
if sp.Adapt.SigGain == 1 {
wt = lw
} else if sp.Adapt.SigGain == 6 {
wt = SigFun61(lw)
} else {
wt = SigFun(lw, sp.Adapt.SigGain, 1)
}
return 2.0 * wt // center at 1 instead of .5
}
// LWtFromWt returns linear weight from sigmoidal contrast-enhanced weight.
// wt is centered at 1, and normed in range +/- 1 around that,
// return value is in 0-1 range, centered at .5
func (sp *SWtParams) LWtFromWt(wt float32) float32 {
wte := wt * 0.5
if wte < 0 {
wte = 0
} else if wte > 1 {
wte = 1
}
if sp.Adapt.SigGain == 1 {
return wte
}
if sp.Adapt.SigGain == 6 {
return SigInvFun61(wte)
}
return SigInvFun(wte, sp.Adapt.SigGain, 1)
}
// LWtFromWts returns linear, learning LWt from wt and swt.
// LWt is set to reproduce given Wt relative to given SWt base value.
func (sp *SWtParams) LWtFromWts(wt, swt float32) float32 {
rwt := wt / swt
return sp.LWtFromWt(rwt)
}
// WtFromDWt updates the synaptic weights from accumulated weight changes.
// wt is the sigmoidal contrast-enhanced weight and lwt is the linear weight value.
func (sp *SWtParams) WtFromDWt(wt, lwt *float32, dwt, swt float32) {
if dwt == 0 {
if *wt == 0 { // restore failed wts
*wt = sp.WtValue(swt, *lwt)
}
return
}
// note: softbound happened at dwt stage
*lwt += dwt
if *lwt < 0 {
*lwt = 0
} else if *lwt > 1 {
*lwt = 1
}
*wt = sp.WtValue(swt, *lwt)
}
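// wtFromDWtExample is an illustrative sketch (not part of the generated code)
// of the effective weight computation Wt = SWt * SigmoidLWt(LWt): with
// LWt = 0.5, SigmoidLWt = 1 so Wt == SWt; a positive DWt of 0.1 then moves
// LWt to 0.6 and Wt to SWt * SigmoidLWt(0.6) ~= 0.92 (SWt = 0.5, SigGain = 6).
func wtFromDWtExample() float32 {
var sp SWtParams
sp.Defaults()
swt := float32(0.5)
lwt := float32(0.5)
wt := sp.WtValue(swt, lwt) // == swt: SigmoidLWt(0.5) = 1
sp.WtFromDWt(&wt, &lwt, 0.1, swt) // lwt -> 0.6, wt -> ~0.92
return wt
}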
//gosl:end
// RandVar returns the random variance in weight value (zero mean) based on Var param
func (sp *SWtInitParams) RandVar(rnd randx.Rand) float32 {
return sp.Var * 2.0 * (rnd.Float32() - 0.5)
}
// // RandVar returns the random variance (zero mean) based on DreamVar param
// func (sp *SWtAdaptParams) RandVar(rnd randx.Rand) float32 {
// return sp.DreamVar * 2.0 * (rnd.Float32(-1) - 0.5)
// }
// InitWeightsSyn initializes weight values based on WtInit randomness parameters
// for an individual synapse.
// It also updates the linear weight value based on the sigmoidal weight value.
func (sp *SWtParams) InitWeightsSyn(ctx *Context, syni uint32, rnd randx.Rand, mean, spct float32) {
wtv := sp.Init.RandVar(rnd)
wt := mean + wtv
Synapses.Set(wt, int(syni), int(Wt))
Synapses.Set(sp.ClipSWt(mean+spct*wtv), int(syni), int(SWt))
if spct == 0 { // this is critical for weak init wt, SPCt = 0 paths
Synapses.Set(0.5, int(syni), int(SWt))
}
Synapses.Set(sp.LWtFromWts(wt, Synapses.Value(int(syni), int(SWt))), int(syni), int(LWt))
Synapses.Set(0, int(syni), int(DWt))
Synapses.Set(0, int(syni), int(DSWt))
}
// InitWeightsSynTrace initializes SynapseTrace values
// for an individual synapse.
func (sp *SWtParams) InitWeightsSynTrace(ctx *Context, syni, di uint32) {
SynapseTraces.Set(0, int(syni), int(di), int(Tr))
SynapseTraces.Set(0, int(syni), int(di), int(DTr))
SynapseTraces.Set(0, int(syni), int(di), int(DiDWt))
}
//gosl:start
// LRateParams manages learning rate parameters for scaling [DWt] delta
// weight values that then update [LWt] online learned weights.
// It has two optional modulation factors on top of a Base learning rate.
type LRateParams struct {
// Base learning rate for this pathway, which can be modulated
// by the other factors below. Generally larger networks use slower rates.
Base float32 `default:"0.04,0.1,0.2"`
// Sched is a scheduled learning rate multiplier, simulating reduction
// in plasticity over aging. Use the [Network.LRateSched] method to apply
// a given value to all pathways in the network.
Sched float32
// Mod is a dynamic learning rate modulation factor, typically driven by
// neuromodulation (e.g., dopamine).
Mod float32
// Eff is the net effective actual learning rate multiplier used in
// computing [DWt]: Eff = Mod * Sched * Base
Eff float32 `edit:"-"`
}
func (ls *LRateParams) Defaults() {
ls.Base = 0.04
ls.Sched = 1
ls.Mod = 1
ls.Update()
}
func (ls *LRateParams) Update() {
ls.UpdateEff()
}
func (ls *LRateParams) UpdateEff() {
ls.Eff = ls.Mod * ls.Sched * ls.Base
}
// Init initializes modulation values back to 1 and updates Eff
func (ls *LRateParams) Init() {
ls.Sched = 1
ls.Mod = 1
ls.UpdateEff()
}
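// lrateEffExample is an illustrative sketch (not part of the generated code)
// of how the effective learning rate combines the three factors: Eff = Mod * Sched * Base.
func lrateEffExample() float32 {
var lr LRateParams
lr.Defaults() // Base = 0.04, Sched = 1, Mod = 1 -> Eff = 0.04
lr.Mod = 0.5 // e.g., neuromodulatory (dopamine) scaling
lr.Sched = 0.1 // e.g., late-training schedule
lr.UpdateEff() // Eff = 0.5 * 0.1 * 0.04 = 0.002
return lr.Eff
}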
//////// DWtParams
// DWtParams has misc parameters for computing weight changes ([DWt]) for the default
// kinase trace-based error-driven cortical learning rule, and for other specialized
// learning rules.
type DWtParams struct {
// SubMean is the amount of the mean [dWt] to subtract for updating the online
// learning [LWt] values, producing a zero-sum effect. 1.0 = full zero-sum dWt.
// Only applies to non-zero DWts. There is a separate such factor for [SWt].
// Typically set to 0 for standard trace learning pathways, although some require it
// for stability over the long haul. Can use [Network.SetSubMean] to set to 1 after
// significant early learning has occurred with 0.
// Some special path types (e.g., Hebb) benefit from SubMean = 1 always.
SubMean float32 `default:"0,1"`
// SynTraceTau is the time constant for integrating the synaptic trace [Tr]
// as a function of the synaptic activity credit assignment factor at the end
// of the theta cycle learning timescale. Larger values (greater than 1)
// produce longer time windows of integration, and should only be used when
// there is temporal structure to be learned across these longer timescales.
// This synaptic trace is beneficial in addition to the receiver-based
// eligibility trace [ETrLearn].
SynTraceTau float32 `default:"1,2,4"`
// LearnThr is the threshold for learning, applied to SynCa CaP and CaD for Kinase
// cortical learning rule.
// In Matrix and VSPatch it applies to normalized GeIntNorm value: setting this relatively
// high encourages sparser representations.
LearnThr float32
// SynCa20 uses an effective 20msec time window for synaptic calcium computation
// from the [CaBins] values for send and recv neurons in computing the SynCa
// synaptic calcium value. Only applicable for pathways to [TargetLayer] layers
// (including [PulvinarLayer]), which use synaptic CaP - CaD directly for learning.
// The default of 10 msec (1 bin) works well for most cases.
// Internal cortical layers use integrated CaD-like value directly, see SynCaCycles
// in [LearnTimingParams]. This is only used for long ThetaCycle window (> 250 ms).
SynCa20 slbool.Bool
// CaPScale is a separate multiplier for the CaP component of synaptic calcium, to
// allow separate weighting of potentiation (CaP) vs. depression (CaD) factors.
// Only applicable for pathways to [TargetLayer] layers (including [PulvinarLayer]).
// The default of 1 works best in most cases -- only adjust in special cases.
// An increased CaP level results in an overall potentiation bias, which acts
// like a hebbian learning factor, whereas a lower value produces more negatively
// biased synaptic weight changes.
CaPScale float32 `default:"1,0.95,1.05"`
// Dt rate = 1 / tau
SynTraceDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
pad, pad1 float32
}
func (tp *DWtParams) Defaults() {
tp.SynCa20.SetBool(false)
tp.CaPScale = 1
tp.SubMean = 0
tp.SynTraceTau = 1
tp.LearnThr = 0
tp.Update()
}
func (tp *DWtParams) Update() {
tp.SynTraceDt = 1.0 / tp.SynTraceTau
}
// SynTrace returns updated trace factor as function of the synaptic
// coactivity factor and the current trace.
func (tp *DWtParams) SynTrace(tr float32, syn float32) float32 {
return tr + tp.SynTraceDt*(syn-tr)
}
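// synTraceExample is an illustrative sketch (not part of the generated code)
// of the exponential trace integration tr += SynTraceDt * (syn - tr):
// with SynTraceTau = 4 the trace integrates coactivity over roughly the last
// 4 theta cycles (0.25, 0.44, 0.58, 0.68, ... for constant syn = 1).
func synTraceExample() float32 {
var dp DWtParams
dp.Defaults()
dp.SynTraceTau = 4
dp.Update() // SynTraceDt = 0.25
tr := float32(0)
for i := 0; i < 4; i++ {
tr = dp.SynTrace(tr, 1)
}
return tr // ~0.68
}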
//////// HebbParams
// HebbParams for optional hebbian learning that replaces the
// default learning rule, based on S = sending activity,
// R = receiving activity
type HebbParams struct {
// On turns on the use of the Hebbian learning rule instead of the default.
On slbool.Bool
// Up is the strength multiplier for hebbian increases, based on R * S * (1-LWt).
Up float32 `default:"0.5"`
// Down is the strength multiplier for hebbian decreases, based on R * (1 - S) * LWt.
Down float32 `default:"1"`
pad float32
}
func (hp *HebbParams) Defaults() {
hp.Up = 0.5
hp.Down = 1
}
func (hp *HebbParams) Update() {
}
func (hp *HebbParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return hp.On.IsTrue()
}
}
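// hebbDWtSketch is an illustrative sketch (not part of the generated code) of
// how the Up and Down factors could combine, reading directly from the field
// docs above: increases scale R*S*(1-LWt) by Up, and decreases scale
// R*(1-S)*LWt by Down. The actual rule lives in the DWt kernel code; this is
// only a plausible rendering of the parameter documentation.
func hebbDWtSketch(r, s, lwt float32) float32 {
var hp HebbParams
hp.Defaults() // Up = 0.5, Down = 1
up := hp.Up * s * (1 - lwt) // potentiation term
down := hp.Down * (1 - s) * lwt // depression term
return r * (up - down) // net change, gated by receiver activity
}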
//////// LearnSynParams
// LearnSynParams manages learning-related parameters at the synapse-level.
type LearnSynParams struct {
// Learn enables learning for this pathway.
Learn slbool.Bool
pad, pad1, pad2 int32
// LRateParams manages learning rate parameters for scaling [DWt] delta
// weight values that then update [LWt] online learned weights.
// It has two optional modulation factors on top of a Base learning rate.
LRate LRateParams `display:"inline"`
// DWtParams has misc parameters for computing weight changes ([DWt]) for the default
// trace-based cortical learning rule and for other specialized learning rules.
DWt DWtParams `display:"inline"`
// hebbian learning option, which overrides the default learning rules
Hebb HebbParams `display:"inline"`
}
func (ls *LearnSynParams) Update() {
ls.LRate.Update()
ls.DWt.Update()
ls.Hebb.Update()
}
func (ls *LearnSynParams) Defaults() {
ls.Learn.SetBool(true)
ls.LRate.Defaults()
ls.DWt.Defaults()
ls.Hebb.Defaults()
}
func (ls *LearnSynParams) ShouldDisplay(field string) bool {
switch field {
case "Learn":
return true
default:
return ls.Learn.IsTrue()
}
}
// CHLdWt returns the error-driven weight change component for a
// CHL (contrastive hebbian learning) rule, optionally using the checkmark-shaped
// temporally eXtended Contrastive Attractor Learning (XCAL) function.
func (ls *LearnSynParams) CHLdWt(suCaP, suCaD, ruCaP, ruCaD float32) float32 {
srp := suCaP * ruCaP
srd := suCaD * ruCaD
return srp - srd
}
// DeltaDWt returns the error-driven weight change component for a
// simple delta between a minus and plus phase factor, optionally using the
// checkmark-shaped temporally eXtended Contrastive Attractor Learning (XCAL) function.
func (ls *LearnSynParams) DeltaDWt(plus, minus float32) float32 {
return plus - minus
}
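// chlDeltaExample is an illustrative sketch (not part of the generated code)
// of the two simple error-driven components: CHLdWt uses the difference of
// sender * receiver calcium coproducts between the plus (CaP) and minus (CaD)
// phases, while DeltaDWt is just plus - minus for a single factor.
func chlDeltaExample() float32 {
var ls LearnSynParams
ls.Defaults()
// DeltaDWt(0.9, 0.5) would simply give 0.9 - 0.5 = 0.4
return ls.CHLdWt(0.8, 0.6, 0.9, 0.5) // 0.8*0.9 - 0.6*0.5 = 0.42
}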
//gosl:end
//////// LRateMod
// LRateMod implements global learning rate modulation, based on a performance
// factor such as error: higher levels of the factor produce a higher learning rate.
// This can be added to a Sim and called prior to DWt() to dynamically change lrate
// based on overall network performance. It is not used by default in the standard params.
type LRateMod struct {
// On toggles use of this modulation factor.
On slbool.Bool
// Base is the baseline learning rate -- what you get for correct cases.
Base float32 `min:"0" max:"1"`
pad, pad1 int32
// Range defines the range over which modulation occurs for the modulator factor:
// Min and below get the Base level of learning rate modulation,
// Max and above get a modulation of 1.
Range minmax.F32
}
func (lr *LRateMod) Defaults() {
lr.On.SetBool(true)
lr.Base = 0.2
lr.Range.Set(0.2, 0.8)
}
func (lr *LRateMod) Update() {
}
func (lr *LRateMod) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return lr.On.IsTrue()
}
}
// Mod returns the learning rate modulation factor as a function
// of any kind of normalized modulation factor, e.g., an error measure.
// If fact <= Range.Min, returns Base
// If fact >= Range.Max, returns 1
// otherwise, returns proportional value between Base..1
func (lr *LRateMod) Mod(fact float32) float32 {
lrm := lr.Range.NormValue(fact) // clips to 0-1 range
md := lr.Base + lrm*(1-lr.Base) // resulting mod is in Base-1 range
return md
}
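// lrateModExample is an illustrative sketch (not part of the generated code)
// of the modulation curve: with Base = 0.2 and Range = [0.2, 0.8], an error
// factor of 0.5 normalizes to 0.5 within the range, giving
// Mod = 0.2 + 0.5*(1 - 0.2) = 0.6; factors at or below 0.2 give 0.2,
// and factors at or above 0.8 give 1.
func lrateModExample() float32 {
var lm LRateMod
lm.Defaults() // On, Base = 0.2, Range = [0.2, 0.8]
return lm.Mod(0.5) // 0.6
}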
// LRateMod calls LRateMod on given network, using computed Mod factor
// based on given normalized modulation factor
// (0 = no error = Base learning rate, 1 = maximum error).
// returns modulation factor applied.
func (lr *LRateMod) LRateMod(net *Network, fact float32) float32 {
if lr.Range.Max == 0 {
lr.Defaults()
}
if lr.On.IsFalse() {
return 1
}
md := lr.Mod(fact)
net.LRateMod(md)
return md
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/enums"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
)
// LooperStandard adds all the standard Axon Trial and Cycle level processing calls
// to the given Looper Stacks. cycle and trial are the enums for the looper levels,
// trainMode is the training mode enum value. Cycles obtained from net.Context.
// - ISI (inter-stimulus interval), minus and plus phases of the theta cycle (trial).
// - The clearInputs function is called at the start of the trial to begin
// the ISI period, and applyInputs is called at the start of the minus phase
// (after the ISI) to apply new inputs.
// - embedded beta phases within theta, that record Beta1 and Beta2 states.
// - net.Cycle() at every cycle step.
// - net.DWt() and net.WtFromDWt() learning calls in training mode, with netview update
// between these two calls if it is visible and viewing synapse variables.
// - netview update calls at appropriate levels (no-op if no GUI).
func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, cycle, trial, trainMode enums.Enum, clearInputs, applyInputs func(mode enums.Enum)) {
ctx := net.Context()
isiCycles := int(ctx.ISICycles)
minusCycles := int(ctx.MinusCycles)
plusStart := isiCycles + minusCycles
ls.AddEventAllModes(cycle, "Beta1", isiCycles+50, func() { net.Beta1() })
ls.AddEventAllModes(cycle, "Beta2", isiCycles+100, func() { net.Beta2() })
ls.AddEventAllModes(cycle, "MinusPhase:End", plusStart, func() { net.MinusPhaseEnd() })
ls.AddEventAllModes(cycle, "PlusPhase:Start", plusStart, func() { net.PlusPhaseStart() })
for mode, st := range ls.Stacks {
cycLoop := st.Loops[cycle]
trlLoop := st.Loops[trial]
testing := mode.Int64() != trainMode.Int64()
cycLoop.OnStart.Add("Cycle", LooperCycleStartFunc(ls, net, viewFunc, cycle, mode))
if isiCycles > 0 && mode.Int64() == trainMode.Int64() {
cycLoop.AddEvent("UpdateWeights", isiCycles, LooperUpdateWeightsFunc(ls, net, viewFunc, mode))
}
cycLoop.AddEvent("MinusPhase:Start", isiCycles, func() {
net.MinusPhaseStart()
applyInputs(mode)
})
trlLoop.OnStart.Add("ISI:Start", func() { net.ThetaCycleStart(mode, testing); clearInputs(mode) })
trlLoop.OnEnd.Add("PlusPhase:End", func() { net.PlusPhaseEnd() })
if isiCycles == 0 && mode.Int64() == trainMode.Int64() {
trlLoop.OnEnd.Add("UpdateWeights", LooperUpdateWeightsFunc(ls, net, viewFunc, mode))
}
}
}
// LooperCycleStartFunc returns a standard looper OnStart function at Cycle level,
// which runs every cycle and updates the view.
func LooperCycleStartFunc(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, cycle, mode enums.Enum) func() {
return func() {
getNeurons := false
if ls.ModeStack().StepLevel.Int64() == cycle.Int64() {
getNeurons = true
} else if view := viewFunc(mode); view != nil && view.View != nil {
if view.IsCycleUpdating() {
getNeurons = true
} else {
if view.Time < Theta {
getNeurons = true
}
}
}
net.Cycle(getNeurons)
if UseGPU && !getNeurons {
net.Context().CycleInc() // keep synced
}
}
}
// LooperUpdateWeightsFunc returns a standard looper function to update the weights
// (run at the end of the Trial, or as a Cycle-level event when ISI cycles are used),
// with different GPU logic for when weights are being viewed.
func LooperUpdateWeightsFunc(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, mode enums.Enum) func() {
return func() {
if view := viewFunc(mode); view != nil && view.IsViewingSynapse() {
net.DWt() // todo: need to get synapses here, not after
view.RecordSyns() // note: critical to update weights here so DWt is visible
net.WtFromDWt()
} else {
net.DWtToWt()
}
}
}
// LooperUpdateNetView adds netview update calls at the given
// trial and cycle levels, for the NetViewUpdate associated with each mode,
// as returned by the given viewFunc function.
// The NetViewUpdate's CounterFunc provides the counters and other stats to display
// at the bottom of the NetView, based on given mode and level.
func LooperUpdateNetView(ls *looper.Stacks, cycle, trial enums.Enum, viewFunc func(mode enums.Enum) *NetViewUpdate) {
for mode, st := range ls.Stacks {
viewUpdt := viewFunc(mode)
cycLoop := st.Loops[cycle]
cycLoop.OnEnd.Add("GUI:UpdateNetView", func() {
viewUpdt.UpdateCycle(cycLoop.Counter.Cur, mode, cycle)
})
trlLoop := st.Loops[trial]
trlLoop.OnEnd.Add("GUI:UpdateNetView", func() {
viewUpdt.GoUpdate(mode, trial)
})
}
}
//////// NetViewUpdate
//gosl:start
// ViewTimes are the options for when the NetView can be updated.
type ViewTimes int32 //enums:enum
const (
// Cycle is an update of neuron state, equivalent to 1 msec of real time.
Cycle ViewTimes = iota
// FastSpike is 10 cycles (msec) or 100 Hz. This is the fastest spiking time
// generally observed in the neocortex.
FastSpike
// Gamma is 25 cycles (msec) or 40 Hz. Neocortical activity often exhibits
// synchrony peaks in this range.
Gamma
// Beta is 50 cycles (msec) or 20 Hz (two Gammas).
// Gating in the basal ganglia and associated updating in prefrontal
// cortex occurs at this frequency.
Beta
// Alpha is 100 cycles (msec) or 10 Hz (two Betas).
// Posterior neocortex exhibits synchrony peaks in this range,
// corresponding to the intrinsic bursting frequency of layer 5
// IB neurons, and corticothalamic loop resonance.
Alpha
// Phase is the Minus or Plus phase, where plus phase is bursting / outcome
// that drives positive learning relative to prediction in minus phase.
// Minus phase is at 150 cycles (msec).
Phase
// Theta is 200 cycles (msec) or 5 Hz (two Alphas), i.e., a Trial.
// This is the modal duration of a saccade, the update frequency of
// medial temporal lobe episodic memory, and the minimal predictive learning cycle
// (perceive on Alpha 1, predict on 2).
Theta
)
//gosl:end
// ViewTimeCycles are the cycle intervals associated with each ViewTimes level.
var ViewTimeCycles = []int{1, 10, 25, 50, 100, 150, 200}
// Cycles returns the number of cycles associated with a given view time.
func (vt ViewTimes) Cycles() int {
return ViewTimeCycles[vt]
}
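// viewTimesExample is an illustrative sketch (not part of the generated code)
// of the mapping from ViewTimes levels to cycle counts, as used by UpdateCycle
// to decide when to refresh the display.
func viewTimesExample() {
_ = FastSpike.Cycles() // 10 cycles (msec), ~100 Hz
_ = Gamma.Cycles() // 25 cycles, ~40 Hz
_ = Theta.Cycles() // 200 cycles = one full trial
}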
// NetViewUpdate manages time scales for updating the NetView.
// Use one of these for each mode you want to control separately.
type NetViewUpdate struct {
// On toggles update of display on
On bool
// Time scale to update the network view (Cycle to Trial timescales).
Time ViewTimes
// CounterFunc returns the counter string showing current counters etc.
CounterFunc func(mode, level enums.Enum) string `display:"-"`
// View is the network view.
View *netview.NetView `display:"-"`
}
// Config configures for given NetView, time and counter function,
// which returns a string to show at the bottom of the netview,
// given the current mode and level.
func (vu *NetViewUpdate) Config(nv *netview.NetView, tm ViewTimes, fun func(mode, level enums.Enum) string) {
vu.View = nv
vu.On = true
vu.Time = tm
vu.CounterFunc = fun
}
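// netViewConfigExample is an illustrative sketch (not part of the generated code)
// of configuring a NetViewUpdate to update at the Theta (trial) timescale.
// The *netview.NetView would normally come from the GUI; nil here just shows
// the call shape (and leaves ShouldUpdate returning false).
func netViewConfigExample() {
var vu NetViewUpdate
vu.Config(nil, Theta, func(mode, level enums.Enum) string {
return "Run: 0 Epoch: 0 Trial: 0" // any counter string to display at the bottom
})
}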
// ShouldUpdate returns true if the view is On,
// the View is not nil, and it is visible.
func (vu *NetViewUpdate) ShouldUpdate() bool {
if !vu.On || vu.View == nil || !vu.View.IsVisible() {
return false
}
return true
}
// GoUpdate does an update if view is On, visible and active,
// including recording new data and driving update of display.
// This version is only for calling from a separate goroutine,
// not the main event loop (see also Update).
func (vu *NetViewUpdate) GoUpdate(mode, level enums.Enum) {
if !vu.ShouldUpdate() {
return
}
if vu.IsCycleUpdating() && vu.View.Options.Raster.On { // no update for raster
return
}
counters := vu.CounterFunc(mode, level)
vu.View.Record(counters, -1) // -1 = default incrementing raster
vu.View.GoUpdateView()
}
// Update does an update if view is On, visible and active,
// including recording new data and driving update of display.
// This version is only for calling from the main event loop
// (see also GoUpdate).
func (vu *NetViewUpdate) Update(mode, level enums.Enum) {
if !vu.ShouldUpdate() {
return
}
counters := vu.CounterFunc(mode, level)
vu.View.Record(counters, -1) // -1 = default incrementing raster
vu.View.UpdateView()
}
// UpdateWhenStopped does an update when the network updating was stopped
// either via stepping or hitting the stop button.
// This has different logic for the raster view vs. regular.
// This is only for calling from a separate goroutine,
// not the main event loop.
func (vu *NetViewUpdate) UpdateWhenStopped(mode, level enums.Enum) {
if !vu.ShouldUpdate() {
return
}
if !vu.View.Options.Raster.On { // always record when not in raster mode
counters := vu.CounterFunc(mode, level)
vu.View.Record(counters, -1) // -1 = use a dummy counter
}
vu.View.GoUpdateView()
}
// IsCycleUpdating returns true if the view is updating at a cycle level,
// either from raster or literal cycle level.
func (vu *NetViewUpdate) IsCycleUpdating() bool {
if !vu.ShouldUpdate() {
return false
}
if vu.View.Options.Raster.On || vu.Time == Cycle {
return true
}
return false
}
// IsViewingSynapse returns true if netview is actively viewing synapses.
func (vu *NetViewUpdate) IsViewingSynapse() bool {
if !vu.ShouldUpdate() {
return false
}
return vu.View.IsViewingSynapse()
}
// UpdateCycle triggers an update at the Cycle (millisecond) timescale,
// using the CounterFunc text to display at the bottom of the view.
func (vu *NetViewUpdate) UpdateCycle(cyc int, mode, level enums.Enum) {
if !vu.ShouldUpdate() {
return
}
if vu.View.Options.Raster.On {
counters := vu.CounterFunc(mode, level)
vu.updateCycleRaster(cyc, counters)
return
}
if vu.Time == Theta { // only trial
return
}
vtc := vu.Time.Cycles()
if (cyc+1)%vtc == 0 {
vu.GoUpdate(mode, level)
}
}
// updateCycleRaster is the raster version of the Cycle update.
// It always records data at the cycle level.
func (vu *NetViewUpdate) updateCycleRaster(cyc int, counters string) {
vu.View.Record(counters, cyc)
vtc := vu.Time.Cycles()
if (cyc+1)%vtc == 0 {
vu.View.GoUpdateView()
}
}
// RecordSyns records synaptic data, which is stored separately from unit data;
// it only needs to be called when synaptic values are updated.
// Should be done when the DWt values have been computed, before
// updating Wts and zeroing.
// NetView displays this recorded data when Update is next called.
func (vu *NetViewUpdate) RecordSyns() {
if !vu.ShouldUpdate() {
return
}
vu.View.RecordSyns()
}
// Code generated by "goal build"; DO NOT EDIT.
//line network.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
//go:generate core generate -add-types -gosl
import (
"crypto/md5"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"log"
"log/slog"
"math"
"os"
"path/filepath"
"strings"
"time"
"cogentcore.org/core/base/datasize"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/iox/tomlx"
"cogentcore.org/core/base/slicesx"
"cogentcore.org/core/base/timer"
"cogentcore.org/core/core"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/text/textcore"
"cogentcore.org/core/tree"
"cogentcore.org/lab/tensor"
"github.com/emer/axon/v2/kinase"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/paths"
)
//gosl:start
// NetworkIndexes are indexes and sizes for processing network.
type NetworkIndexes struct {
// MaxData is the maximum number of data inputs that can be processed
// in parallel in one pass of the network.
// Neuron storage is allocated to hold this amount during
// Build process, and this value reflects that.
MaxData uint32 `edit:"-"`
// MaxDelay is the maximum synaptic delay across all pathways at the time of
// [Network.Build]. This determines the size of the spike sending delay buffers.
MaxDelay uint32 `edit:"-"`
// NCaBins is the total number of [CaBins] in the neuron state variables.
// Set to [Context.ThetaCycles] / [Context.CaBinCycles] in Build.
NCaBins int32 `edit:"-"`
// NLayers is the number of layers in the network.
NLayers uint32 `edit:"-"`
// NNeurons is the total number of neurons.
NNeurons uint32 `edit:"-"`
// NPools is the total number of pools.
NPools uint32 `edit:"-"`
// NPaths is the total number of paths.
NPaths uint32 `edit:"-"`
// NSyns is the total number of synapses.
NSyns uint32 `edit:"-"`
// RubiconNPosUSs is the total number of Rubicon Drives / positive USs.
RubiconNPosUSs uint32 `edit:"-"`
// RubiconNCosts is the total number of Rubicon Costs.
RubiconNCosts uint32 `edit:"-"`
// RubiconNNegUSs is the total number of Rubicon Negative USs.
RubiconNNegUSs uint32 `edit:"-"`
pad uint32
}
//gosl:end
// Network implements the Axon spiking model.
// Most of the fields are copied to the global vars, needed for GPU,
// via the SetAsCurrent method, and must be slices or tensors so that
// there is one canonical underlying instance of all such data.
// There are also Layer and Path lists that are used to scaffold the
// building and display of the network, but contain no data.
type Network struct {
emer.NetworkBase
// Rubicon system for goal-driven motivated behavior,
// including Rubicon phasic dopamine signaling.
// Manages internal drives, US outcomes. Core LHb (lateral habenula)
// and VTA (ventral tegmental area) dopamine are computed
// in equations using inputs from specialized network layers
// (LDTLayer driven by BLA, CeM layers, VSPatchLayer).
// Renders USLayer, PVLayer, DrivesLayer representations
// based on state updated here.
Rubicon Rubicon
// Layers is the array of layers, used for CPU initialization, not GPU computation.
Layers []*Layer
// Paths has pointers to all pathways in the network, sender-based, for CPU initialization,
// not GPU computation.
Paths []*Path `display:"-"`
// LayerClassMap is a map from class name to layer names.
LayerClassMap map[string][]string `display:"-"`
// NThreads is number of threads to use for parallel processing.
NThreads int
// todo: following is basically obsolete:
// record function timer information.
RecFunTimes bool `display:"-"`
// timers for each major function (step of processing).
FunTimes map[string]*timer.Time `display:"-"`
//////// Params
// LayerParams are all the layer parameters. [NLayers]
LayerParams []LayerParams `display:"-"`
// PathParams are all the path parameters, in sending order. [NPaths]
PathParams []PathParams `display:"-"`
//////// Indexes
// NetworkIxs have indexes and sizes for entire network (one only).
NetworkIxs []NetworkIndexes
// PoolIxs have index values for each Pool.
// [Layer * Pools][PoolIndexVars]
PoolIxs tensor.Uint32 `display:"-"`
// NeuronIxs have index values for each neuron: index into layer, pools.
// [Neurons][Indexes]
NeuronIxs tensor.Uint32 `display:"-"`
// SynapseIxs have index values for each synapse:
// providing index into recv, send neurons, path.
// [Indexes][NSyns]; NSyns = [Layer][SendPaths][SendNeurons][Syns]
SynapseIxs tensor.Uint32 `display:"-"`
// PathSendCon are starting offset and N cons for each sending neuron,
// for indexing into the Syns synapses, which are organized sender-based.
// [NSendCon][StartNN]; NSendCon = [Layer][SendPaths][SendNeurons]
PathSendCon tensor.Uint32 `display:"-"`
// RecvPathIxs indexes into Paths (organized by SendPath) organized
// by recv pathways. needed for iterating through recv paths efficiently on GPU.
// [NRecvPaths] = [Layer][RecvPaths]
RecvPathIxs tensor.Uint32 `display:"-"`
// PathRecvCon are the receiving path starting index and number of connections.
// [NRecvCon][StartNN]; NRecvCon = [Layer][RecvPaths][RecvNeurons]
PathRecvCon tensor.Uint32 `display:"-"`
// RecvSynIxs are the indexes into Synapses for each recv neuron, organized
// into blocks according to PathRecvCon, for receiver-based access.
// [NSyns] = [Layer][RecvPaths][RecvNeurons][Syns]
RecvSynIxs tensor.Uint32 `display:"-"`
//////// Neuron State
// note: A slice is needed even for single elements so that global vars and network
// point to the same underlying instance.
// Ctx is the context state (one). Other copies of Context can be maintained
// and applied via [SetContext] to update this one, but this instance is the canonical one.
Ctx []Context `new-window:"+"`
// Neurons are all the neuron state variables.
// [Neurons][Data][Vars]
Neurons tensor.Float32 `display:"-"`
// NeuronAvgs are variables with averages over the
// Data parallel dimension for each neuron.
// [Neurons][Vars]
NeuronAvgs tensor.Float32 `display:"-"`
// Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition,
// including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex].
// [Layer * Pools][Data][PoolVars+AvgMax]
Pools tensor.Float32 `display:"-"`
// PoolsInt are the [PoolIntVars] int32 state values for layer and sub-pool
// inhibition, AvgMax atomic integration, and other vars: use [AvgMaxIntVarIndex]
// [Layer * Pools][Data][PoolIntVars+AvgMax]
PoolsInt tensor.Int32 `display:"-"`
// LayerStates holds layer-level state values, with variables defined in
// [LayerVars], for each layer and Data parallel index.
// [Layer][Data][LayerVarsN]
LayerStates tensor.Float32 `display:"-"`
// GlobalScalars are the global scalar state variables.
// [GlobalScalarVarsN+2*NCaWeights][Data]
GlobalScalars tensor.Float32 `display:"-"`
// GlobalVectors are the global vector state variables.
// [GlobalVectorsN][MaxGlobalVecN][Data]
GlobalVectors tensor.Float32 `display:"-"`
// Exts are external input values for all Input / Target / Compare layers
// in the network. The ApplyExt methods write to this per layer,
// and it is then actually applied in one consistent method.
// [NExts][Data]; NExts = [In / Out Layers][Neurons]
Exts tensor.Float32 `display:"-"`
//////// Synapse State
// PathGBuf is the conductance buffer for accumulating spikes.
// Subslices are allocated to each pathway.
// Uses int-encoded values for faster GPU atomic integration.
// [NPathNeur][Data][MaxDel+1]; NPathNeur = [Layer][RecvPaths][RecvNeurons]
PathGBuf tensor.Int32 `display:"-"`
// PathGSyns are synaptic conductances integrated over time per pathway
// per recv neuron. Spikes come in via PathGBuf.
// Subslices are allocated to each pathway.
// [NPathNeur][Data]
PathGSyns tensor.Float32 `display:"-"`
// Synapses are the synapse level variables (weights etc).
//
// These do not depend on the data parallel index, unlike [SynapseTraces].
// [NSyns][Vars]; NSyns = [Layer][SendPaths][SendNeurons][Syns]
Synapses tensor.Float32 `display:"-"`
//////// SynapseTraces
// SynapseTraces are synaptic variables that depend on the data
// parallel index, for accumulating learning traces and weight changes per data.
// This is the largest data size, so multiple instances are used
// to handle larger networks.
// [NSyns][Data][Vars]; NSyns = [Layer][SendPaths][SendNeurons][Syns]
SynapseTraces tensor.Float32 `display:"-"`
// SynapseTraces1 is an overflow buffer for SynapseTraces. // todo
// SynapseTraces1 tensor.Float32 `display:"-"`
}
// Context gets the network context state.
func (nt *Network) Context() *Context { return &nt.Ctx[0] }
func (nt *Network) NetIxs() *NetworkIndexes { return &nt.NetworkIxs[0] }
// SetContext sets the values of the network context, which is the canonical instance.
func (nt *Network) SetContext(ctx *Context) { nt.Ctx[0] = *ctx }
// SetNData sets the NData in [Context] to given value.
func (nt *Network) SetNData(nData int) { nt.Context().NData = uint32(nData) }
// SetMaxData sets the MaxData and current NData to the same value.
func (nt *Network) SetMaxData(maxData int) {
nt.NetIxs().MaxData = uint32(maxData)
nt.SetNData(maxData)
}
// emer.Network interface methods:
func (nt *Network) NumLayers() int { return len(nt.Layers) }
func (nt *Network) EmerLayer(idx int) emer.Layer { return nt.Layers[idx] }
func (nt *Network) MaxParallelData() int { return int(nt.NetIxs().MaxData) }
func (nt *Network) NParallelData() int { return int(nt.Context().NData) }
func (nt *Network) Init() {
nt.NetworkIxs = make([]NetworkIndexes, 1)
nt.Ctx = make([]Context, 1)
nt.Context().Defaults()
nt.NetIxs().MaxData = 1
NetworkIxs = nt.NetworkIxs // may reference things before build
}
// NewNetwork returns a new axon Network
func NewNetwork(name string) *Network {
net := &Network{}
emer.InitNetwork(net, name)
net.Init()
return net
}
// Defaults sets all the default parameters for all layers and pathways
func (nt *Network) Defaults() {
nt.Rubicon.Defaults()
nt.SetNThreads(0) // default
for _, ly := range nt.Layers {
ly.Defaults()
}
}
// UpdateParams updates all the derived parameters if any have changed, for all layers
// and pathways
func (nt *Network) UpdateParams() {
for _, ly := range nt.Layers {
ly.UpdateParams()
}
}
// LayerByName returns a layer by looking it up by name in the layer map
// (nil if not found).
func (nt *Network) LayerByName(name string) *Layer {
ely, _ := nt.EmerLayerByName(name)
if ely == nil {
return nil
}
return ely.(*Layer)
}
// LayersByType returns a list of layer names by given layer type(s).
func (nt *Network) LayersByType(layType ...LayerTypes) []string {
var nms []string
for _, tp := range layType {
nm := tp.String()
nms = append(nms, nm)
}
return nt.LayersByClass(nms...)
}
func (nt *Network) UpdateLayerMaps() {
nt.UpdateLayerNameMap()
nt.LayerClassMap = make(map[string][]string)
for _, ly := range nt.Layers {
cs := ly.Type.String() + " " + ly.Class
cls := strings.Split(cs, " ")
for _, cl := range cls {
if cl == "" {
continue
}
ll := nt.LayerClassMap[cl]
ll = append(ll, ly.Name)
nt.LayerClassMap[cl] = ll
}
}
}
// LayersByClass returns a list of layer names by given class(es).
// Lists are compiled when network Build() function called,
// or now if not yet present.
// The layer Type is always included as a Class, along with any other
// space-separated strings specified in Class for parameter styling, etc.
// If no classes are passed, all layer names in order are returned.
func (nt *Network) LayersByClass(classes ...string) []string {
if nt.LayerClassMap == nil {
nt.UpdateLayerMaps()
}
var nms []string
if len(classes) == 0 {
for _, ly := range nt.Layers {
if ly.Off {
continue
}
nms = append(nms, ly.Name)
}
return nms
}
for _, lc := range classes {
nms = append(nms, nt.LayerClassMap[lc]...)
}
// only get unique layers
layers := []string{}
has := map[string]bool{}
for _, nm := range nms {
if has[nm] {
continue
}
layers = append(layers, nm)
has[nm] = true
}
if len(layers) == 0 {
panic(fmt.Sprintf("No Layers found for query: %#v.", classes))
}
return layers
}
// UnitVarNames returns a list of variable names available on the units in this network.
// Not all layers need to support all variables, but must safely return 0's for
// unsupported ones. The order of this list determines NetView variable display order.
// This is typically a global list so do not modify!
func (nt *Network) UnitVarNames() []string {
return NeuronVarNames
}
func (nt *Network) VarCategories() []emer.VarCategory {
return VarCategories
}
// UnitVarProps returns properties for variables
func (nt *Network) UnitVarProps() map[string]string {
return NeuronVarProps
}
// SynVarNames returns the names of all the variables on the synapses in this network.
// Not all pathways need to support all variables, but must safely return 0's for
// unsupported ones. The order of this list determines NetView variable display order.
// This is typically a global list so do not modify!
func (nt *Network) SynVarNames() []string {
return SynapseVarNames
}
// SynVarProps returns properties for variables
func (nt *Network) SynVarProps() map[string]string {
return SynapseVarProps
}
// KeyLayerParams returns a listing for all layers in the network,
// of the most important layer-level params (specific to each algorithm).
func (nt *Network) KeyLayerParams() string {
return nt.AllLayerInhibs()
}
// KeyPathParams returns a listing for all Recv pathways in the network,
// of the most important pathway-level params (specific to each algorithm).
func (nt *Network) KeyPathParams() string {
return nt.AllPathScales()
}
// AllLayerInhibs returns a listing of all Layer Inhibition parameters in the Network
func (nt *Network) AllLayerInhibs() string {
var b strings.Builder
for _, ly := range nt.Layers {
if ly.Off {
continue
}
lp := ly.Params
b.WriteString(fmt.Sprintf("%15s\t%15s\tNominal: %6.2f", ly.Name, strings.TrimSuffix(ly.Type.String(), "Layer"), lp.Inhib.ActAvg.Nominal))
if lp.Inhib.Layer.On.IsTrue() {
b.WriteString(fmt.Sprintf("\tLayer.Gi: %6.2f", lp.Inhib.Layer.Gi))
}
if lp.Inhib.Pool.On.IsTrue() {
b.WriteString(fmt.Sprintf("\tPool.Gi: %6.2f", lp.Inhib.Pool.Gi))
}
if lp.Learn.NeuroMod.DAMod != NoDAMod {
b.WriteString(fmt.Sprintf("\t%7s\t%7s", lp.Learn.NeuroMod.DAMod.String(), lp.Learn.NeuroMod.Valence.String()))
}
b.WriteString("\n")
}
return b.String()
}
// AllPathScales returns a listing of all PathScale parameters in the Network
// in all Layers, Recv pathways. These are among the most important
// and numerous parameters (in larger networks) -- this helps keep
// track of what they are all set to.
func (nt *Network) AllPathScales() string {
str := ""
for _, ly := range nt.Layers {
if ly.Off {
continue
}
str += "\nLayer: " + ly.Name + "\n"
for i := 0; i < ly.NumRecvPaths(); i++ {
pt := ly.RecvPaths[i]
if pt.Off {
continue
}
sn := pt.Send.Name
str += fmt.Sprintf("%15s %10s Abs:\t%6.2f\tRel:\t%6.2f\tGScale:\t%6.2f\tRel:\t%6.2f\tLRate:\t%6.2f\n", sn, strings.TrimSuffix(pt.Type.String(), "Path"), pt.Params.PathScale.Abs, pt.Params.PathScale.Rel, pt.Params.GScale.Scale, pt.Params.GScale.Rel, pt.Params.Learn.LRate.Base)
}
}
return str
}
// SaveParamsSnapshot saves various views of current parameters
// to either `params_good` if good = true (for current good reference params)
// or `params_2006_01_02` (year, month, day) datestamp,
// providing a snapshot of the simulation params for easy diffs and later reference.
// Also saves current Config state.
func (nt *Network) SaveParamsSnapshot(cfg any, good bool) error {
date := time.Now().Format("2006_01_02")
if good {
date = "good"
}
dir := "params_" + date
err := os.Mkdir(dir, 0775)
if err != nil {
errors.Log(err) // notify but OK if it exists
}
fmt.Println("Saving params to:", dir)
tomlx.Save(cfg, filepath.Join(dir, "config.toml"))
nt.SaveParams(emer.AllParams, core.Filename(filepath.Join(dir, "params_all.txt")))
nt.SaveParams(emer.NonDefault, core.Filename(filepath.Join(dir, "params_nondef.txt")))
nt.SaveAllLayerInhibs(core.Filename(filepath.Join(dir, "params_layers.txt")))
nt.SaveAllPathScales(core.Filename(filepath.Join(dir, "params_paths.txt")))
return nil
}
// SaveAllLayerInhibs saves list of all layer Inhibition parameters to given file
func (nt *Network) SaveAllLayerInhibs(filename core.Filename) error {
str := nt.AllLayerInhibs()
err := os.WriteFile(string(filename), []byte(str), 0666)
return errors.Log(err)
}
// SaveAllPathScales saves a listing of all PathScale parameters in the Network
// in all Layers, Recv pathways. These are among the most important
// and numerous parameters (in larger networks) -- this helps keep
// track of what they are all set to.
func (nt *Network) SaveAllPathScales(filename core.Filename) error {
str := nt.AllPathScales()
err := os.WriteFile(string(filename), []byte(str), 0666)
return errors.Log(err)
}
// AllGlobals returns a listing of all Global variables and values.
func (nt *Network) AllGlobals() string {
nix := nt.NetIxs()
md := nix.MaxData
ctx := nt.Context()
nCaWts := ctx.NCaWeights()
str := ""
for di := uint32(0); di < md; di++ {
str += fmt.Sprintf("\n###############################\nData Index: %02d\n\n", di)
for vv := GvRew; vv < GvCaBinWts; vv++ {
str += fmt.Sprintf("%20s:\t%7.4f\n", vv.String(), GlobalScalars.Value(int(vv), int(di)))
}
for vv := GvCost; vv <= GvCostRaw; vv++ {
str += fmt.Sprintf("%20s:\t", vv.String())
for ui := uint32(0); ui < nix.RubiconNCosts; ui++ {
str += fmt.Sprintf("%d: %7.4f\t", ui, GlobalVectors.Value(int(vv), int(ui), int(di)))
}
str += "\n"
}
for vv := GvUSneg; vv <= GvUSnegRaw; vv++ {
str += fmt.Sprintf("%20s:\t", vv.String())
for ui := uint32(0); ui < nix.RubiconNNegUSs; ui++ {
str += fmt.Sprintf("%d: %7.4f\t", ui, GlobalVectors.Value(int(vv), int(ui), int(di)))
}
str += "\n"
}
for vv := GvDrives; vv < GlobalVectorVarsN; vv++ {
str += fmt.Sprintf("%20s:\t", vv.String())
for ui := uint32(0); ui < nix.RubiconNPosUSs; ui++ {
str += fmt.Sprintf("%d:\t%7.4f\t", ui, GlobalVectors.Value(int(vv), int(ui), int(di)))
}
str += "\n"
}
}
str += "\n###############################\nSpike Bin Weights\n\n"
for i := range nCaWts {
str += fmt.Sprintf("CaBinWtsCaP%02d:\t%7.4f\n", i, GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(i)), int(0)))
}
str += "#### CaD\n"
for i := range nCaWts {
str += fmt.Sprintf("CaBinWtsCaD%02d:\t%7.4f\n", i, GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(nCaWts+i)), int(0)))
}
return str
}
// ShowAllGlobals shows a listing of all Global variables and values.
func (nt *Network) ShowAllGlobals() { //types:add
agv := nt.AllGlobals()
textcore.TextDialog(nil, "All Global Vars: "+nt.Name, agv)
}
// AllGlobalValues adds to map of all Global variables and values.
// ctrKey is a key of counters to contextualize values.
func (nt *Network) AllGlobalValues(ctrKey string, vals map[string]float32) {
nix := nt.NetIxs()
md := nix.MaxData
for di := uint32(0); di < md; di++ {
for vv := GvRew; vv < GvCaBinWts; vv++ {
key := fmt.Sprintf("%s Di: %d\t%s", ctrKey, di, vv.String())
vals[key] = GlobalScalars.Value(int(vv), int(di))
}
for vv := GvCost; vv <= GvCostRaw; vv++ {
for ui := uint32(0); ui < nix.RubiconNCosts; ui++ {
key := fmt.Sprintf("%s Di: %d\t%s\t%d", ctrKey, di, vv.String(), ui)
vals[key] = GlobalVectors.Value(int(vv), int(ui), int(di))
}
}
for vv := GvUSneg; vv <= GvUSnegRaw; vv++ {
for ui := uint32(0); ui < nix.RubiconNNegUSs; ui++ {
key := fmt.Sprintf("%s Di: %d\t%s\t%d", ctrKey, di, vv.String(), ui)
vals[key] = GlobalVectors.Value(int(vv), int(ui), int(di))
}
}
for vv := GvDrives; vv < GlobalVectorVarsN; vv++ {
for ui := uint32(0); ui < nix.RubiconNPosUSs; ui++ {
key := fmt.Sprintf("%s Di: %d\t%s\t%d", ctrKey, di, vv.String(), ui)
vals[key] = GlobalVectors.Value(int(vv), int(ui), int(di))
}
}
}
}
// AddLayerInit adds layer to network with proper initialization.
func (nt *Network) AddLayerInit(ly *Layer, name string, typ LayerTypes, shape ...int) {
if nt.EmerNetwork == nil {
log.Printf("Network EmerNetwork is nil: MUST call emer.InitNetwork on network, passing a pointer to the network to initialize properly!")
return
}
emer.InitLayer(ly, name)
ly.Network = nt
ly.Shape.SetShapeSizes(shape...)
ly.Type = typ
nt.Layers = append(nt.Layers, ly)
ly.Index = len(nt.Layers) - 1
ly.BuildConfig = make(map[string]string)
ly.Doc = typ.Desc()
nt.UpdateLayerMaps()
}
// AddLayer adds a new layer with given name and shape to the network.
// 2D and 4D layer shapes are generally preferred but not essential -- see
// AddLayer2D and 4D for convenience methods for those. 4D layers enable
// pool (unit-group) level inhibition in Axon networks, for example.
// shape is in row-major format with outer-most dimensions first:
// e.g., 4D 3, 2, 4, 5 = 3 rows (Y) of 2 cols (X) of pools, with each unit
// group having 4 rows (Y) of 5 (X) units.
func (nt *Network) AddLayer(name string, typ LayerTypes, shape ...int) *Layer {
ly := &Layer{}
nt.AddLayerInit(ly, name, typ, shape...)
return ly
}
// AddLayer2D adds a new layer with given name and 2D shape to the network.
// 2D and 4D layer shapes are generally preferred but not essential.
func (nt *Network) AddLayer2D(name string, typ LayerTypes, shapeY, shapeX int) *Layer {
return nt.AddLayer(name, typ, shapeY, shapeX)
}
// AddLayer4D adds a new layer with given name and 4D shape to the network.
// 4D layers enable pool (unit-group) level inhibition in Axon networks, for example.
// shape is in row-major format with outer-most dimensions first:
// e.g., 4D 3, 2, 4, 5 = 3 rows (Y) of 2 cols (X) of pools, with each pool
// having 4 rows (Y) of 5 (X) neurons.
func (nt *Network) AddLayer4D(name string, typ LayerTypes, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
return nt.AddLayer(name, typ, nPoolsY, nPoolsX, nNeurY, nNeurX)
}
// ConnectLayerNames establishes a pathway between two layers, referenced by name
// adding to the recv and send pathway lists on each side of the connection.
// Returns error if not successful.
func (nt *Network) ConnectLayerNames(send, recv string, pat paths.Pattern, typ PathTypes) (rlay, slay *Layer, pt *Path, err error) {
rlay = nt.LayerByName(recv)
if rlay == nil {
return
}
slay = nt.LayerByName(send)
if slay == nil {
return
}
pt = nt.ConnectLayers(slay, rlay, pat, typ)
return
}
// ConnectLayers establishes a pathway between two layers,
// adding to the recv and send pathway lists on each side of the connection.
func (nt *Network) ConnectLayers(send, recv *Layer, pat paths.Pattern, typ PathTypes) *Path {
pt := &Path{}
emer.InitPath(pt)
pt.Connect(send, recv, pat, typ)
recv.RecvPaths = append(recv.RecvPaths, pt)
send.SendPaths = append(send.SendPaths, pt)
return pt
}
// BidirConnectLayerNames establishes bidirectional pathways between two layers,
// referenced by name, with low = the lower layer that sends a Forward pathway
// to the high layer, and receives a Back pathway in the opposite direction.
// Returns error if not successful.
func (nt *Network) BidirConnectLayerNames(low, high string, pat paths.Pattern) (lowlay, highlay *Layer, fwdpt, backpt *Path, err error) {
lowlay = nt.LayerByName(low)
if lowlay == nil {
return
}
highlay = nt.LayerByName(high)
if highlay == nil {
return
}
fwdpt = nt.ConnectLayers(lowlay, highlay, pat, ForwardPath)
backpt = nt.ConnectLayers(highlay, lowlay, pat, BackPath)
return
}
// BidirConnectLayers establishes bidirectional pathways between two layers,
// with low = lower layer that sends a Forward pathway to the high layer,
// and receives a Back pathway in the opposite direction.
func (nt *Network) BidirConnectLayers(low, high *Layer, pat paths.Pattern) (fwdpt, backpt *Path) {
fwdpt = nt.ConnectLayers(low, high, pat, ForwardPath)
backpt = nt.ConnectLayers(high, low, pat, BackPath)
return
}
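// connectExample is an illustrative sketch (not part of the generated code) of
// the typical layer and pathway construction sequence prior to calling Build().
// SuperLayer is assumed here to be the standard hidden (cortical) layer type,
// and paths.NewFull() the all-to-all connectivity pattern from the emergent
// paths package.
func connectExample() *Network {
net := NewNetwork("example")
in := net.AddLayer2D("Input", InputLayer, 5, 5)
hid := net.AddLayer2D("Hidden", SuperLayer, 10, 10)
out := net.AddLayer2D("Output", TargetLayer, 5, 5)
full := paths.NewFull()
net.ConnectLayers(in, hid, full, ForwardPath)
net.BidirConnectLayers(hid, out, full)
// net.Defaults() and net.Build() follow, after Context params are configured.
return net
}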
// LateralConnectLayer establishes a self-pathway within given layer.
func (nt *Network) LateralConnectLayer(lay *Layer, pat paths.Pattern) *Path {
pt := &Path{}
return nt.LateralConnectLayerPath(lay, pat, pt)
}
// LateralConnectLayerPath makes lateral self-pathway using given pathway.
func (nt *Network) LateralConnectLayerPath(lay *Layer, pat paths.Pattern, pt *Path) *Path {
emer.InitPath(pt)
pt.Connect(lay, lay, pat, LateralPath)
lay.RecvPaths = append(lay.RecvPaths, pt)
lay.SendPaths = append(lay.SendPaths, pt)
return pt
}
// Build constructs the layer and pathway state based on the layer shapes
// and patterns of interconnectivity. Everything in the network must have been
// configured by this point, including key values in Context such as ThetaCycles
// and CaBinCycles which drive allocation of number of [CaBins] neuron
// variables and corresponding [GvCaBinWts] global scalar variables.
func (nt *Network) Build() error { //types:add
nix := nt.NetIxs()
ctx := nt.Context()
maxBins := ctx.NCaBins()
nix.NCaBins = maxBins
nt.UpdateLayerMaps()
if nt.Rubicon.NPosUSs == 0 {
nt.Rubicon.SetNUSs(1, 1)
}
nt.Rubicon.Update()
nt.FunTimes = make(map[string]*timer.Time)
maxData := int(nix.MaxData)
var errs []error
totNeurons := 0
totPaths := 0
totExts := 0
nLayers := len(nt.Layers)
totPools := nLayers // layer pool for each layer at least
for _, ly := range nt.Layers {
if ly.Off { // note: better not turn on later!
continue
}
totPools += ly.NumPools()
nn := ly.Shape.Len()
totNeurons += nn
if ly.Type.IsExt() {
totExts += nn
}
totPaths += ly.NumSendPaths() // either way
}
nix.NNeurons = uint32(totNeurons)
nix.NLayers = uint32(nLayers)
nix.NPools = uint32(totPools)
nix.NPaths = uint32(totPaths)
nix.RubiconNPosUSs = nt.Rubicon.NPosUSs
nix.RubiconNNegUSs = nt.Rubicon.NNegUSs
nt.LayerParams = make([]LayerParams, nLayers)
nt.Paths = make([]*Path, totPaths)
nt.PathParams = make([]PathParams, totPaths)
nt.LayerStates.SetShapeSizes(nLayers, maxData, int(LayerVarsN))
nt.Pools.SetShapeSizes(totPools, maxData, int(PoolVarsTotal))
nt.PoolIxs.SetShapeSizes(totPools, int(PoolIndexVarsN))
nt.PoolsInt.SetShapeSizes(totPools, maxData, int(PoolIntVarsTot))
nt.Neurons.SetShapeSizes(totNeurons, maxData, int(NeuronVarsN)+int(maxBins))
nt.NeuronAvgs.SetShapeSizes(totNeurons, int(NeuronAvgVarsN))
nt.NeuronIxs.SetShapeSizes(totNeurons, int(NeuronIndexVarsN))
nt.Exts.SetShapeSizes(totExts, maxData)
nCaWts := ctx.NCaWeights()
nt.GlobalScalars.SetShapeSizes(int(GlobalScalarVarsN)+int(2*nCaWts), maxData)
nt.GlobalVectors.SetShapeSizes(int(GlobalVectorVarsN), int(MaxGlobalVecN), maxData)
nt.SetAsCurrent()
totSynapses := 0
totRecvCon := 0
totSendCon := 0
neurIndex := 0
pathIndex := 0
rpathIndex := 0
poolIndex := 0
extIndex := 0
for li, ly := range nt.Layers {
ly.Params = &nt.LayerParams[li]
ly.Params.Type = ly.Type
if ly.Off {
continue
}
shp := ly.Shape
nn := shp.Len()
ly.NNeurons = uint32(nn)
ly.NeurStIndex = uint32(neurIndex)
ly.MaxData = uint32(maxData)
np := ly.NumPools() + 1
ly.NPools = uint32(np)
ly.Params.Index = uint32(li)
ly.Params.MaxData = uint32(maxData)
ly.Params.PoolSt = uint32(poolIndex)
ly.Params.Indexes.NPools = uint32(np)
ly.Params.Indexes.NeurSt = uint32(neurIndex)
ly.Params.Indexes.NNeurons = uint32(nn)
if shp.NumDims() == 2 {
ly.Params.Indexes.ShpUnY = int32(shp.DimSize(0))
ly.Params.Indexes.ShpUnX = int32(shp.DimSize(1))
ly.Params.Indexes.ShpPlY = 1
ly.Params.Indexes.ShpPlX = 1
} else {
ly.Params.Indexes.ShpPlY = int32(shp.DimSize(0))
ly.Params.Indexes.ShpPlX = int32(shp.DimSize(1))
ly.Params.Indexes.ShpUnY = int32(shp.DimSize(2))
ly.Params.Indexes.ShpUnX = int32(shp.DimSize(3))
}
for pi := 0; pi < np; pi++ {
for di := 0; di < maxData; di++ {
nt.PoolIxs.Set(uint32(li), int(poolIndex+pi), int(PoolLayerIdx))
}
}
if ly.Type.IsExt() {
ly.Params.Indexes.ExtsSt = uint32(extIndex)
extIndex += nn
} else {
ly.Params.Indexes.ExtsSt = 0 // sticking with uint32 here -- otherwise could be -1
}
spaths := ly.SendPaths
ly.Params.Indexes.SendSt = uint32(pathIndex)
ly.Params.Indexes.SendN = uint32(len(spaths))
for pi, pt := range spaths {
pii := pathIndex + pi
pt.Params = &nt.PathParams[pii]
nt.Paths[pii] = pt
}
err := ly.Build() // also builds paths and sets SubPool indexes
if err != nil {
errs = append(errs, err)
}
// now collect total number of synapses after layer build
for _, pt := range spaths {
totSynapses += len(pt.SendConIndex)
totSendCon += nn // sep vals for each send neuron per path
}
rpaths := ly.RecvPaths
ly.Params.Indexes.RecvSt = uint32(rpathIndex)
ly.Params.Indexes.RecvN = uint32(len(rpaths))
totRecvCon += nn * len(rpaths)
rpathIndex += len(rpaths)
neurIndex += nn
pathIndex += len(spaths)
poolIndex += np
}
if totSynapses > math.MaxUint32 {
log.Fatalf("ERROR: total number of synapses is greater than uint32 capacity\n")
}
nt.NetworkIxs[0].NSyns = uint32(totSynapses)
nt.Synapses.SetShapeSizes(totSynapses, int(SynapseVarsN))
nt.SynapseTraces.SetShapeSizes(totSynapses, maxData, int(SynapseTraceVarsN))
nt.SynapseIxs.SetShapeSizes(totSynapses, int(SynapseIndexVarsN))
nt.PathSendCon.SetShapeSizes(totSendCon, 2)
nt.PathRecvCon.SetShapeSizes(totRecvCon, 2)
nt.RecvPathIxs.SetShapeSizes(rpathIndex)
nt.RecvSynIxs.SetShapeSizes(totSynapses)
// distribute synapses, send
syIndex := 0
ptidx := 0
sendConIndex := 0
for _, ly := range nt.Layers {
for _, pt := range ly.SendPaths {
rlay := pt.Recv
pt.Params.Indexes.RecvLayer = uint32(rlay.Index)
pt.Params.Indexes.RecvNeurSt = uint32(rlay.NeurStIndex)
pt.Params.Indexes.RecvNeurN = rlay.NNeurons
pt.Params.Indexes.SendLayer = uint32(ly.Index)
pt.Params.Indexes.SendNeurSt = uint32(ly.NeurStIndex)
pt.Params.Indexes.SendNeurN = ly.NNeurons
nsyn := len(pt.SendConIndex)
pt.Params.Indexes.SendConSt = uint32(sendConIndex)
pt.Params.Indexes.SynapseSt = uint32(syIndex)
pt.SynStIndex = uint32(syIndex)
pt.Params.Index = uint32(ptidx)
pt.NSyns = uint32(nsyn)
for sni := uint32(0); sni < ly.NNeurons; sni++ {
si := ly.NeurStIndex + sni
scon := pt.SendCon[sni]
nt.PathSendCon.Set(scon.Start, int(sendConIndex), int(StartOff))
nt.PathSendCon.Set(scon.N, int(sendConIndex), int(Nitems))
sendConIndex++
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
syni := pt.SynStIndex + syi
nt.SynapseIxs.Set(uint32(si), int(syni), int(SynSendIndex)) // network-global idx
nt.SynapseIxs.Set(pt.SendConIndex[syi]+uint32(rlay.NeurStIndex), int(syni), int(SynRecvIndex))
nt.SynapseIxs.Set(uint32(ptidx), int(syni), int(SynPathIndex))
syIndex++
}
}
ptidx++
}
}
// update recv synapse / path info
rpathIndex = 0
recvConIndex := 0
syIndex = 0
for _, ly := range nt.Layers {
isTarg := ly.Params.IsTarget()
for _, pt := range ly.RecvPaths {
if isTarg {
pt.AddClass("ToTarget")
}
nt.RecvPathIxs.Set(pt.Params.Index, rpathIndex)
pt.Params.Indexes.RecvConSt = uint32(recvConIndex)
pt.Params.Indexes.RecvSynSt = uint32(syIndex)
synSt := pt.Params.Indexes.SynapseSt
for rni := uint32(0); rni < ly.NNeurons; rni++ {
if len(pt.RecvCon) <= int(rni) {
continue
}
rcon := pt.RecvCon[rni]
nt.PathRecvCon.Set(rcon.Start, int(recvConIndex), int(StartOff))
nt.PathRecvCon.Set(rcon.N, int(recvConIndex), int(Nitems))
recvConIndex++
syIndexes := pt.RecvSynIxs(rni)
for _, ssi := range syIndexes {
nt.RecvSynIxs.Set(ssi+synSt, syIndex)
syIndex++
}
}
rpathIndex++
}
}
nix.NSyns = uint32(syIndex)
nt.SetCaBinWts()
nt.LayoutLayers()
nt.SetAsCurrent()
return errors.Join(errs...)
}
// SetCaBinWts sets the [GvCaBinWts] global ca bin weights for kinase
// trace learning rule integration of [CaBins] neuron-level spike values.
func (nt *Network) SetCaBinWts() {
ctx := nt.Context()
nCaWts := ctx.NCaWeights()
cp := make([]float32, nCaWts)
cd := make([]float32, nCaWts)
kinase.CaBinWts(int(ctx.PlusCycles), cp, cd)
for i := range nCaWts {
nt.GlobalScalars.Set(cp[i], int(GvCaBinWts+GlobalScalarVars(i)), int(0))
nt.GlobalScalars.Set(cd[i], int(GvCaBinWts+GlobalScalarVars(nCaWts+i)), int(0))
}
}
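// caBinWtSketch is an illustrative sketch (not part of the generated code)
// of how the weights written by SetCaBinWts above are laid out: the CaP
// weights occupy GlobalScalars slots [GvCaBinWts, GvCaBinWts+nCaWts), and
// the CaD weights follow immediately after them.
func caBinWtSketch(nt *Network, i int) (cp, cd float32) {
	nCaWts := int(nt.Context().NCaWeights())
	cp = nt.GlobalScalars.Value(int(GvCaBinWts)+i, 0)
	cd = nt.GlobalScalars.Value(int(GvCaBinWts)+nCaWts+i, 0)
	return
}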
// ToGPUParams copies LayerParams and PathParams to the GPU.
func ToGPUParams() {
ToGPU(LayersVar, PathsVar)
}
// ToGPUIndexes copies indexes to the GPU.
func ToGPUIndexes() {
ToGPU(NetworkIxsVar, PoolIxsVar, NeuronIxsVar, SynapseIxsVar, PathSendConVar, RecvPathIxsVar, PathRecvConVar, RecvSynIxsVar)
}
// ToGPUCtxGlobal copies Context and Global vars to the GPU.
func ToGPUCtxGlobal() {
ToGPU(CtxVar, GlobalScalarsVar, GlobalVectorsVar)
}
// ToGPUExts copies Exts and Context and Global vars to the GPU.
// This is done after ApplyInputs typically, and sets the network going
// at the start of a new trial.
func ToGPUExts() {
ToGPU(CtxVar, GlobalScalarsVar, GlobalVectorsVar, ExtsVar)
}
// ToGPULayers copies all the layer-level state to the GPU, including context and globals.
func ToGPULayers() {
ToGPU(CtxVar, GlobalScalarsVar, GlobalVectorsVar, LayerStatesVar, PoolsVar, PoolsIntVar)
}
// ToGPUNeurons copies Neurons, NeuronAvgs to the GPU.
func ToGPUNeurons() {
ToGPU(NeuronsVar, NeuronAvgsVar)
}
// ToGPULayersNeurons copies all the layer-level and neuron state to the GPU.
func ToGPULayersNeurons() {
ToGPU(CtxVar, GlobalScalarsVar, GlobalVectorsVar, LayerStatesVar, PoolsVar, PoolsIntVar, NeuronsVar, NeuronAvgsVar)
}
// ToGPUSynapses copies the Synapse state to the GPU.
func ToGPUSynapses() {
ToGPU(SynapsesVar)
}
// ToGPULayersSynapses copies the Layers and Synapse state to the GPU.
func ToGPULayersSynapses() {
ToGPULayers()
ToGPUSynapses()
}
// ToGPUAll copies all state up to the GPU. Only for InitWeights.
func ToGPUAll() {
ToGPUTensorStrides()
ToGPUIndexes()
ToGPUParams()
ToGPULayersNeurons()
ToGPUSynapses()
ToGPU(SynapseTracesVar) // only time we call this
ToGPU(PathGBufVar, PathGSynsVar) // and this
}
// RunDoneContext finishes running and copies just the Context back from the GPU.
// Note: RunDone can only be run once, so all vars needed must be present in the one call.
func RunDoneContext() {
RunDone(CtxVar)
}
// RunDoneLayers finishes running and copies all the layer-level state from the GPU,
// (and Context, Globals) but NOT neurons. This is the minimal case for Cycle().
func RunDoneLayers() {
RunDone(CtxVar, GlobalScalarsVar, GlobalVectorsVar, LayerStatesVar, PoolsVar, PoolsIntVar)
}
// RunDoneLayersNeurons finishes running and copies all the layer-level
// and neuron state from the GPU, including context and globals.
func RunDoneLayersNeurons() {
RunDone(CtxVar, GlobalScalarsVar, GlobalVectorsVar, LayerStatesVar, PoolsVar, PoolsIntVar, NeuronsVar, NeuronAvgsVar)
}
// RunDoneSynapses finishes running and copies the Synapse state back.
func RunDoneSynapses() {
RunDone(SynapsesVar)
}
// RunDoneLayersSynapses finishes running and copies the Layers and Synapse state back.
// This is sufficient for saving synaptic weights.
func RunDoneLayersSynapses() {
RunDone(CtxVar, GlobalScalarsVar, GlobalVectorsVar, LayerStatesVar, PoolsVar, PoolsIntVar, SynapsesVar)
}
// RunDoneSynapsesTrace finishes running and copies the Synapse state back,
// including SynapseTraces, for visualization.
func RunDoneSynapsesTrace() {
RunDone(SynapsesVar, SynapseTracesVar)
}
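// runTrialGPUSketch is an illustrative sketch (not part of the generated code)
// of a minimal per-trial GPU sync pattern using the functions above. It assumes
// inputs have already been applied on the CPU side; the cycle-running kernels
// themselves are elided.
func runTrialGPUSketch() {
	ToGPUExts() // push Exts + Context + Globals at the start of a new trial
	// ... GPU Cycle() kernels run here ...
	RunDoneLayers() // pull layer-level state (and Context, Globals) back: the minimal case
}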
// BuildPathGBuf builds the PathGBuf and PathGSyns buffers,
// based on the MaxDelay values in the PathParams,
// which should have been configured by this point.
// Called by default in InitWeights()
func (nt *Network) BuildPathGBuf() {
nix := nt.NetIxs()
maxData := nix.MaxData
maxDel := uint32(0)
nptneur := uint32(0)
for _, ly := range nt.Layers {
nneur := uint32(ly.NNeurons)
for _, pt := range ly.RecvPaths {
if pt.Params.Com.MaxDelay > maxDel {
maxDel = pt.Params.Com.MaxDelay
}
nptneur += nneur
}
}
nix.MaxDelay = maxDel
mxlen := maxDel + 1
nt.PathGBuf.SetShapeSizes(int(nptneur), int(maxData), int(mxlen))
nt.PathGSyns.SetShapeSizes(int(nptneur), int(maxData))
npti := uint32(0)
for _, ly := range nt.Layers {
nneur := uint32(ly.NNeurons)
for _, pt := range ly.RecvPaths {
pt.Params.Indexes.NPathNeurSt = npti
npti += nneur
}
}
}
// SetAsCurrent sets this network's values as the current package-level global
// variables, which are then used by the computational code.
func (nt *Network) SetAsCurrent() {
CurrentNetwork = nt
Layers = nt.LayerParams
Paths = nt.PathParams
NetworkIxs = nt.NetworkIxs
PoolIxs = &nt.PoolIxs
NeuronIxs = &nt.NeuronIxs
SynapseIxs = &nt.SynapseIxs
PathSendCon = &nt.PathSendCon
RecvPathIxs = &nt.RecvPathIxs
PathRecvCon = &nt.PathRecvCon
RecvSynIxs = &nt.RecvSynIxs
Ctx = nt.Ctx
Neurons = &nt.Neurons
NeuronAvgs = &nt.NeuronAvgs
LayerStates = &nt.LayerStates
GlobalScalars = &nt.GlobalScalars
GlobalVectors = &nt.GlobalVectors
Exts = &nt.Exts
Pools = &nt.Pools
PoolsInt = &nt.PoolsInt
PathGBuf = &nt.PathGBuf
PathGSyns = &nt.PathGSyns
Synapses = &nt.Synapses
SynapseTraces = &nt.SynapseTraces
gpu.NumThreads = nt.NThreads
}
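// switchNetworkSketch is an illustrative sketch (not part of the generated code):
// when multiple networks exist, SetAsCurrent must be called before operating on
// a given network, so that the package-level views (Neurons, Synapses, etc.)
// point at that network's state.
func switchNetworkSketch(a, b *Network) {
	a.SetAsCurrent()
	// ... run / inspect a ...
	b.SetAsCurrent()
	// ... run / inspect b ...
}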
// DeleteAll deletes all layers, prepares network for re-configuring and building
func (nt *Network) DeleteAll() {
nt.Layers = nil
nt.Paths = nil
nt.FunTimes = nil
nt.LayerParams = nil
nt.PathParams = nil
}
// WriteWeightsJSON writes network weights (and any other state that adapts
// with learning) to JSON-formatted output, after syncing the current
// layer and synaptic state back from the GPU.
func (nt *Network) WriteWeightsJSON(w io.Writer) error {
RunGPUSync()
RunDoneLayersSynapses()
return nt.NetworkBase.WriteWeightsJSON(w)
}
// ReadWeightsJSON reads network weights from JSON-formatted input,
// then syncs the updated layer and synaptic state to the GPU.
func (nt *Network) ReadWeightsJSON(r io.Reader) error {
err := nt.NetworkBase.ReadWeightsJSON(r)
ToGPULayersSynapses()
RunGPUSync()
RunDone()
return err
}
// SynsSlice returns a slice of synaptic values, in natural sending order,
// using given synaptic variable, resizing as needed.
func (nt *Network) SynsSlice(vals *[]float32, synvar SynapseVars) {
nix := nt.NetIxs()
*vals = slicesx.SetLength(*vals, int(nix.NSyns))
i := 0
for _, ly := range nt.Layers {
for _, pt := range ly.SendPaths {
for lni := range pt.SendCon {
scon := pt.SendCon[lni]
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
syni := pt.SynStIndex + syi
(*vals)[i] = nt.Synapses.Value(int(syni), int(synvar))
i++
}
}
}
}
}
// NeuronsSlice returns a slice of neuron values
// using given neuron variable, resizing as needed.
func (nt *Network) NeuronsSlice(vals *[]float32, nrnVar string, di int) {
nix := nt.NetIxs()
*vals = slicesx.SetLength(*vals, int(nix.NNeurons))
i := 0
for _, ly := range nt.Layers {
varIndex, _ := ly.UnitVarIndex(nrnVar)
nn := int(ly.NNeurons)
for lni := 0; lni < nn; lni++ {
(*vals)[i] = ly.UnitValue1D(varIndex, lni, di)
i++
}
}
}
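// actSliceSketch is an illustrative sketch (not part of the generated code)
// of using NeuronsSlice to extract the "Act" values of all neurons for
// data-parallel index 0.
func actSliceSketch(nt *Network) []float32 {
	var acts []float32
	nt.NeuronsSlice(&acts, "Act", 0)
	return acts
}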
// WeightsHash returns a hash code of all weight values
func (nt *Network) WeightsHash() string {
var wts []float32
nt.SynsSlice(&wts, Wt)
return HashEncodeSlice(wts)
}
// HashEncodeSlice returns a hex-encoded MD5 hash of the given float32 slice.
func HashEncodeSlice(slice []float32) string {
byteSlice := make([]byte, len(slice)*4)
for i, f := range slice {
binary.LittleEndian.PutUint32(byteSlice[i*4:], math.Float32bits(f))
}
md5Hasher := md5.New()
md5Hasher.Write(byteSlice)
md5Sum := md5Hasher.Sum(nil)
return hex.EncodeToString(md5Sum)
}
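// weightsMatchSketch is an illustrative sketch (not part of the generated code)
// showing how WeightsHash can verify that two networks have identical synaptic
// weights, e.g., after CopyStateFrom or weight file round-tripping.
func weightsMatchSketch(a, b *Network) bool {
	return a.WeightsHash() == b.WeightsHash()
}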
// CheckSameSize checks if this network is the same size as given other,
// in terms of NNeurons, MaxData, and NSyns. Returns error message if not.
func (nt *Network) CheckSameSize(on *Network) error {
nix := nt.NetIxs()
nox := on.NetIxs()
if nix.NNeurons != nox.NNeurons {
err := fmt.Errorf("CheckSameSize: dest NNeurons: %d != src NNeurons: %d", nix.NNeurons, nox.NNeurons)
return err
}
if nix.MaxData != nox.MaxData {
err := fmt.Errorf("CheckSameSize: dest MaxData: %d != src MaxData: %d", nix.MaxData, nox.MaxData)
return err
}
if nix.NSyns != nox.NSyns {
err := fmt.Errorf("CheckSameSize: dest NSyns: %d != src NSyns: %d", nix.NSyns, nox.NSyns)
return err
}
return nil
}
// SizeReport returns a string reporting the size of each layer and pathway
// in the network, and total memory footprint.
// If detail flag is true, details per layer, pathway is included.
func (nt *Network) SizeReport(detail bool) string {
var b strings.Builder
varBytes := 4
synVarBytes := 4
nix := nt.NetIxs()
maxData := int(nix.MaxData)
memNeuron := (int(NeuronVarsN)+int(nix.NCaBins))*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes
memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes
globalProjIndexes := 0
for _, ly := range nt.Layers {
if detail {
nn := int(ly.NNeurons)
// estimated memory for this layer's neurons, using the per-neuron memNeuron total
nrnMem := nn * memNeuron
fmt.Fprintf(&b, "%14s:\t Neurons: %d\t NeurMem: %v \t Sends To:\n", ly.Name, nn,
(datasize.Size)(nrnMem).String())
}
for _, pt := range ly.SendPaths {
// We only calculate the size of the important parts of the proj struct:
// 1. Synapse slice (consists of Synapse struct)
// 2. RecvConIndex + RecvSynIndex + SendConIndex (consists of int32 indices = 4B)
//
// Everything else (e.g., the GBuf) is not included in the size calculation, as its size
// doesn't grow quadratically with the number of neurons, and hence pales in comparison to the synapses.
// It's also useful to run with -memprofile=mem.prof to validate actual memory usage.
projMemIndexes := len(pt.RecvConIndex)*varBytes + len(pt.RecvSynIndex)*varBytes + len(pt.SendConIndex)*varBytes
globalProjIndexes += projMemIndexes
if detail {
nSyn := int(pt.NSyns)
synMem := nSyn*memSynapse + projMemIndexes
fmt.Fprintf(&b, "\t%14s:\t Syns: %d\t SynMem: %v\n", pt.Recv.Name,
nSyn, (datasize.Size)(synMem).String())
}
}
}
nrnMem := (nt.Neurons.Len() + nt.NeuronAvgs.Len() + nt.NeuronIxs.Len()) * varBytes
synIndexMem := nt.SynapseIxs.Len() * varBytes
synWtMem := nt.Synapses.Len() * synVarBytes
synCaMem := nt.SynapseTraces.Len() * synVarBytes
fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynTr: %v\n",
nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns,
(datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String())
return b.String()
}
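// sizeReportSketch is an illustrative sketch (not part of the generated code)
// that logs the summary (non-detailed) memory report for a built network.
func sizeReportSketch(nt *Network) {
	log.Println(nt.SizeReport(false))
}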
// CopyStateFrom copies entire network state from other network.
// Other network must have identical configuration, as this just
// does a literal copy of the state values. This is checked
// and errors are returned (and logged).
// See also DiffFrom.
func (nt *Network) CopyStateFrom(on *Network) error {
if err := nt.CheckSameSize(on); err != nil {
slog.Error(err.Error())
return err
}
nt.Pools.CopyFrom(&on.Pools)
nt.PoolsInt.CopyFrom(&on.PoolsInt)
nt.Neurons.CopyFrom(&on.Neurons)
nt.NeuronAvgs.CopyFrom(&on.NeuronAvgs)
nt.LayerStates.CopyFrom(&on.LayerStates)
nt.Synapses.CopyFrom(&on.Synapses)
nt.SynapseTraces.CopyFrom(&on.SynapseTraces)
return nil
}
// DiffFrom returns a string reporting differences between this network
// and given other, up to given max number of differences (0 = all),
// for each state value.
func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string {
nix := nt.NetIxs()
diffs := ""
ndif := 0
for di := uint32(0); di < ctx.NData; di++ {
for ni := uint32(0); ni < nix.NNeurons; ni++ {
for nvar := Spike; nvar < NeuronVarsN; nvar++ {
nv := nt.Neurons.Value(int(ni), int(di), int(nvar))
ov := on.Neurons.Value(int(ni), int(di), int(nvar))
if nv != ov {
diffs += fmt.Sprintf("Neuron: di: %d\tni: %d\tvar: %s\tval: %g\toth: %g\n", di, ni, nvar.String(), nv, ov)
ndif++
if maxDiff > 0 && ndif >= maxDiff {
return diffs
}
}
}
}
}
for ni := uint32(0); ni < nix.NNeurons; ni++ {
for nvar := ActAvg; nvar < NeuronAvgVarsN; nvar++ {
nv := nt.NeuronAvgs.Value(int(ni), int(nvar))
ov := on.NeuronAvgs.Value(int(ni), int(nvar))
if nv != ov {
diffs += fmt.Sprintf("NeuronAvg: ni: %d\tvar: %s\tval: %g\toth: %g\n", ni, nvar.String(), nv, ov)
ndif++
if maxDiff > 0 && ndif >= maxDiff {
return diffs
}
}
}
}
for si := uint32(0); si < nix.NSyns; si++ {
for svar := Wt; svar < SynapseVarsN; svar++ {
sv := nt.Synapses.Value(int(si), int(svar))
ov := on.Synapses.Value(int(si), int(svar))
if sv != ov {
diffs += fmt.Sprintf("Synapse: si: %d\tvar: %s\tval: %g\toth: %g\n", si, svar.String(), sv, ov)
ndif++
if maxDiff > 0 && ndif >= maxDiff {
return diffs
}
}
}
}
for di := uint32(0); di < ctx.NData; di++ {
for si := uint32(0); si < nix.NSyns; si++ {
for svar := Tr; svar < SynapseTraceVarsN; svar++ {
sv := nt.SynapseTraces.Value(int(si), int(di), int(svar))
ov := on.SynapseTraces.Value(int(si), int(di), int(svar))
if sv != ov {
diffs += fmt.Sprintf("SynapseTraces: di: %d, si: %d\tvar: %s\tval: %g\toth: %g\n", di, si, svar.String(), sv, ov)
ndif++
if maxDiff > 0 && ndif >= maxDiff {
return diffs
}
}
}
}
}
return diffs
}
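// copyAndVerifySketch is an illustrative sketch (not part of the generated code):
// it copies all state from src into dst and reports any remaining differences
// (an empty string means the two networks are in identical states). The ctx
// argument is assumed to be the shared simulation Context.
func copyAndVerifySketch(ctx *Context, dst, src *Network) string {
	if err := dst.CopyStateFrom(src); err != nil {
		return err.Error()
	}
	return dst.DiffFrom(ctx, src, 10) // report up to 10 diffs
}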
//////// Lesion methods
// LayersSetOff sets the Off flag for all layers to given setting
func (nt *Network) LayersSetOff(off bool) {
for _, ly := range nt.Layers {
ly.SetOff(off)
}
}
// UnLesionNeurons unlesions neurons in all layers in the network.
// Provides a clean starting point for subsequent lesion experiments.
func (nt *Network) UnLesionNeurons() {
for _, ly := range nt.Layers {
// if ly.Off { // keep all sync'd
// 	continue
// }
ly.UnLesionNeurons()
}
}
// MakeToolbar adds the network GUI toolbar actions: showing global variables,
// saving / opening weights, and Build, InitWeights, InitActs.
func (nt *Network) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.ShowAllGlobals).SetText("Global Vars").SetIcon(icons.Info)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.SaveWeightsJSON).
SetText("Save Weights").SetIcon(icons.Save)
w.Args[0].SetTag(`extension:".wts,.wts.gz"`)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.OpenWeightsJSON).SetText("Open Weights").SetIcon(icons.Open)
w.Args[0].SetTag(`extension:".wts,.wts.gz"`)
})
tree.Add(p, func(w *core.Separator) {})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.Build).SetIcon(icons.Reset)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.InitWeights).SetIcon(icons.Reset)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(nt.InitActs).SetIcon(icons.Reset)
})
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/math32"
"cogentcore.org/lab/gosl/slbool"
)
//gosl:start
// DAModTypes are types of dopamine modulation of neural activity.
type DAModTypes int32 //enums:enum
const (
// NoDAMod means there is no effect of dopamine on neural activity
NoDAMod DAModTypes = iota
// D1Mod is for neurons that primarily express dopamine D1 receptors,
// which are excitatory from DA bursts, inhibitory from dips.
// Cortical neurons can generally use this type, while subcortical
// populations are more diverse in having both D1 and D2 subtypes.
D1Mod
// D2Mod is for neurons that primarily express dopamine D2 receptors,
// which are excitatory from DA dips, inhibitory from bursts.
D2Mod
// D1AbsMod is like D1Mod, except the absolute value of DA is used
// instead of the signed value.
// There are a subset of DA neurons that send increased DA for
// both negative and positive outcomes, targeting frontal neurons.
D1AbsMod
)
// ValenceTypes are types of valence coding: positive or negative.
type ValenceTypes int32 //enums:enum
const (
// Positive valence codes for outcomes aligned with drives / goals.
Positive ValenceTypes = iota
// Negative valence codes for harmful or aversive outcomes.
Negative
// Cost codes for continuous ongoing cost factors such as Time and Effort.
Cost
)
// NeuroModParams specifies the effects of neuromodulators on neural
// activity and learning rate. These can apply to any neuron type,
// and are applied in the core cycle update equations.
type NeuroModParams struct {
// dopamine receptor-based effects of dopamine modulation
// on excitatory and inhibitory conductances: D1 is excitatory,
// D2 is inhibitory as a function of increasing dopamine.
DAMod DAModTypes
// valence coding of this layer, which may affect specific layer
// types but does not directly affect neuromodulators currently.
Valence ValenceTypes
// dopamine modulation of excitatory and inhibitory conductances
// (i.e., "performance dopamine" effect: this does NOT affect
// learning dopamine modulation in terms of RLrate): g *= 1 + (DAModGain * DA).
DAModGain float32
// modulate the sign of the learning rate factor according to
// the DA sign, taking into account the DAMod sign reversal for D2Mod,
// also using BurstGain and DipGain to modulate DA value.
// Otherwise, only the magnitude of the learning rate is modulated
// as a function of raw DA magnitude according to DALRateMod
// (without additional gain factors).
DALRateSign slbool.Bool
// if not using DALRateSign, this is the proportion of maximum learning
// rate that Abs(DA) magnitude can modulate.
// e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%.
DALRateMod float32 `min:"0" max:"1"`
// proportion of maximum learning rate that ACh can modulate.
// e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%.
AChLRateMod float32 `min:"0" max:"1"`
// amount of extra Gi inhibition added in proportion to 1 - ACh level.
// makes ACh disinhibitory
AChDisInhib float32 `min:"0" default:"0,5"`
// multiplicative gain factor applied to positive dopamine signals.
// This operates on the raw dopamine signal prior to any effect
// of D2 receptors in reversing its sign!
BurstGain float32 `min:"0" default:"1"`
// multiplicative gain factor applied to negative dopamine signals.
// This operates on the raw dopamine signal prior to any effect
// of D2 receptors in reversing its sign!
// Should be small for acquisition, but roughly equal to BurstGain for extinction.
DipGain float32 `min:"0" default:"1"`
pad, pad1, pad2 float32
}
func (nm *NeuroModParams) Defaults() {
// nm.DAMod is typically set by BuildConfig -- don't reset here
nm.DAModGain = 0
nm.DALRateMod = 0
nm.AChLRateMod = 0
nm.BurstGain = 1
nm.DipGain = 1
}
func (nm *NeuroModParams) Update() {
nm.DALRateMod = math32.Clamp(nm.DALRateMod, 0, 1)
nm.AChLRateMod = math32.Clamp(nm.AChLRateMod, 0, 1)
}
func (nm *NeuroModParams) ShouldDisplay(field string) bool {
switch field {
case "DAModGain":
return nm.DAMod != NoDAMod
case "DALRateMod":
return !nm.DALRateSign.IsTrue()
default:
return true
}
}
// IsBLAExt returns true if this is Positive valence with D2Mod, or Negative
// valence with D1Mod, i.e., the BLA extinction configuration.
func (nm *NeuroModParams) IsBLAExt() bool {
return (nm.Valence == Positive && nm.DAMod == D2Mod) ||
(nm.Valence == Negative && nm.DAMod == D1Mod)
}
// LRModFact returns learning rate modulation factor for given inputs.
func (nm *NeuroModParams) LRModFact(pct, val float32) float32 {
aval := math32.Clamp(math32.Abs(val), 0.0, 1.0)
return 1.0 - pct*(1.0-aval)
}
// DAGain returns DA dopamine value with Burst / Dip Gain factors applied
func (nm *NeuroModParams) DAGain(da float32) float32 {
ada := da
if da > 0 {
ada *= nm.BurstGain
} else {
ada *= nm.DipGain
}
return ada
}
// DASign returns the sign of dopamine effects: D2Mod = -1, else 1
func (nm *NeuroModParams) DASign() float32 {
if nm.DAMod == D2Mod {
return -1.0
}
return 1.0
}
// LRMod returns the overall learning rate modulation factor due to neuromodulation
// from the given dopamine (DA) and ACh inputs.
// If DALRateSign is true, the sign of the result follows the sign of DA
// (reversed for D2Mod), with BurstGain / DipGain applied via DAGain.
// Otherwise, only the magnitude is modulated, as a function of DALRateMod.
func (nm *NeuroModParams) LRMod(da, ach float32) float32 {
lmod := nm.LRModFact(nm.AChLRateMod, ach)
if nm.DALRateSign.IsTrue() {
lmod *= nm.DAGain(da) * nm.DASign()
} else {
lmod *= nm.LRModFact(nm.DALRateMod, da)
}
return lmod
}
// GGain returns effective Ge and Gi gain factor given
// total dopamine (DA) value: tonic + phasic.
// factor is 1 for no modulation, otherwise higher or lower.
func (nm *NeuroModParams) GGain(da float32) float32 {
ada := da
if da > 0 {
ada *= nm.BurstGain
} else {
ada *= nm.DipGain
}
gain := float32(1)
switch nm.DAMod {
case NoDAMod:
case D1Mod:
gain += nm.DAModGain * ada
case D2Mod:
gain -= nm.DAModGain * ada
case D1AbsMod:
gain += nm.DAModGain * math32.Abs(ada)
default:
}
if gain < 0 {
gain = 0
}
return gain
}
// GiFromACh returns the amount of extra inhibition to add based on the
// disinhibitory effect of ACh: no extra inhibition when ACh = 1, extra when ACh < 1.
func (nm *NeuroModParams) GiFromACh(ach float32) float32 {
ai := 1 - ach
if ai < 0 {
ai = 0
}
return nm.AChDisInhib * ai
}
//gosl:end
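// neuroModSketch is an illustrative sketch (not part of the generated code)
// of the learning-rate and conductance-gain arithmetic above, using assumed
// example parameter values. With AChLRateMod = 0.2 and ACh = 0, LRModFact
// yields 0.8 (80% of the standard learning rate); with DAMod = D1Mod and
// DAModGain = 0.5, a DA burst of +0.4 yields GGain = 1 + 0.5*0.4 = 1.2.
func neuroModSketch() (lrMod, gGain float32) {
	nm := NeuroModParams{}
	nm.Defaults()
	nm.DAMod = D1Mod
	nm.DAModGain = 0.5
	nm.AChLRateMod = 0.2
	lrMod = nm.LRMod(0.4, 0) // = 0.8: ACh term only, since DALRateSign is off and DALRateMod = 0
	gGain = nm.GGain(0.4)    // = 1.2: D1 excitatory boost from the DA burst
	return
}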
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"strings"
_ "cogentcore.org/core/tree"
"github.com/emer/emergent/v2/emer"
)
//gosl:start
// NeuronFlags are bit-flags encoding relevant binary state for neurons
type NeuronFlags int32 //enums:enum
// The neuron flags
const (
// NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned).
NeuronOff NeuronFlags = 1
// NeuronHasExt means the neuron has external input in its Ext field.
NeuronHasExt NeuronFlags = 2
// NeuronHasTarg means the neuron has external target input in its Target field.
NeuronHasTarg NeuronFlags = 4
// NeuronHasCmpr means the neuron has external comparison input in its Target field.
// Used for computing comparison statistics but does not drive neural activity ever.
NeuronHasCmpr NeuronFlags = 8
)
// NeuronVars are the neuron variables representing current active state,
// specific to each input data state.
// See NeuronAvgVars for vars shared across data.
type NeuronVars int32 //enums:enum
const (
//////// Spiking, Activation
// Spike is whether neuron has spiked or not on this cycle (0 or 1).
Spike NeuronVars = iota
// Spiked is 1 if neuron has spiked within the last 10 cycles (msecs),
// corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise.
// Useful for visualization and computing activity levels in terms of
// average spiked levels.
Spiked
// Act is rate-coded activation value reflecting instantaneous estimated rate
// of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt
// which is then used for performance statistics and layer average activations, etc.
// Should not be used for learning or other computations: just for stats / display.
Act
// ActInt is integrated running-average activation value computed from Act
// with time constant Act.Dt.IntTau, to produce a longer-term integrated value
// reflecting the overall activation state across the ThetaCycle time scale,
// as the overall response of network to current input state. This is copied
// to ActM and ActP at the ends of the minus and plus phases, respectively,
// and used in computing some performance-level statistics (based on ActM).
// Should not be used for learning or other computations.
ActInt
//////// Major conductances, Vm
// Ge is total excitatory conductance, including all forms of excitation
// (e.g., NMDA). Does *not* include the Gbar.E factor.
Ge
// Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input
// to the neuron. Does *not* include the Gbar.I factor.
Gi
// Gk is total potassium conductance, typically reflecting sodium-gated potassium
// currents involved in adaptation effects. Does *not* include the Gbar.K factor.
Gk
// Inet is net current produced by all channels, which drives update of Vm.
Inet
// Vm is the membrane potential at the cell body, which integrates Inet current
// over time, and drives spiking at the axon initial segment of the neuron.
Vm
// VmDend is the dendritic membrane potential, which has a slower time constant
// than Vm and is not subject to the VmR reset after spiking.
VmDend
// ISI is the current inter-spike-interval, which counts up since last spike.
// Starts at -1 when initialized.
ISI
// ISIAvg is the average inter-spike-interval, i.e., the average time interval
// between spikes, integrated with ISITau rate constant (relatively fast) to
// capture something close to an instantaneous spiking rate. Starts at -1 when
// initialized, and goes to -2 after first spike, and is only valid after the
// second spike post-initialization.
ISIAvg
// Ext is the external input: drives activation of unit from outside influences
// (e.g., sensory input).
Ext
// Target is the target value: drives learning to produce this activation value.
Target
//////// Spike-driven calcium for stats
// CaM is the spike-driven calcium trace at the neuron level, which then drives
// longer time-integrated variables: [CaP] and [CaD]. These variables are used
// for statistics and display to capture spiking activity at different timescales.
// They fluctuate more than [Act] and [ActInt], but are closer to the biological
// variables driving learning. CaM is the exponential integration of SpikeG * Spike
// using the MTau time constant (typically 5), and simulates a calmodulin (CaM)
// like signal, at an abstract level.
CaM
// CaP is the continuous cascaded integration of [CaM] using the PTau time constant
// (typically 40), representing a neuron-level, purely spiking version of the plus,
// LTP direction of weight change in the Kinase learning rule, dependent on CaMKII.
// This is not used for learning (see [LearnCaP]), but instead for statistics
// as a representation of recent activity.
CaP
// CaD is the continuous cascaded integration [CaP] using the DTau time constant
// (typically 40), representing a neuron-level, purely spiking version of the minus,
// LTD direction of weight change in the Kinase learning rule, dependent on DAPK1.
// This is not used for learning (see [LearnCaD]), but instead for statistics
// as a representation of trial-level activity.
CaD
// CaDPrev is the final [CaD] activation state at the end of previous theta cycle.
// This is used for specialized learning mechanisms that operate on delayed
// sending activations.
CaDPrev
//////// Calcium for learning
// CaSyn is the neuron-level integration of spike-driven calcium, used to approximate
// synaptic calcium influx as a product of sender and receiver neuron CaSyn values,
// which are integrated separately because it is computationally much more efficient.
// CaSyn enters into a Sender * Receiver product at each synapse to give the effective
// credit assignment factor for learning.
// This value is driven directly by spikes, with an exponential integration time
// constant of 30 msec (default), which captures the coincidence window for pre*post
// firing on NMDA receptor opening. The neuron [CaBins] values record the temporal
// trajectory of CaSyn over the course of the theta cycle window, and then the
// pre*post product is integrated over these bins at the synaptic level.
CaSyn
// LearnCa is the receiving neuron calcium signal, which is integrated up to
// [LearnCaP] and [LearnCaD], the difference of which is the temporal error
// component of the kinase cortical learning rule.
// LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium
// sources. The NMDA signal reflects both sending and receiving activity, while the
// VGCC signal is purely receiver spiking, and a balance of both works best.
LearnCa
// LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5),
// simulating a calmodulin (CaM) like signal, which then drives [LearnCaP],
// and [LearnCaD] for the delta signal for error-driven learning.
LearnCaM
// LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant
// (typically 40), representing the plus, LTP direction of weight change,
// capturing the function of CaMKII in the Kinase learning rule.
LearnCaP
// LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant
// (typically 40), representing the minus, LTD direction of weight change,
// capturing the function of DAPK1 in the Kinase learning rule.
LearnCaD
// CaDiff is difference between [LearnCaP] - [LearnCaD]. This is the error
// signal that drives error-driven learning.
CaDiff
// LearnDiff is the actual difference signal that drives learning, which is
// computed from [CaDiff] for neocortical neurons, but specifically at the
// point of learning ([LearnNow]).
LearnDiff
//////// Learning Timing
// GaM is first-level integration of all input conductances g_a,
// which then drives longer time-integrated variables: [GaP] and [GaD].
// These variables are used for timing of learning based on bursts of activity
// change over time: at the minus and plus phases.
GaM
// GaP is the continuous cascaded integration of [GaM] using the PTau time constant
// (typically 40), representing a neuron-level, all-conductance-based version
// of the plus, LTP direction of weight change in the Kinase learning rule.
GaP
// GaD is the continuous cascaded integration of [GaP] using the DTau time constant
// (typically 40), representing a neuron-level, all-conductance-based version
// of the minus, LTD direction of weight change in the Kinase learning rule.
GaD
// TimeDiff is the running time-average of |P - D| (absolute value),
// used for determining the timing of learning in terms of onsets of peaks.
// See [TimePeak]. GaP - GaD is used, as it is
// smoother and more reliable than LearnCaP - LearnCaD.
TimeDiff
// TimePeak is the value of the current peak (local maximum) of [TimeDiff].
// This typically occurs at the onset of the minus phase, and drives
// the timing of learning a given number of cycles after that.
TimePeak
// TimeCycle is the absolute cycle where [TimePeak] occurred.
TimeCycle
// LearnNow is the absolute cycle (ms, CyclesTotal) when the receiving
// neuron learns. For neocortex, either at end of theta cycle or based
// on timing computed from [TimeCycle] per [LearnTimingParams].
LearnNow
// RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid
// derivative computed from [CaD] of recv unit, and the normalized difference
// (CaP - CaD) / MAX(CaP - CaD).
RLRate
// ETrace is the eligibility trace for this neuron.
ETrace
// ETrLearn is the learning factor for the eligibility trace for this neuron.
// 1 + ETraceScale * [ETrace]
ETrLearn
//////// NMDA channels
// GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron.
// It adds GeRaw and decays with a time constant.
GnmdaSyn
// Gnmda is the net postsynaptic (receiving) NMDA conductance,
// after Mg V-gating and Gbar. This is added directly to Ge as it has the same
// reversal potential.
Gnmda
// GnmdaLrn is learning version of integrated NMDA recv synaptic current.
// It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that
// then drives [LearnCa] for learning.
GnmdaLrn
// GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from
// [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. This is added directly
// to Ge as it has the same reversal potential.
GnmdaMaint
// NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.
NmdaCa
//////// VGCC voltage gated calcium channels
// Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.
Gvgcc
// VgccM is activation gate of VGCC channels.
VgccM
// VgccH inactivation gate of VGCC channels.
VgccH
// VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking
// or directly from Gvgcc.
VgccCa
// VgccCaInt is the time-integrated VGCC calcium flux. This is actually
// what drives learning.
VgccCaInt
// Burst is the layer 5 IB intrinsic bursting neural activation value,
// computed by thresholding the [CaP] value in Super (superficial) layers.
Burst
// BurstPrv is previous Burst bursting activation from prior time step.
// Used for context-based learning.
BurstPrv
// CtxtGe is context (temporally delayed) excitatory conductance,
// driven by deep bursting at end of the plus phase, for CT layers.
CtxtGe
// CtxtGeRaw is raw update of context (temporally delayed) excitatory
// conductance, driven by deep bursting at end of the plus phase, for CT layers.
CtxtGeRaw
// CtxtGeOrig is original CtxtGe value prior to any decay factor.
// Updates at end of plus phase.
CtxtGeOrig
//////// GABA-B channels
// GgabaB is net GABA-B conductance, after Vm gating and Gk + Gbase.
// Applies to Gk, not Gi, for GIRK, with .1 reversal potential.
GgabaB
// GababM is the GABA-B / GIRK activation, which is a time-integrated value
// with rise and decay time constants.
GababM
// GababX is GABA-B / GIRK internal drive variable. This gets the raw
// activation and decays.
GababX
//////// SST somatostatin inhibition factors
// Gak is the conductance of A-type K potassium channels.
Gak
// SSGiDend is the amount of SST+ somatostatin positive slow spiking
// inhibition applied to dendritic Vm (VmDend).
SSGiDend
// GknaMed is the conductance of sodium-gated potassium channel (KNa)
// medium dynamics (Slick), which produces accommodation / adaptation.
GknaMed
// GknaSlow is the conductance of sodium-gated potassium channel (KNa)
// slow dynamics (Slack), which produces accommodation / adaptation.
GknaSlow
// Gkir is the conductance of the potassium (K) inwardly rectifying channel,
// which is strongest at low membrane potentials. Can be modulated by DA.
Gkir
// KirM is the Kir potassium (K) inwardly rectifying gating value.
KirM
//////// SKCa small conductance calcium-gated potassium channels
// Gsk is Calcium-gated potassium channel conductance as a function
// of Gbar * SKCaM.
Gsk
// SKCaIn is intracellular calcium store level, available to be released
// with spiking as SKCaR, which can bind to SKCa receptors and drive K
// current. replenishment is a function of spiking activity being below
// a threshold.
SKCaIn
// SKCaR is the released amount of intracellular calcium, from SKCaIn,
// as a function of spiking events. This can bind to SKCa channels and
// drive K currents.
SKCaR
// SKCaM is the Calcium-gated potassium channel gating factor, driven by
// SKCaR via a Hill equation as in chans.SKPCaParams.
SKCaM
///////// AHP channels: Mahp, Sahp, Gkna
// Gmahp is medium time scale AHP conductance.
Gmahp
// MahpN is accumulating voltage-gated gating value for the medium time
// scale AHP.
MahpN
// Gsahp is slow time scale AHP conductance.
Gsahp
// SahpCa is slowly accumulating calcium value that drives the slow AHP.
SahpCa
// SahpN is the sAHP gating value.
SahpN
//////// Stats, aggregate values
// ActM is ActInt activation state at end of third quarter, representing
// the posterior-cortical minus phase activation. This is used for statistics
// and monitoring network performance.
// Should not be used for learning or other computations.
ActM
// ActP is ActInt activation state at end of fourth quarter, representing
// the posterior-cortical plus phase activation. This is used for statistics
// and monitoring network performance.
// Should not be used for learning or other computations.
ActP
// Beta1 is the activation state at the first beta cycle within current
// state processing window (i.e., at 50 msec), as saved by Beta1() function.
// Used for example in hippocampus for CA3, CA1 learning.
Beta1
// Beta2 is the activation state at the second beta cycle within current
// state processing window (i.e., at 100 msec), as saved by Beta2() function.
// Used for example in hippocampus for CA3, CA1 learning.
Beta2
// CaPMax is the maximum [CaP] across one theta cycle time window
// (max of CaPMaxCa). It is used for specialized algorithms that have more
// phasic behavior within a single trial, e.g., BG Matrix layer gating.
// Also useful for visualization of peak activity of neurons.
CaPMax
// CaPMaxCa is the Ca integrated like [CaP] but only starting at
// the MaxCycStart cycle, to prevent inclusion of carryover spiking from
// prior theta cycle trial. The PTau time constant otherwise results in
// significant carryover. This is the input to CaPMax.
CaPMaxCa
//////// Noise
// GeNoise is integrated noise excitatory conductance, added into Ge.
GeNoise
// GeNoiseP is the accumulating Poisson probability factor for driving excitatory
// noise spiking. It is multiplied by a uniform random deviate at each time step,
// until it falls below the target threshold based on the Poisson lambda as a
// function of the noise firing rate.
GeNoiseP
// GiNoise is integrated noise inhibitory conductance, added into Gi.
GiNoise
// GiNoiseP is the accumulating Poisson probability factor for driving inhibitory
// noise spiking. It is multiplied by a uniform random deviate at each time step,
// until it falls below the target threshold based on the Poisson lambda as a
// function of the noise firing rate.
GiNoiseP
//////// Ge, Gi integration
// GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.
GeExt
// GeRaw is the raw excitatory conductance (net input) received from
// senders = current raw spiking drive.
GeRaw
// GeSyn is the time-integrated total excitatory (AMPA) synaptic conductance,
// with an instantaneous rise time from each spike (in GeRaw) and
// exponential decay with Dt.GeTau, aggregated over pathways.
// Does *not* include Gbar.E.
GeSyn
// GiRaw is the raw inhibitory conductance (net input) received from senders
// = current raw spiking drive.
GiRaw
// GiSyn is time-integrated total inhibitory synaptic conductance, with an
// instantaneous rise time from each spike (in GiRaw) and exponential decay
// with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I.
// This is added with computed FFFB inhibition to get the full inhibition in Gi.
GiSyn
// GeInt is integrated running-average activation value computed from Ge
// with time constant Act.Dt.IntTau, to produce a longer-term integrated value
// reflecting the overall Ge level across the ThetaCycle time scale (Ge itself
// fluctuates considerably). This is useful for stats to set strength of
// connections etc to get neurons into right range of overall excitatory drive.
GeInt
// GeIntNorm is normalized GeInt value (divided by the layer maximum).
// This is used for learning in layers that require learning on
// subthreshold activity.
GeIntNorm
// GiInt is integrated running-average activation value computed from GiSyn
// with time constant Act.Dt.IntTau, to produce a longer-term integrated
// value reflecting the overall synaptic Gi level across the ThetaCycle
// time scale (Gi itself fluctuates considerably). Useful for stats to set
// strength of connections etc to get neurons into right range of overall
// inhibitory drive.
GiInt
// GModRaw is raw modulatory conductance, received from GType
// = ModulatoryG pathways.
GModRaw
// GModSyn is syn integrated modulatory conductance, received from GType
// = ModulatoryG pathways.
GModSyn
// SMaintP is the accumulating Poisson probability factor for driving
// self-maintenance by simulating a population of mutually interconnected neurons.
// It is multiplied by a uniform random deviate at each time step, until it falls
// below the target threshold given by the Poisson lambda, which is driven by the
// accumulating self-maintenance factor.
SMaintP
// GMaintRaw is raw maintenance conductance, received from GType
// = MaintG pathways.
GMaintRaw
// GMaintSyn is syn integrated maintenance conductance, integrated
// using MaintNMDA params.
GMaintSyn
// NeurFlags are bit flags for binary state variables, which are converted
// to / from uint32. These need to be in Vars because they can be
// differential per data (for ext inputs) and are writable (indexes are read only).
NeurFlags
// CaBins is a vector of values starting here, with aggregated [CaSyn] values
// in time bins of [CaBinCycles] across two theta cycles,
// for computing synaptic calcium efficiently. Each bin = Sum(CaSyn / CaBinCycles).
// Total number of bins = 2 * [Context.ThetaCycles] / CaBinCycles.
// Use [CaBinForCycle] to access.
// Synaptic calcium is integrated from sender * receiver CaBins values,
// with weights for CaP vs CaD that reflect their faster vs. slower time constants,
// respectively. CaD is used for the credit assignment factor, while CaP - CaD is
// used directly for error-driven learning at Target layers.
CaBins
)
// NeuronAvgVars are mostly neuron variables involved in longer-term average activity
// which is aggregated over time and not specific to each input data state,
// along with any other state that is not input data specific.
type NeuronAvgVars int32 //enums:enum
const (
// ActAvg is average activation (of minus phase activation state)
// over long time intervals (time constant = Dt.LongAvgTau).
// Useful for finding hog units and seeing overall distribution of activation.
ActAvg NeuronAvgVars = iota
// AvgPct is ActAvg as a proportion of overall layer activation.
// This is used for synaptic scaling to match TrgAvg activation,
// updated at SlowInterval intervals.
AvgPct
// TrgAvg is neuron's target average activation as a proportion
// of overall layer activation, assigned during weight initialization,
// driving synaptic scaling relative to AvgPct.
TrgAvg
// DTrgAvg is change in neuron's target average activation as a result
// of unit-wise error gradient. Acts like a bias weight.
// MPI needs to share these across processors.
DTrgAvg
// AvgDif is AvgPct - TrgAvg, i.e., the error in overall activity level
// relative to set point for this neuron, which drives synaptic scaling.
// Updated at SlowInterval intervals.
AvgDif
// GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability.
GeBase
// GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability.
GiBase
)
// NeuronIndexVars are neuron-level indexes used to access layers and pools
// from the individual neuron level.
type NeuronIndexVars int32 //enums:enum
const (
// NrnNeurIndex is the index of this neuron within its owning layer.
NrnNeurIndex NeuronIndexVars = iota
// NrnLayIndex is the index of the layer that this neuron belongs to,
// needed for neuron-level parallel code.
NrnLayIndex
// NrnSubPool is the index of the sub-level inhibitory pool for this neuron
// (only for 4D shapes, the pool (unit-group / hypercolumn) structure level).
// Indices start at 1 -- 0 is the layer-level pool (remains 0 if there are no sub-pools).
NrnSubPool
)
//gosl:end
var VarCategories = []emer.VarCategory{
{"Act", "basic activation variables, including conductances, current, Vm, spiking"},
{"Learn", "calcium-based learning variables and other related learning factors"},
{"Excite", "excitatory channels including NMDA, Vgcc and other excitatory inputs"},
{"Inhib", "inhibitory channels including GABA inhibition, after hyperpolarization (AHP) and other K channels"},
{"Stats", "statistics and aggregate values"},
{"Gmisc", "more detailed conductance (G) variables for integration and other computational values"},
{"Avg", "longer-term average variables and homeostatic regulation"},
{"Spikes", "Binned spike counts used for learning"},
{"Wts", "weights and other synaptic-level variables"},
}
// NeuronVarProps has display properties for neuron variables.
var NeuronVarProps = map[string]string{
//////// Spiking, Activation, Major conductances, Vm
"Spike": `cat:"Act"`,
"Spiked": `cat:"Act"`,
"Act": `cat:"Act"`,
"ActInt": `cat:"Act"`,
"Ge": `cat:"Act" range:"2"`,
"Gi": `cat:"Act" auto-scale:"+"`,
"Gk": `cat:"Act" auto-scale:"+"`,
"Inet": `cat:"Act" auto-scale:"+"`,
"Vm": `cat:"Act" min:"-100" max:"0"`,
"VmDend": `cat:"Act" min:"-100" max:"0"`,
"ISI": `cat:"Act" auto-scale:"+"`,
"ISIAvg": `cat:"Act" auto-scale:"+"`,
"Ext": `cat:"Act"`,
"Target": `cat:"Act"`,
//////// Calcium for learning
"CaM": `cat:"Learn"`,
"CaP": `cat:"Learn"`,
"CaD": `cat:"Learn"`,
"CaDPrev": `cat:"Learn"`,
"CaSyn": `cat:"Learn"`,
"LearnCa": `cat:"Learn"`,
"LearnCaM": `cat:"Learn"`,
"LearnCaP": `cat:"Learn"`,
"LearnCaD": `cat:"Learn"`,
"CaDiff": `cat:"Learn"`,
"LearnDiff": `cat:"Learn"`,
"GaM": `cat:"Learn"`,
"GaP": `cat:"Learn"`,
"GaD": `cat:"Learn"`,
"TimeDiff": `cat:"Learn"`,
"TimePeak": `cat:"Learn"`,
"TimeCycle": `cat:"Learn" auto-scale:"+"`,
"LearnPeak": `cat:"Learn"`,
"LearnPeakCyc": `cat:"Learn" auto-scale:"+"`,
"LearnNow": `cat:"Learn" auto-scale:"+"`,
"RLRate": `cat:"Learn" auto-scale:"+"`,
"ETrace": `cat:"Learn"`,
"ETrLearn": `cat:"Learn" auto-scale:"+"`,
//////// NMDA channels
"GnmdaSyn": `cat:"Excite" auto-scale:"+"`,
"Gnmda": `cat:"Excite" auto-scale:"+"`,
"GnmdaLrn": `cat:"Excite" auto-scale:"+"`,
"GnmdaMaint": `cat:"Excite" auto-scale:"+"`,
"NmdaCa": `cat:"Excite" auto-scale:"+"`,
//////// VGCC voltage gated calcium channels
"Gvgcc": `cat:"Excite" auto-scale:"+"`,
"VgccM": `cat:"Excite"`,
"VgccH": `cat:"Excite"`,
"VgccCa": `cat:"Excite" auto-scale:"+"`,
"VgccCaInt": `cat:"Excite" auto-scale:"+"`,
//////// Misc Excitatory Vars
"Burst": `cat:"Excite"`,
"BurstPrv": `cat:"Excite"`,
"CtxtGe": `cat:"Excite"`,
"CtxtGeRaw": `cat:"Excite"`,
"CtxtGeOrig": `cat:"Excite"`,
//////// GABA channels
"GgabaB": `cat:"Inhib" auto-scale:"+"`,
"GababM": `cat:"Inhib" auto-scale:"+"`,
"GababX": `cat:"Inhib" auto-scale:"+"`,
//////// SST somatostatin inhibition factors
"Gak": `cat:"Inhib" auto-scale:"+"`,
"SSGiDend": `cat:"Inhib" auto-scale:"+"`,
"GknaMed": `cat:"Inhib" auto-scale:"+"`,
"GknaSlow": `cat:"Inhib" auto-scale:"+"`,
"Gkir": `cat:"Inhib"`,
"KirM": `cat:"Inhib"`,
//////// SKCa small conductance calcium-gated potassium channels
"Gsk": `cat:"Inhib"`,
"SKCaIn": `cat:"Inhib"`,
"SKCaR": `cat:"Inhib"`,
"SKCaM": `cat:"Inhib"`,
//////// AHP channels: Mahp, Sahp
"Gmahp": `cat:"Inhib" auto-scale:"+"`,
"MahpN": `cat:"Inhib" auto-scale:"+"`,
"Gsahp": `cat:"Inhib" auto-scale:"+"`,
"SahpCa": `cat:"Inhib"`,
"SahpN": `cat:"Inhib"`,
//////// Stats, aggregate values
"ActM": `cat:"Stats"`,
"ActP": `cat:"Stats"`,
"Beta1": `cat:"Stats"`,
"Beta2": `cat:"Stats"`,
"CaPMax": `cat:"Stats"`,
"CaPMaxCa": `cat:"Stats"`,
//////// Noise
"GeNoise": `cat:"Gmisc"`,
"GeNoiseP": `cat:"Gmisc"`,
"GiNoise": `cat:"Gmisc"`,
"GiNoiseP": `cat:"Gmisc"`,
//////// Ge, Gi integration
"GeExt": `cat:"Gmisc"`,
"GeRaw": `cat:"Gmisc"`,
"GeSyn": `cat:"Gmisc" range:"2"`,
"GiRaw": `cat:"Gmisc"`,
"GiSyn": `cat:"Gmisc"`,
"GeInt": `cat:"Gmisc" range:"2"`,
"GeIntNorm": `cat:"Gmisc" range:"1"`,
"GiInt": `cat:"Gmisc" range:"2"`,
"GModRaw": `cat:"Gmisc"`,
"GModSyn": `cat:"Gmisc"`,
"SMaintP": `cat:"Gmisc"`,
"GMaintRaw": `cat:"Gmisc"`,
"GMaintSyn": `cat:"Gmisc"`,
"NeurFlags": `display:"-"`,
"CaBins": `cat:"Spikes"`,
//////// Long-term average activation, set point for synaptic scaling
"ActAvg": `cat:"Avg"`,
"AvgPct": `cat:"Avg" range:"2"`,
"TrgAvg": `cat:"Avg" range:"2"`,
"DTrgAvg": `cat:"Avg" auto-scale:"+"`,
"AvgDif": `cat:"Avg"`,
"GeBase": `cat:"Avg"`,
"GiBase": `cat:"Avg"`,
//////// Layer-level variables
"DA": `cat:"Learn" doc:"dopamine neuromodulation (layer-level variable)"`,
"ACh": `cat:"Learn" doc:"cholinergic neuromodulation (layer-level variable)"`,
"NE": `cat:"Learn" doc:"norepinepherine (noradrenaline) neuromodulation (layer-level variable)"`,
"Ser": `cat:"Learn" doc:"serotonin neuromodulation (layer-level variable)"`,
"Gated": `cat:"Learn" doc:"signals whether the layer gated (pool-level variable)"`,
"ModAct": `cat:"Learn" doc:"pool-level modulatory activity signal (for BG Matrix and Patch layers)"`,
"PoolDAD1": `cat:"Learn" doc:"pool-level dopamine D1 signal (for BG Matrix layers only)"`,
"PoolDAD2": `cat:"Learn" doc:"pool-level dopamine D2 signal (for BG Matrix layers only)"`,
}
var (
NeuronVarNames []string
NeuronVarsMap map[string]int
)
// NeuronLayerVars are pool or layer-level variables displayed as neuron layers.
var (
NeuronLayerVars = []string{"DA", "ACh", "NE", "Ser", "Gated", "ModAct", "PoolDAD1", "PoolDAD2"}
NNeuronLayerVars = len(NeuronLayerVars)
NNeuronCaBins = 20 // generic max for display
)
func init() {
NeuronVarsMap = make(map[string]int, int(NeuronVarsN)+int(NeuronAvgVarsN)+NNeuronLayerVars)
for i := Spike; i < CaBins; i++ {
vnm := i.String()
NeuronVarNames = append(NeuronVarNames, vnm)
NeuronVarsMap[vnm] = int(i)
tag := NeuronVarProps[vnm]
NeuronVarProps[vnm] = tag + ` doc:"` + strings.ReplaceAll(i.Desc(), "\n", " ") + `"`
}
for i := range NNeuronCaBins {
vnm := fmt.Sprintf("CaBin%02d", i)
NeuronVarNames = append(NeuronVarNames, vnm)
NeuronVarsMap[vnm] = int(CaBins) + i
tag := NeuronVarProps[CaBins.String()]
NeuronVarProps[vnm] = tag + ` doc:"` + strings.ReplaceAll(CaBins.Desc(), "\n", " ") + `"`
}
nVars := int(CaBins) + NNeuronCaBins
for i := ActAvg; i < NeuronAvgVarsN; i++ {
vnm := i.String()
NeuronVarNames = append(NeuronVarNames, vnm)
NeuronVarsMap[vnm] = nVars + int(i)
tag := NeuronVarProps[vnm]
NeuronVarProps[vnm] = tag + ` doc:"` + strings.ReplaceAll(i.Desc(), "\n", " ") + `"`
}
for i, vnm := range NeuronLayerVars {
NeuronVarNames = append(NeuronVarNames, vnm)
NeuronVarsMap[vnm] = i + int(nVars) + int(NeuronAvgVarsN)
}
}
// NeuronVarIndexByName returns the index of the variable in the Neuron, or error
func NeuronVarIndexByName(varNm string) (int, error) {
i, ok := NeuronVarsMap[varNm]
if !ok {
return -1, fmt.Errorf("Neuron VarByName: variable name: %s not valid", varNm)
}
return i, nil
}
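// varLookupSketch is an illustrative sketch (not part of the generated code)
// of looking up neuron variable indexes by name, including the per-bin names
// ("CaBin00", "CaBin01", ...) registered in init above.
func varLookupSketch() (spikeIdx, bin3Idx int) {
	spikeIdx, _ = NeuronVarIndexByName("Spike")  // = int(Spike) = 0
	bin3Idx, _ = NeuronVarIndexByName("CaBin03") // = int(CaBins) + 3
	return
}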
// Code generated by "goal build"; DO NOT EDIT.
//line nuclear-layer.goal:1
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
// import "fmt"
//gosl:start
// NuclearParams has parameters that apply to all cerebellum Nuclear model neurons.
// Not just cerebellar nuclei neurons: also applies to PC (Purkinje cell) layers.
type NuclearParams struct {
// ActionEnv is the total time envelope for actions to be tracked,
// in ms (cycles). Must be consistent across microzone elements.
ActionEnv int32 `default:"180" min:"0"`
// SendTimeOff is the time offset for sending activations used in learning,
// relative to the IO-driven LearnNow time. Should be 0 for CNiUp.
// Must be an even multiple of [CaBinCycles].
SendTimeOff int32 `default:"40,0" min:"0"`
// SendTimeWindow is the time window to integrate sending activations
// used in learning. Must be an even multiple of [CaBinCycles].
SendTimeWindow int32 `default:"30" min:"0"`
// ActTarget is the target activity level, as measured by CaD.
// GeBase is adapted, along with excitatory MF inputs in proportion to activity,
// which is the source of very slow synaptic decay in these pathways.
ActTarget float32 `default:"0.5" min:"0.0"`
// Decay is the rate of decay (prior to the learning rate multiplier)
// for baseline non-learning trials.
Decay float32 `default:"0.01"`
// GeBaseLRate is the learning rate for neuron-level [GeBase] baseline
// excitatory conductance, to maintain target activity levels.
GeBaseLRate float32 `default:"0.01"`
// IOLayIndex of IO (inferior olive) layer for sending error signals
// to this layer. Set via SetBuildConfig(IOLayName) setting.
IOLayIndex int32 `edit:"-"`
// SendTimeBins = SendTimeWindow / [CaBinCycles].
SendTimeBins int32 `edit:"-"`
}
func (tp *NuclearParams) Update() {
tp.SendTimeBins = tp.SendTimeWindow / CaBinCycles
}
func (tp *NuclearParams) Defaults() {
tp.ActionEnv = 180
tp.SendTimeOff = 40
tp.SendTimeWindow = 30
tp.ActTarget = 0.5
tp.Decay = 0.01
tp.GeBaseLRate = 0.01
tp.Update()
}
// IsNuclear returns true if layer type is cerebellum (Nuclear model).
func (ly *LayerParams) IsNuclear() bool {
return ly.Type >= IOLayer && ly.Type <= CNiUpLayer
}
// IOLearn gets the IO layer [LearnNow] signal.
func (ly *LayerParams) IOLearn(ctx *Context, lni, lpi, pi, ni, di uint32) {
ioi := uint32(ly.Nuclear.IOLayIndex)
ioly := GetLayers(ioi)
Neurons.Set(Neurons.Value(int(ioly.Indexes.NeurSt+lni), int(di), int(LearnNow)), int(ni), int(di), int(LearnNow))
Neurons.Set(Neurons.Value(int(ioly.Indexes.NeurSt+lni), int(di), int(TimePeak)), int(ni), int(di), int(TimePeak))
}
// NuclearLearnReset resets LearnNow if past envelope time, in new state
func (ly *LayerParams) NuclearLearnReset(ctx *Context, ni, di uint32) {
effAct := int32(Neurons.Value(int(ni), int(di), int(TimeCycle)))
if effAct == 0 {
return
}
if Neurons.Value(int(ni), int(di), int(LearnNow)) == 0.0 { // not done yet
return
}
envCyc := ctx.CyclesTotal - effAct // cycle within envelope
if envCyc >= ly.Nuclear.ActionEnv {
Neurons.Set(0.0, int(ni), int(di), int(TimeCycle))
Neurons.Set(0.0, int(ni), int(di), int(TimePeak))
Neurons.Set(0.0, int(ni), int(di), int(LearnNow))
}
}
// NuclearDWtNeuron is the neuron-level learning rule for tonically-active
// Nuclear layers (e.g., [CNeLayer]).
// Used to adjust the GeBase levels per neuron.
func (ly *LayerParams) NuclearDWtNeuron(ctx *Context, ni uint32) {
dbase := float32(0)
for di := uint32(0); di < ly.MaxData; di++ {
if Neurons.Value(int(ni), int(di), int(TimePeak)) == 1.0 { // non-baseline
continue
}
aerr := ly.Nuclear.ActTarget - Neurons.Value(int(ni), int(di), int(CaD))
dbase += aerr
}
dbase *= ly.Nuclear.GeBaseLRate
gbase := NeuronAvgs.Value(int(ni), int(GeBase))
gbase += dbase
if gbase < 0 {
gbase = 0
}
NeuronAvgs.Set(gbase, int(ni), int(GeBase))
}
// IOParams has parameters for the IO inferior olive neurons,
// which compute a temporal offset error signal between CNiIO inhibitory
// predictions and excitatory sensory input, contingent on initial
// above-threshold efferent copy motor trigger input (modulatory).
// Neuron [CaBins] are used to store TimeOff past inhibitory inputs.
type IOParams struct {
// TimeOff is the time offset for earlier predictive inhibitory inputs to
// compare against current excitatory inputs to trigger an error,
// in ms (cycles). Must be an even multiple of [CaBinCycles].
TimeOff int32 `default:"50" min:"0"`
// ErrThr is the threshold on the GeSyn - GiSyn_(t-TimeOff) difference
// to trigger an error.
ErrThr float32 `default:"0.1" min:"0.0"`
// EfferentThr is the threshold for modulatory [GModSyn] from efferent copy
// inputs to trigger an activated IO window where error comparison occurs.
// Efferent inputs can continue post-threshold, but this is the point at which
// the envelope opens.
EfferentThr float32 `default:"0.2" min:"0.0"`
// EfferentOff is the offset from the time of the efferent signal before
// meaningful sensory comparison can occur. The inhibitory prediction values
// are assumed to be strongly activated at this time.
// in ms (cycles). Must be an even multiple of [CaBinCycles].
EfferentOff int32 `default:"20" min:"0"`
// GTau is the time constant in ms for integrating [GeSyn] and [GiSyn]
// excitatory and inhibitory conductances for comparison.
// Integration goes into GaP (excitatory) and GaM (inhibitory), only for IO neurons.
GTau float32 `default:"20"`
// Dt = 1 / Tau
GDt float32 `display:"-"`
pad, pad1 float32
}
func (tp *IOParams) Update() {
tp.GDt = 1.0 / tp.GTau
}
func (tp *IOParams) Defaults() {
tp.TimeOff = 50
tp.ErrThr = 0.1
tp.EfferentThr = 0.2
tp.EfferentOff = 20
tp.GTau = 20
tp.Update()
}
// TODO: IO neurons integrate across many distinct comparisons!
// here, we just have 1 IO per pairwise comparison.
// Need to get layer and do indexing or something. Figure out
// best way to organize later.
// IOUpdate is the main IO update routine, called in
// LayerParams::PostSpikeSpecial
func (ly *LayerParams) IOUpdate(ctx *Context, lpi, pi, ni, di uint32) {
cycTot := float32(ctx.CyclesTotal)
effAct := int32(Neurons.Value(int(ni), int(di), int(TimeCycle)))
envCyc := ctx.CyclesTotal - effAct // cycle within envelope
gaP := Neurons.Value(int(ni), int(di), int(GaP)) // IOe excitatory input
gaP += ly.IO.GDt * (Neurons.Value(int(ni), int(di), int(GeSyn)) - gaP)
Neurons.Set(gaP, int(ni), int(di), int(GaP))
gaM := Neurons.Value(int(ni), int(di), int(GaM))
gaM += ly.IO.GDt * (Neurons.Value(int(ni), int(di), int(GiSyn)) - gaM)
Neurons.Set(gaM, int(ni), int(di), int(GaM))
inhibAct := gaM
// add CaBinCycles to ensure that the full bin is filled
if effAct > 0 && envCyc <= ly.IO.EfferentOff+CaBinCycles {
inhibAct = 1.0
}
CaBinIncrement(inhibAct, ctx.CyclesTotal, ni, di) // always store
Neurons.Set(0.0, int(ni), int(di), int(TimeDiff)) // set below for display
Neurons.Set(0.0, int(ni), int(di), int(Spike)) // default is no spike
oldInhib := float32(0)
nbins := ly.IO.TimeOff / CaBinCycles
nbins = max(1, nbins-1)
stcyc := ctx.CyclesTotal - ly.IO.TimeOff
for i := range nbins {
bi := CaBinForCycle(stcyc + i*CaBinCycles)
oldInhib += Neurons.Value(int(ni), int(di), int(CaBins+NeuronVars(bi)))
}
oldInhib /= float32(nbins)
Neurons.Set(oldInhib, int(ni), int(di), int(GaD))
if Neurons.Value(int(ni), int(di), int(LearnNow)) > 0 { // already learned, done until cleared in NuclearLearnReset
Neurons.Set(0.0, int(ni), int(di), int(Spike))
return
}
if effAct == 0 {
Neurons.Set(0.0, int(ni), int(di), int(Spike))
if Neurons.Value(int(ni), int(di), int(GModSyn)) > ly.IO.EfferentThr { // efferent always activates.
Neurons.Set(cycTot, int(ni), int(di), int(TimeCycle)) // efferent activation cycle
Neurons.Set(0.0, int(ni), int(di), int(TimePeak))
Neurons.Set(0.0, int(ni), int(di), int(LearnNow))
}
return
}
if envCyc <= (ly.IO.TimeOff + ly.IO.EfferentOff) { // no comparison until the minimum offset window has elapsed
return
}
if envCyc >= ly.Nuclear.ActionEnv { // reached the end of the envelope without an error: baseline spike
Neurons.Set(cycTot, int(ni), int(di), int(LearnNow))
Neurons.Set(1.0, int(ni), int(di), int(Spike)) // baseline spike
return
}
errVal := gaP - oldInhib
Neurons.Set(errVal, int(ni), int(di), int(TimeDiff))
if gaP > ly.Learn.Timing.LearnThr && errVal > ly.IO.ErrThr {
// if ni == 1664 {
// fmt.Println("act:", gaP, oldInhib, errVal)
// }
Neurons.Set(1.0, int(ni), int(di), int(Spike)) // error spike
Neurons.Set(cycTot, int(ni), int(di), int(LearnNow)) // record point of error
Neurons.Set(1.0, int(ni), int(di), int(TimePeak)) // records that we got err spike
}
}
//gosl:end
// NuclearPostBuild does post-Build config of Nuclear models of the
// cerebellum based on BuildConfig options.
func (ly *Layer) NuclearPostBuild() {
ly.Params.Nuclear.IOLayIndex = ly.BuildConfigFindLayer("IOLayName", true)
}
// LinearDefaults sets parameters to allow neurons to linearly encode
// values in their rate of firing, to the extent possible.
func (ly *LayerParams) LinearDefaults() {
// turn off accommodation currents
ly.Acts.Mahp.Gk = 0
ly.Acts.Sahp.Gk = 0
ly.Acts.KNa.On.SetBool(false)
// no sustained:
ly.Acts.NMDA.Ge = 0
ly.Acts.GabaB.Gk = 0
}
// NuclearDefaults is called in Defaults for all Nuclear layers.
func (ly *LayerParams) NuclearDefaults() {
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
ly.Acts.Init.GeBase = 0.25
ly.Inhib.Layer.On.SetBool(false)
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Acts.Decay.AHP = 0.0 // clear long
ly.LinearDefaults()
// ly.Learn.RLRate.SigmoidMin = 1.0 // 1.0 generally better but worth trying 0.05 too
// for _, pj := range lly.RecvPaths {
// pj.Params.SWts.Init.Mean = 0.8
// pj.Params.SWts.Init.Var = 0.0
// if pj.Send.Type != CNiIOLayer {
// pj.Params.SetFixedWts()
// } else {
// pj.Params.SWts.Init.Mean = 0.5
// }
// }
}
// IODefaults is called in Defaults for the [IOLayer] type.
func (ly *LayerParams) IODefaults() {
ly.NuclearDefaults()
ly.Learn.Timing.On.SetBool(false)
ly.Acts.Init.GeBase = 0
}
// CNiIODefaults is called in Defaults for the [CNiIOLayer] type.
func (ly *LayerParams) CNiIODefaults() {
ly.NuclearDefaults()
ly.Acts.Init.GeBase = 0
ly.Nuclear.ActTarget = 0
ly.Nuclear.SendTimeOff = 40
}
// CNiUpDefaults is called in Defaults for the [CNiUpLayer] type.
func (ly *LayerParams) CNiUpDefaults() {
ly.NuclearDefaults()
ly.Acts.Init.GeBase = 0
ly.Nuclear.ActTarget = 0
ly.Nuclear.SendTimeOff = 10 // match the excitatory
}
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import "github.com/emer/emergent/v2/paths"
// AddNuclearCNUp adds the Nuclear model cerebellar upbound nucleus layers
// for adaptive filtering of the given sensory input layer, from which they
// copy their shape. The actEff layer is the efferent copy of the action
// layer, which sends a full modulatory projection.
// actEnv is the default Nuclear.ActionEnv envelope timing value in cycles.
func (net *Network) AddNuclearCNUp(sense, actEff *Layer, actEnv int, space float32) (ioUp, cniIOUp, cniUp, cneUp *Layer) {
name := sense.Name
shp := sense.Shape
if shp.NumDims() == 2 {
ioUp = net.AddLayer2D(name+"IO", IOLayer, shp.DimSize(0), shp.DimSize(1))
cniIOUp = net.AddLayer2D(name+"CNiIO", CNiIOLayer, shp.DimSize(0), shp.DimSize(1))
cniUp = net.AddLayer2D(name+"CNiUp", CNiUpLayer, shp.DimSize(0), shp.DimSize(1))
cneUp = net.AddLayer2D(name+"CNeUp", CNeLayer, shp.DimSize(0), shp.DimSize(1))
} else {
ioUp = net.AddLayer4D(name+"IO", IOLayer, shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
cniIOUp = net.AddLayer4D(name+"CNiIO", CNiIOLayer, shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
cniUp = net.AddLayer4D(name+"CNiUp", CNiUpLayer, shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
cneUp = net.AddLayer4D(name+"CNeUp", CNeLayer, shp.DimSize(0), shp.DimSize(1), shp.DimSize(2), shp.DimSize(3))
}
cniIOUp.SetBuildConfig("IOLayName", ioUp.Name)
cniUp.SetBuildConfig("IOLayName", ioUp.Name)
cneUp.SetBuildConfig("IOLayName", ioUp.Name)
cniIOUp.AddClass("CNLayer", "CNiLayer")
cniUp.AddClass("CNLayer", "CNiLayer")
cneUp.AddClass("CNLayer")
aep := func(ly *LayerParams) {
ly.Nuclear.ActionEnv = int32(actEnv)
}
ioUp.AddDefaultParams(aep)
cniIOUp.AddDefaultParams(aep)
cniUp.AddDefaultParams(aep)
cneUp.AddDefaultParams(aep)
full := paths.NewFull()
one2one := paths.NewPoolOneToOne()
pt := net.ConnectLayers(actEff, ioUp, full, ForwardPath).AddClass("EffToIO")
pt.AddDefaultParams(func(pt *PathParams) {
pt.SetFixedWts()
pt.Com.GType = ModulatoryG
})
pt = net.ConnectLayers(sense, ioUp, one2one, ForwardPath).AddClass("SenseToIO")
pt.AddDefaultParams(func(pt *PathParams) {
pt.SetFixedWts()
})
pt = net.ConnectLayers(cniIOUp, ioUp, one2one, InhibPath).AddClass("CNiIOToIO")
pt.AddDefaultParams(func(pt *PathParams) {
pt.SetFixedWts()
})
pt = net.ConnectLayers(cniUp, cneUp, one2one, CNeUpPath).AddClass("CNiToCNe")
pt.AddDefaultParams(func(pt *PathParams) {
pt.Com.GType = InhibitoryG
})
// CNiIO in front, as it is the most important for learning
cniUp.PlaceBehind(cniIOUp, space)
cneUp.PlaceBehind(cniUp, space)
ioUp.PlaceBehind(cneUp, space)
return
}
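// Example usage (a hypothetical sketch; layer names, sizes, and timing values
// are illustrative only):
//
//	sense := net.AddLayer2D("Somato", InputLayer, 8, 8)
//	actEff := net.AddLayer2D("MotorEff", InputLayer, 8, 8)
//	io, cniIO, cni, cne := net.AddNuclearCNUp(sense, actEff, 200, 2)
//	_, _, _, _ = io, cniIO, cni, cne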
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"reflect"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/lab/base/mpi"
"github.com/cogentcore/yaegi/interp"
"github.com/emer/emergent/v2/params"
)
// type aliases for params generic types that we use:
type (
// LayerSheets contains Layer parameter Sheets.
LayerSheets = params.Sheets[*LayerParams]
// LayerSheet is one Layer parameter Sheet.
LayerSheet = params.Sheet[*LayerParams]
// LayerSel is one Layer parameter Selector.
LayerSel = params.Sel[*LayerParams]
// LayerSearches is a list of parameter Search elements.
LayerSearches = params.Searches[*LayerParams]
// PathSheets contains Path parameter Sheets.
PathSheets = params.Sheets[*PathParams]
// PathSheet is one Path parameter Sheet.
PathSheet = params.Sheet[*PathParams]
// PathSel is one Path parameter Selector.
PathSel = params.Sel[*PathParams]
// PathSearches is a list of parameter Search elements.
PathSearches = params.Searches[*PathParams]
)
// Params contains the [LayerParams] and [PathParams] parameter setting functions
// provided by the [emergent] [params] package.
type Params struct {
// Layer has the parameters to apply to the [LayerParams] for layers.
Layer LayerSheets `display:"-"`
// Path has the parameters to apply to the [PathParams] for paths.
Path PathSheets `display:"-"`
// ExtraSheets has optional additional sheets of parameters to apply
// after the default Base sheet. Use "Script" for default Script sheet.
// Multiple names separated by spaces can be used (don't put spaces in Sheet names!)
ExtraSheets string
// Tag is an optional additional tag to add to log file names to identify
// a specific run of the model (typically set by a config file or args).
Tag string
// Script is a parameter setting script, which adds to the Layer and Path sheets
// typically using the "Script" set name.
Script string `display:"-"`
// Interp is the yaegi interpreter for running the script.
Interp *interp.Interpreter `display:"-"`
}
// ScriptParams is a template for yaegi interpreted parameters
var ScriptParams = `sim.Sim.Params.Layer["Script"] = &axon.LayerSheet{
&axon.LayerSel{Sel:"Layer", Set: func(ly *axon.LayerParams) {
// set params
}},
}
sim.Sim.Params.Path["Script"] = &axon.PathSheet{
&axon.PathSel{Sel:"Path", Set: func(pt *axon.PathParams) {
// set params
}},
}
`
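// A script assigned to the Script field (typically loaded from a config file,
// based on the ScriptParams template above) is evaluated by the yaegi
// interpreter in ApplyAll before the ExtraSheets are applied.
// A minimal usage sketch, assuming a Sim value was passed to Config and
// myScript is a hypothetical script string:
//
//	sim.Params.Script = myScript
//	sim.Params.ExtraSheets = "Script"
//	sim.Params.ApplyAll(net)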
// Config configures the Layer and Path sheets and the ExtraSheets and Tag fields,
// and initializes the yaegi interpreter for dynamic parameter scripts.
// Pass a reflect.ValueOf(*Sim) to initialize the yaegi interpreter.
// Sim must have Params in a field called Params.
func (pr *Params) Config(layer LayerSheets, path PathSheets, extraSheets, tag string, sim reflect.Value) {
pr.Layer = layer
pr.Path = path
report := ""
if extraSheets != "" {
pr.ExtraSheets = extraSheets
report += " ExtraSheets: " + extraSheets
}
if tag != "" {
pr.Tag = tag
report += " Tag: " + tag
}
if report != "" {
mpi.Printf("Params Set: %s\n", report)
}
pr.Interp = interp.New(interp.Options{})
pr.Interp.Use(interp.Exports{
"github.com/emer/axon/axon": map[string]reflect.Value{
"LayerParams": reflect.ValueOf((*LayerParams)(nil)),
"PathParams": reflect.ValueOf((*PathParams)(nil)),
"LayerSel": reflect.ValueOf((*LayerSel)(nil)),
"LayerSheet": reflect.ValueOf((*LayerSheet)(nil)),
"LayerSheets": reflect.ValueOf((*LayerSheets)(nil)),
"PathSel": reflect.ValueOf((*PathSel)(nil)),
"PathSheet": reflect.ValueOf((*PathSheet)(nil)),
"PathSheets": reflect.ValueOf((*PathSheets)(nil)),
},
"github.com/emer/axon/sim/sim": map[string]reflect.Value{
"Sim": sim,
},
})
pr.Interp.ImportUsed()
}
// Name returns the name of the current set of parameters, including the Tag.
// If ExtraSheets is empty, it returns "Base"; otherwise it returns ExtraSheets.
func (pr *Params) Name() string {
rn := ""
if pr.Tag != "" {
rn += pr.Tag + "_"
}
if pr.ExtraSheets == "" {
rn += "Base"
} else {
rn += pr.ExtraSheets
}
return rn
}
// RunName returns the name of a simulation run based on params Name()
// and starting run number.
func (pr *Params) RunName(startRun int) string {
return fmt.Sprintf("%s_%03d", pr.Name(), startRun)
}
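// For example, with Tag = "lr01" and empty ExtraSheets, Name() returns
// "lr01_Base" and RunName(3) returns "lr01_Base_003"; with an empty Tag and
// ExtraSheets = "Script", Name() returns "Script".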
// ApplyAll applies all parameters to the given network,
// using the "Base" Sheet and then any ExtraSheets,
// for Layer and Path params (each must have the named sheets,
// for proper error checking in case of typos).
// If a Script is set, it is evaluated before the ExtraSheets are applied.
func (pr *Params) ApplyAll(net *Network) {
pr.ApplySheet(net, "Base")
if pr.ExtraSheets == "" {
return
}
if pr.Script != "" {
_, err := pr.Interp.Eval(pr.Script)
if err != nil {
fmt.Println(pr.Script)
errors.Log(err)
}
}
sps := strings.Fields(pr.ExtraSheets)
for _, ps := range sps {
if ps == "Base" {
continue
}
pr.ApplySheet(net, ps)
}
}
// ApplySheet applies parameters for given [params.Sheet] name
// for Layer and Path params (each must have the named sheets,
// for proper error checking in case of typos).
func (pr *Params) ApplySheet(net *Network, sheetName string) error {
lsheet, err := pr.Layer.SheetByName(sheetName)
if err != nil {
return err
}
psheet, err := pr.Path.SheetByName(sheetName)
if err != nil {
return err
}
lsheet.SelMatchReset()
psheet.SelMatchReset()
ApplyParamSheets(net, lsheet, psheet)
lsheet.SelNoMatchWarn(sheetName, net.Name)
psheet.SelNoMatchWarn(sheetName, net.Name)
return nil
}
// ApplyParamSheets applies Layer and Path parameters from given sheets,
// returning true if any applied.
func ApplyParamSheets(net *Network, layer *params.Sheet[*LayerParams], path *params.Sheet[*PathParams]) bool {
appl := ApplyLayerSheet(net, layer)
appp := ApplyPathSheet(net, path)
return appl || appp
}
// ApplyLayerSheet applies Layer parameters from given sheet,
// returning true if any applied.
func ApplyLayerSheet(net *Network, sheet *params.Sheet[*LayerParams]) bool {
applied := false
for _, ly := range net.Layers {
app := sheet.Apply(ly.Params)
ly.UpdateParams()
if app {
applied = true
}
}
return applied
}
// ApplyPathSheet applies Path parameters from given sheet,
// returning true if any applied.
func ApplyPathSheet(net *Network, sheet *params.Sheet[*PathParams]) bool {
applied := false
for _, ly := range net.Layers {
for _, pt := range ly.RecvPaths {
app := sheet.Apply(pt.Params)
pt.UpdateParams()
if app {
applied = true
}
}
}
return applied
}
// ApplyLayerSearch applies Layer [params.Searches] to network
// for parameter at given param index within that search,
// returning error for invalid index, and if no matching selections.
// Returns string descriptor for parameter to record in job.label.
func ApplyLayerSearch(net *Network, sr params.Searches[*LayerParams], paramIndex int) (string, error) {
ps, val, lbl, err := sr.SearchValue(paramIndex)
if err != nil {
return lbl, err
}
applied := false
for _, ly := range net.Layers {
app := ps.Apply(ly.Params, val)
ly.UpdateParams()
if app {
applied = true
}
}
if !applied {
return lbl, fmt.Errorf("No matches for Layer parameter search: %s", lbl)
}
return lbl, nil
}
// ApplyPathSearch applies Path [params.Searches] to network
// for parameter at given param index within that search,
// returning error for invalid index, and if no matching selections.
// Returns string descriptor for parameter to record in job.label.
func ApplyPathSearch(net *Network, sr params.Searches[*PathParams], paramIndex int) (string, error) {
ps, val, lbl, err := sr.SearchValue(paramIndex)
if err != nil {
return lbl, err
}
applied := false
for _, ly := range net.Layers {
for _, pt := range ly.RecvPaths {
app := ps.Apply(pt.Params, val)
pt.UpdateParams()
if app {
applied = true
}
}
}
if !applied {
return lbl, fmt.Errorf("No matches for Path parameter search: %s", lbl)
}
return lbl, nil
}
// Code generated by "goal build"; DO NOT EDIT.
//line path.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"io"
"log"
"strconv"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/indent"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/paths"
"github.com/emer/emergent/v2/weights"
)
// https://github.com/kisvegabor/abbreviations-in-code suggests Buf instead of Buff
// index naming:
// syi = path-relative synapse index (per existing usage)
// syni = network-relative synapse index -- add SynStIndex to syi
// Path implements axon spiking communication and learning.
type Path struct {
emer.PathBase
// path parameters.
Params *PathParams
// sending layer for this pathway.
Send *Layer
// receiving layer for this pathway.
Recv *Layer
// type of pathway.
Type PathTypes
// DefaultParams are functions that apply parameters prior to user-set
// parameters. These are useful for specific functionality in specialized
// brain areas (e.g., Rubicon, BG, etc.) that is not associated with a path
// type, which would otherwise be used to hard-code initial default parameters.
DefaultParams []func(pt *PathParams) `display:"-"`
// average and maximum number of recv connections in the receiving layer
RecvConNAvgMax minmax.AvgMax32 `table:"-" edit:"-" display:"inline"`
// average and maximum number of sending connections in the sending layer
SendConNAvgMax minmax.AvgMax32 `table:"-" edit:"-" display:"inline"`
// start index into global Synapse array:
SynStIndex uint32 `display:"-"`
// number of synapses in this pathway
NSyns uint32 `display:"-"`
// starting offset and N cons for each recv neuron, for indexing into the RecvSynIndex array of indexes into the Syns synapses, which are organized sender-based. This is locally managed during build process, but also copied to network global PathRecvCons slice for GPU usage.
RecvCon []StartN `display:"-"`
// index into Syns synaptic state for each sending unit and connection within that, for the sending pathway which does not own the synapses, and instead indexes into recv-ordered list
RecvSynIndex []uint32 `display:"-"`
// for each recv synapse, this is the index of the *sending* neuron. It is generally preferable to use the Synapse SendIndex where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
RecvConIndex []uint32 `display:"-"`
// starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally managed during build process, but also copied to network global PathSendCons slice for GPU usage.
SendCon []StartN `display:"-"`
// index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIndex where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
SendConIndex []uint32 `display:"-"`
}
// emer.Path interface
func (pt *Path) RecvLayer() emer.Layer { return pt.Recv }
func (pt *Path) SendLayer() emer.Layer { return pt.Send }
func (pt *Path) TypeName() string { return pt.Type.String() }
func (pt *Path) TypeNumber() int { return int(pt.Type) }
func (pt *Path) AddClass(cls ...string) *Path {
pt.PathBase.AddClass(cls...)
return pt
}
func (pt *Path) SetPattern(pat paths.Pattern) *Path {
pt.Pattern = pat
return pt
}
func (pt *Path) Defaults() {
if pt.Params == nil {
return
}
pt.Params.Type = pt.Type
pt.Params.Defaults()
switch pt.Type {
case InhibPath:
pt.Params.SWts.Adapt.On.SetBool(false)
case BackPath:
pt.Params.PathScale.Rel = 0.1
case RWPath, TDPredPath:
pt.Params.RLPredDefaults()
case BLAPath:
pt.Params.BLADefaults()
case HipPath:
pt.Params.HipDefaults()
case VSPatchPath:
pt.Params.VSPatchDefaults()
case VSMatrixPath:
pt.Params.MatrixDefaults()
case DSMatrixPath:
pt.Params.MatrixDefaults()
}
pt.applyDefaultParams()
pt.UpdateParams()
}
// Update is the interface method that does a local update of struct values.
func (pt *Path) Update() {
if pt.Params == nil {
return
}
if pt.Params.Type == InhibPath { // || pt.Params.Type == CNiIOToOutPath {
pt.Params.Com.GType = InhibitoryG
}
pt.Params.Update()
}
// UpdateParams updates all params given any changes
// that might have been made to individual values
func (pt *Path) UpdateParams() {
pt.Update()
}
// Connect sets the connectivity between two layers and the pattern to use in interconnecting them
func (pt *Path) Connect(slay, rlay *Layer, pat paths.Pattern, typ PathTypes) {
pt.Send = slay
pt.Recv = rlay
pt.Pattern = pat
pt.Type = typ
pt.Name = pt.Send.Name + "To" + pt.Recv.Name
}
// todo: move to emer?
// Validate tests for non-nil settings for the pathway -- returns error
// message or nil if no problems (and logs them if logmsg = true)
func (pt *Path) Validate(logmsg bool) error {
emsg := ""
if pt.Pattern == nil {
emsg += "Pattern is nil; "
}
if pt.Recv == nil {
emsg += "Recv is nil; "
}
if pt.Send == nil {
emsg += "Send is nil; "
}
if emsg != "" {
err := errors.New(emsg)
if logmsg {
errors.Log(err)
}
return err
}
return nil
}
// RecvSynIxs returns the receiving synapse indexes for given recv unit index
// within the receiving layer, to be iterated over for recv-based processing.
func (pt *Path) RecvSynIxs(ri uint32) []uint32 {
if int(ri) >= len(pt.RecvCon) {
return nil
}
rcon := pt.RecvCon[ri]
return pt.RecvSynIndex[rcon.Start : rcon.Start+rcon.N]
}
// Build constructs the full connectivity among the layers.
// Calls Validate and returns error if invalid.
// Pat.Connect is called to get the pattern of the connection.
// Then the connection indexes are configured according to that pattern.
// Does NOT allocate synapses -- these are set by Network from global slice.
func (pt *Path) Build() error {
if pt.Off {
return nil
}
err := pt.Validate(true)
if err != nil {
return err
}
ssh := &pt.Send.Shape
rsh := &pt.Recv.Shape
sendn, recvn, cons := pt.Pattern.Connect(ssh, rsh, pt.Recv == pt.Send)
slen := ssh.Len()
rlen := rsh.Len()
tcons := pt.SetConStartN(&pt.SendCon, &pt.SendConNAvgMax, sendn)
tconr := pt.SetConStartN(&pt.RecvCon, &pt.RecvConNAvgMax, recvn)
if tconr != tcons {
log.Printf("%v programmer error: total recv cons %v != total send cons %v\n", pt.String(), tconr, tcons)
}
// these are large allocs, as number of connections tends to be ~quadratic
// These indexes are not used in GPU computation -- only for CPU side.
pt.RecvConIndex = make([]uint32, tconr)
pt.RecvSynIndex = make([]uint32, tcons)
pt.SendConIndex = make([]uint32, tcons)
sconN := make([]uint32, slen) // temporary memory needed to track the current number of sending cons
cbits := cons.Values
for ri := 0; ri < rlen; ri++ {
rbi := ri * slen // recv bit index
rcon := pt.RecvCon[ri]
rci := uint32(0)
for si := 0; si < slen; si++ {
if !cbits.Index(rbi + si) { // no connection
continue
}
if rci >= rcon.N {
log.Printf("%v programmer error: recv target total con number: %v exceeded at recv idx: %v, send idx: %v\n", pt.String(), rcon.N, ri, si)
break
}
pt.RecvConIndex[rcon.Start+rci] = uint32(si)
sci := sconN[si]
scon := pt.SendCon[si]
if sci >= scon.N {
log.Printf("%v programmer error: send target total con number: %v exceeded at recv idx: %v, send idx: %v\n", pt.String(), scon.N, ri, si)
break
}
pt.SendConIndex[scon.Start+sci] = uint32(ri)
pt.RecvSynIndex[rcon.Start+rci] = scon.Start + sci
(sconN[si])++
rci++
}
}
return nil
}
// SetConStartN sets the given con StartN values from the n tensor from the Pattern.
// Returns the total number of connections for this direction.
func (pt *Path) SetConStartN(con *[]StartN, avgmax *minmax.AvgMax32, tn *tensor.Int32) uint32 {
ln := tn.Len()
tnv := tn.Values
*con = make([]StartN, ln)
idx := uint32(0)
avgmax.Init()
for i := 0; i < ln; i++ {
nv := uint32(tnv[i])
(*con)[i] = StartN{N: nv, Start: idx}
idx += nv
avgmax.UpdateValue(float32(nv), int32(i))
}
avgmax.CalcAvg()
return idx
}
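// For example (hypothetical connection counts): if the pattern's n tensor
// holds [2, 3, 0, 1], the resulting StartN values are {Start:0 N:2},
// {Start:2 N:3}, {Start:5 N:0}, {Start:5 N:1}, and the returned total is 6.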
// String satisfies fmt.Stringer for path
func (pt *Path) String() string {
str := ""
if pt.Recv == nil {
str += "recv=nil; "
} else {
str += pt.Recv.Name + " <- "
}
if pt.Send == nil {
str += "send=nil"
} else {
str += pt.Send.Name
}
if pt.Pattern == nil {
str += " Pat=nil"
} else {
str += " Pat=" + pt.Pattern.Name()
}
return str
}
// AddDefaultParams adds given default param setting function.
func (pt *Path) AddDefaultParams(fun func(pt *PathParams)) {
pt.DefaultParams = append(pt.DefaultParams, fun)
}
// applyDefaultParams applies DefaultParams default parameters.
// Called by Path.Defaults()
func (pt *Path) applyDefaultParams() {
for _, f := range pt.DefaultParams {
f(pt.Params)
}
}
func (pt *Path) SynVarNames() []string {
return SynapseVarNames
}
// SynVarProps returns properties for variables
func (pt *Path) SynVarProps() map[string]string {
return SynapseVarProps
}
// SynIndex returns the index of the synapse between given send, recv unit indexes
// (1D, flat indexes, layer relative).
// Returns -1 if synapse not found between these two neurons.
// Requires searching within connections for sending unit.
func (pt *Path) SynIndex(sidx, ridx int) int {
if sidx >= len(pt.SendCon) {
return -1
}
scon := pt.SendCon[sidx]
if scon.N == 0 {
return -1
}
firstRi := int(pt.SendConIndex[scon.Start])
lastRi := int(pt.SendConIndex[scon.Start+scon.N-1])
if ridx < firstRi || ridx > lastRi { // fast reject -- paths are always in order!
return -1
}
// start at an index proportional to ridx relative to firstRi
up := int32(0)
if lastRi > firstRi {
up = int32(float32(scon.N) * float32(ridx-firstRi) / float32(lastRi-firstRi))
}
dn := up - 1
for {
doing := false
if up < int32(scon.N) {
doing = true
sconi := int32(scon.Start) + up
if int(pt.SendConIndex[sconi]) == ridx {
return int(sconi)
}
up++
}
if dn >= 0 {
doing = true
sconi := int32(scon.Start) + dn
if int(pt.SendConIndex[sconi]) == ridx {
return int(sconi)
}
dn--
}
if !doing {
break
}
}
return -1
}
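// The search above starts at an index interpolated between the first and last
// receiving indexes for the sending neuron (SendConIndex entries are stored in
// ascending order within each sender) and probes outward in both directions.
// For example (hypothetical indexes): with entries [3, 7, 9, 14] and ridx = 9,
// the initial guess is int32(4*(9-3)/(14-3)) = 2, which hits 9 immediately.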
// SynVarIndex returns the index of given variable within the synapse,
// according to *this path's* SynVarNames() list (using a map to lookup index),
// or -1 and error message if not found.
func (pt *Path) SynVarIndex(varNm string) (int, error) {
return SynapseVarByName(varNm)
}
// SynVarNum returns the number of synapse-level variables
// for this path. This is needed for extending indexes in derived types.
func (pt *Path) SynVarNum() int {
return len(SynapseVarNames)
}
// NumSyns returns the number of synapses for this path as a 1D array.
// This is the max index for SynValue1D and the number of values set by SynValues.
func (pt *Path) NumSyns() int {
return int(pt.NSyns)
}
// SynValue1D returns value of given variable index (from SynVarIndex) on given SynIndex.
// Returns NaN on invalid index.
// This is the core synapse var access method used by other methods.
func (pt *Path) SynValue1D(varIndex int, synIndex int) float32 {
if synIndex < 0 || synIndex >= int(pt.NSyns) {
return math32.NaN()
}
if varIndex < 0 || varIndex >= pt.SynVarNum() {
return math32.NaN()
}
syni := pt.SynStIndex + uint32(synIndex)
if varIndex < int(SynapseVarsN) {
return Synapses.Value(int(syni), int(SynapseVars(varIndex)))
} else {
return SynapseTraces.Value(int(syni), int(0), int(SynapseTraceVars(varIndex-int(SynapseVarsN))))
}
}
// SynValues sets values of given variable name for each synapse,
// using the natural ordering of the synapses (sender based for Axon),
// into given float32 slice (only resized if not big enough).
// Returns error on invalid var name.
func (pt *Path) SynValues(vals *[]float32, varNm string) error {
vidx, err := pt.EmerPath.SynVarIndex(varNm)
if err != nil {
return err
}
ns := int(pt.NSyns)
if *vals == nil || cap(*vals) < ns {
*vals = make([]float32, ns)
} else if len(*vals) < ns {
*vals = (*vals)[0:ns]
}
slay := pt.Send
i := 0
for lni := uint32(0); lni < slay.NNeurons; lni++ {
scon := pt.SendCon[lni]
for syi := scon.Start; syi < scon.Start+scon.N; syi++ {
(*vals)[i] = pt.SynValue1D(vidx, i)
i++
}
}
return nil
}
// SynVal1DDi returns value of given variable index (from SynVarIndex) on given SynIndex.
// Returns NaN on invalid index.
// This is the core synapse var access method used by other methods.
// Includes Di data parallel index for data-parallel synaptic values.
func (pt *Path) SynVal1DDi(varIndex int, synIndex int, di int) float32 {
if synIndex < 0 || synIndex >= int(pt.NSyns) {
return math32.NaN()
}
if varIndex < 0 || varIndex >= pt.SynVarNum() {
return math32.NaN()
}
syni := pt.SynStIndex + uint32(synIndex)
if varIndex < int(SynapseVarsN) {
return Synapses.Value(int(syni), int(SynapseVars(varIndex)))
} else {
return SynapseTraces.Value(int(syni), int(di), int(SynapseTraceVars(varIndex-int(SynapseVarsN))))
}
}
// SynValDi returns value of given variable name on the synapse
// between given send, recv unit indexes (1D, flat indexes).
// Returns math32.NaN() for access errors (see SynValTry for error message)
// Includes Di data parallel index for data-parallel synaptic values.
func (pt *Path) SynValDi(varNm string, sidx, ridx int, di int) float32 {
vidx, err := pt.EmerPath.SynVarIndex(varNm)
if err != nil {
return math32.NaN()
}
syi := pt.SynIndex(sidx, ridx)
return pt.SynVal1DDi(vidx, syi, di)
}
///////////////////////////////////////////////////////////////////////
// Weights
// WriteWeightsJSON writes the weights from this pathway
// from the receiver-side perspective in a JSON text format.
func (pt *Path) WriteWeightsJSON(w io.Writer, depth int) {
slay := pt.Send
rlay := pt.Recv
nr := int(rlay.NNeurons)
w.Write(indent.TabBytes(depth))
w.Write([]byte("{\n"))
depth++
w.Write(indent.TabBytes(depth))
w.Write([]byte(fmt.Sprintf("\"From\": %q,\n", slay.Name)))
w.Write(indent.TabBytes(depth))
w.Write([]byte(fmt.Sprintf("\"Rs\": [\n")))
depth++
for ri := 0; ri < nr; ri++ {
rc := pt.RecvCon[ri]
syIndexes := pt.RecvSynIxs(uint32(ri))
w.Write(indent.TabBytes(depth))
w.Write([]byte("{\n"))
depth++
w.Write(indent.TabBytes(depth))
w.Write([]byte(fmt.Sprintf("\"Ri\": %v,\n", ri)))
w.Write(indent.TabBytes(depth))
w.Write([]byte(fmt.Sprintf("\"N\": %v,\n", rc.N)))
w.Write(indent.TabBytes(depth))
w.Write([]byte("\"Si\": [ "))
for ci, syi := range syIndexes {
syni := pt.SynStIndex + syi
si := pt.Params.SynSendLayerIndex(syni)
w.Write([]byte(fmt.Sprintf("%v", si)))
if ci == int(rc.N-1) {
w.Write([]byte(" "))
} else {
w.Write([]byte(", "))
}
}
w.Write([]byte("],\n"))
w.Write(indent.TabBytes(depth))
w.Write([]byte("\"Wt\": [ "))
for ci, syi := range syIndexes {
syni := pt.SynStIndex + syi
w.Write([]byte(strconv.FormatFloat(float64(Synapses.Value(int(syni), int(Wt))), 'g', weights.Prec, 32)))
if ci == int(rc.N-1) {
w.Write([]byte(" "))
} else {
w.Write([]byte(", "))
}
}
w.Write([]byte("],\n"))
w.Write(indent.TabBytes(depth))
w.Write([]byte("\"Wt1\": [ ")) // Wt1 is SWt
for ci, syi := range syIndexes {
syni := pt.SynStIndex + syi
w.Write([]byte(strconv.FormatFloat(float64(Synapses.Value(int(syni), int(SWt))), 'g', weights.Prec, 32)))
if ci == int(rc.N-1) {
w.Write([]byte(" "))
} else {
w.Write([]byte(", "))
}
}
w.Write([]byte("]\n"))
depth--
w.Write(indent.TabBytes(depth))
if ri == nr-1 {
w.Write([]byte("}\n"))
} else {
w.Write([]byte("},\n"))
}
}
depth--
w.Write(indent.TabBytes(depth))
w.Write([]byte("]\n"))
depth--
w.Write(indent.TabBytes(depth))
w.Write([]byte("}")) // note: leave unterminated as outer loop needs to add , or just \n depending
}
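// The resulting JSON for one pathway has the following shape (hypothetical
// layer name and values, shown for illustration):
//
//	{
//		"From": "Input",
//		"Rs": [
//			{
//				"Ri": 0,
//				"N": 2,
//				"Si": [ 0, 1 ],
//				"Wt": [ 0.5, 0.5 ],
//				"Wt1": [ 0.5, 0.5 ]
//			}
//		]
//	}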
// SetWeights sets the weights for this pathway from weights.Path decoded values
func (pt *Path) SetWeights(pw *weights.Path) error {
var err error
for i := range pw.Rs {
pr := &pw.Rs[i]
hasWt1 := len(pr.Wt1) >= len(pr.Si)
for si := range pr.Si {
if hasWt1 {
er := pt.SetSynValue("SWt", pr.Si[si], pr.Ri, pr.Wt1[si])
if er != nil {
err = er
}
}
er := pt.SetSynValue("Wt", pr.Si[si], pr.Ri, pr.Wt[si]) // updates lin wt
if er != nil {
err = er
}
}
}
return err
}
// ParamsString returns a listing of all parameters in this Pathway.
// If nonDefault is true, only report those not at their default values.
func (pt *Path) ParamsString(nonDefault bool) string {
var b strings.Builder
b.WriteString(" //////// Path: " + pt.Name + "\n")
b.WriteString(pt.Params.ParamsString(nonDefault))
return b.String()
}
// SetSynValue sets the value of the given variable name on the synapse
// between the given send, recv unit indexes (1D, flat indexes).
// Returns an error for access errors.
func (pt *Path) SetSynValue(varNm string, sidx, ridx int, val float32) error {
vidx, err := pt.SynVarIndex(varNm)
if err != nil {
return err
}
ssi := pt.SynIndex(sidx, ridx)
if ssi < 0 || uint32(ssi) >= pt.NSyns {
return fmt.Errorf("SetSynValue: no synapse between send index %d and recv index %d", sidx, ridx)
}
syi := uint32(ssi)
syni := pt.SynStIndex + syi
if vidx < int(SynapseVarsN) {
Synapses.Set(val, int(syni), int(SynapseVars(vidx)))
} else {
for di := uint32(0); di < pt.Recv.MaxData; di++ {
SynapseTraces.Set(val, int(syni), int(di), int(SynapseTraceVars(vidx-int(SynapseVarsN))))
}
}
if varNm == "Wt" {
wt := Synapses.Value(int(syni), int(Wt))
if Synapses.Value(int(syni), int(SWt)) == 0 {
Synapses.Set(wt, int(syni), int(SWt))
}
Synapses.Set(pt.Params.SWts.LWtFromWts(wt, Synapses.Value(int(syni), int(SWt))), int(syni), int(LWt))
}
return nil
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"reflect"
"cogentcore.org/core/base/reflectx"
"github.com/emer/emergent/v2/params"
)
//gosl:start
const (
// StartOff is the starting offset.
StartOff int32 = iota
// Number of items.
Nitems
// Number of StartN elements.
StartNN
)
// StartN holds a starting offset index and a number of items
// arranged from Start to Start+N (exclusive).
// It is padded to 16 bytes via the pad fields below.
type StartN struct {
// starting offset
Start uint32
// number of items
N uint32
pad, pad1 uint32 // todo: see if we can do without these?
}
// PathIndexes contains path-level index information into global memory arrays
type PathIndexes struct {
// RecvLayer is the index of the receiving layer in global list of layers.
RecvLayer uint32
// RecvNeurSt is the starting index of neurons in recv layer,
// so we don't need layer to get to neurons.
RecvNeurSt uint32
// RecvNeurN is the number of neurons in recv layer.
RecvNeurN uint32
// SendLayer is the index of the sending layer in global list of layers.
SendLayer uint32
// SendNeurSt is the starting index of neurons in sending layer,
// so we don't need layer to get to neurons.
SendNeurSt uint32
// SendNeurN is the number of neurons in send layer
SendNeurN uint32
// SynapseSt is the start index into global Synapse array.
// [Layer][SendPaths][Synapses].
SynapseSt uint32
// SendConSt is the start index into global PathSendCon array.
// [Layer][SendPaths][SendNeurons]
SendConSt uint32
// RecvConSt is the start index into global PathRecvCon array.
// [Layer][RecvPaths][RecvNeurons]
RecvConSt uint32
// RecvSynSt is the start index into global sender-based Synapse index array.
// [Layer][SendPaths][Synapses]
RecvSynSt uint32
// NPathNeurSt is the start NPathNeur index into PathGBuf, PathGSyns global arrays.
// [Layer][RecvPaths][RecvNeurons]
NPathNeurSt uint32
pad uint32
}
// RecvNIndexToLayIndex converts a neuron's index in the network-level global
// list of all neurons to the receiving layer-specific index, e.g., for
// accessing GBuf and GSyn values. It just subtracts RecvNeurSt.
func (pi *PathIndexes) RecvNIndexToLayIndex(ni uint32) uint32 {
return ni - pi.RecvNeurSt
}
// SendNIndexToLayIndex converts a neuron's index in the network-level global
// list of all neurons to the sending layer-specific index. It just subtracts SendNeurSt.
func (pi *PathIndexes) SendNIndexToLayIndex(ni uint32) uint32 {
return ni - pi.SendNeurSt
}
// GScaleValues holds the conductance scaling values.
// These are computed once at start and remain constant thereafter,
// and therefore belong on Params and not on PathValues.
type GScaleValues struct {
// scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Path.PathScale adapt params
Scale float32 `edit:"-"`
// normalized relative proportion of total receiving conductance for this pathway: PathScale.Rel / sum(PathScale.Rel across relevant paths)
Rel float32 `edit:"-"`
pad, pad1 float32
}
// PathParams contains all of the path parameters.
// These values must remain constant over the course of computation.
// On the GPU, they are loaded into a read-only storage buffer.
type PathParams struct {
// Type is the functional type of path, which determines the code path
// for specialized types, and is synchronized with [Path.Type].
Type PathTypes
// Index is the index of the pathway in global path list: [Layer][SendPaths]
Index uint32 `edit:"-"`
pad, pad1 int32
// recv and send neuron-level pathway index array access info
Indexes PathIndexes `display:"-"`
// synaptic communication parameters: delay, probability of failure
Com SynComParams `display:"inline"`
// pathway scaling parameters for computing GScale:
// modulates overall strength of pathway, using both
// absolute and relative factors, with adaptation option to maintain target max conductances
PathScale PathScaleParams `display:"inline"`
// slowly adapting, structural weight value parameters,
// which control initial weight values and slower outer-loop adjustments
SWts SWtParams `display:"add-fields"`
// synaptic-level learning parameters for learning in the fast LWt values.
Learn LearnSynParams `display:"add-fields"`
// conductance scaling values
GScale GScaleValues `display:"inline"`
// Params for RWPath and TDPredPath for doing dopamine-modulated learning
// for reward prediction: Da * Send activity.
// Use in RWPredLayer or TDPredLayer typically to generate reward predictions.
// If the Da sign is positive, the first recv unit learns fully; if negative,
// the second one learns fully. A lower lrate applies in the opposite cases.
// Weights are positive-only.
RLPred RLPredPathParams `display:"inline"`
// VSMatrix has parameters for trace-based learning in the VSMatrixPath.
// A trace of synaptic co-activity is formed, and then modulated by
// dopamine whenever it occurs.
// This bridges the temporal gap between gating activity and subsequent activity,
// and is based biologically on synaptic tags.
// DSPatch provides modulation of trace activity based on local critic signal.
VSMatrix VSMatrixPathParams `display:"inline"`
// DSMatrix has parameters for trace-based learning in the DSMatrixPath.
// A trace of synaptic co-activity is formed, and then modulated by
// dopamine whenever it occurs.
// This bridges the temporal gap between gating activity and subsequent activity,
// and is based biologically on synaptic tags.
// DSPatch provides modulation of trace activity based on local critic signal.
DSMatrix DSMatrixPathParams `display:"inline"`
// Basolateral Amygdala pathway parameters.
BLA BLAPathParams `display:"inline"`
// Hip bench parameters.
Hip HipPathParams `display:"inline"`
}
func (pt *PathParams) IsInhib() bool {
return pt.Com.GType == InhibitoryG
}
func (pt *PathParams) IsExcitatory() bool {
return pt.Com.GType == ExcitatoryG
}
// SetFixedWts sets parameters for fixed, non-learning weights
// with a default of Mean = 0.8, Var = 0 strength
func (pt *PathParams) SetFixedWts() {
pt.SWts.Init.SPct = 0
pt.Learn.Learn.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.SWts.Adapt.SigGain = 1
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
pt.SWts.Init.Sym.SetBool(false)
}
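// SetFixedWts is typically called from an AddDefaultParams function on
// pathways that should not learn, e.g. (a sketch; the Mean override is
// hypothetical):
//
//	pt.AddDefaultParams(func(pt *PathParams) {
//		pt.SetFixedWts()
//		pt.SWts.Init.Mean = 0.5 // override the 0.8 default if needed
//	})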
//gosl:end
// StyleClass implements the [params.Styler] interface for parameter setting,
// and must only be called after the network has been built, and is current,
// because it uses the global CurrentNetwork variable.
func (pt *PathParams) StyleClass() string {
pth := CurrentNetwork.Paths[pt.Index]
return pt.Type.String() + " " + pth.Class
}
// StyleName implements the [params.Styler] interface for parameter setting,
// and must only be called after the network has been built, and is current,
// because it uses the global CurrentNetwork variable.
func (pt *PathParams) StyleName() string {
pth := CurrentNetwork.Paths[pt.Index]
return pth.Name
}
func (pt *PathParams) Defaults() {
pt.Com.Defaults()
pt.SWts.Defaults()
pt.PathScale.Defaults()
pt.Learn.Defaults()
pt.RLPred.Defaults()
pt.VSMatrix.Defaults()
pt.DSMatrix.Defaults()
pt.BLA.Defaults()
pt.Hip.Defaults()
}
func (pt *PathParams) Update() {
pt.Com.Update()
pt.PathScale.Update()
pt.SWts.Update()
pt.Learn.Update()
pt.RLPred.Update()
pt.VSMatrix.Update()
pt.DSMatrix.Update()
pt.BLA.Update()
pt.Hip.Update()
if pt.Type == CTCtxtPath {
pt.Com.GType = ContextG
}
}
func (pt *PathParams) ShouldDisplay(field string) bool {
switch field {
case "RLPred":
return pt.Type == RWPath || pt.Type == TDPredPath
case "VSMatrix":
return pt.Type == VSMatrixPath
case "DSMatrix":
return pt.Type == DSMatrixPath
case "BLA":
return pt.Type == BLAPath
case "Hip":
return pt.Type == HipPath
default:
return true
}
}
// ParamsString returns a listing of all parameters in the Pathway.
// If nonDefault is true, only report those not at their default values.
func (pt *PathParams) ParamsString(nonDefault bool) string {
return params.PrintStruct(pt, 2, func(path string, ft reflect.StructField, fv any) bool {
if ft.Tag.Get("display") == "-" {
return false
}
if nonDefault {
if def := ft.Tag.Get("default"); def != "" {
if reflectx.ValueIsDefault(reflect.ValueOf(fv), def) {
return false
}
} else {
if reflectx.NonPointerType(ft.Type).Kind() != reflect.Struct {
return false
}
}
}
return pt.ShouldDisplay(path)
},
func(path string, ft reflect.StructField, fv any) string {
if nonDefault {
if def := ft.Tag.Get("default"); def != "" {
return reflectx.ToString(fv) + " [" + def + "]"
}
}
return ""
})
}
// Code generated by "goal build"; DO NOT EDIT.
//line pcore-layer.goal:1
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/num"
"cogentcore.org/core/math32/minmax"
"github.com/emer/axon/v2/fsfffb"
)
//gosl:start
// DSMatrixParams has parameters for DSMatrixLayer.
// DA, ACh learning rate modulation is pre-computed on the recv neuron
// RLRate variable via NeuroMod.
// Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod").
type DSMatrixParams struct {
// PatchD1Range is the range of PatchD1 values to normalize into effective value.
PatchD1Range minmax.F32 `default:"{'Min':0.1,'Max':0.3}" display:"inline"`
// PatchD2Range is the range of PatchD2 values to normalize into effective value.
PatchD2Range minmax.F32 `default:"{'Min':0.05,'Max':0.25}" display:"inline"`
// PatchDAModGain is a separate NeuroMod.DAModGain factor applying
// to DA performance gain effects from the Patch-based DA values.
// The standard NeuroMod parameters apply only to the final outcome-based
// dopamine values.
PatchDAModGain float32 `default:"0.02"`
// PatchBurstGain is a separate NeuroMod.BurstGain-like factor applying
// to DA performance gain effects from the Patch-based DA values.
// The standard NeuroMod parameters apply only to the final outcome-based
// dopamine values, which do not drive performance DA effects in dorsal striatum.
// NeuroMod.DAModGain does control overall performance gain from patch.
PatchBurstGain float32 `default:"1.0"`
// Index of PatchD1 layer to get striosome modulation state from.
// Set during Build from BuildConfig PatchD1Name.
PatchD1Index int32 `edit:"-"`
// Index of PatchD2 layer to get striosome modulation state from.
// Set during Build from BuildConfig PatchD2Name.
PatchD2Index int32 `edit:"-"`
}
func (mp *DSMatrixParams) Defaults() {
mp.PatchDAModGain = 0.02
mp.PatchBurstGain = 1.0
mp.PatchD1Range.Set(0.1, 0.3)
mp.PatchD2Range.Set(0.05, 0.25)
}
func (mp *DSMatrixParams) Update() {
}
// StriatumParams has params and indexes for BG Striatum layers including
// DSMatrixLayer, VSMatrixLayer, and DSPatchLayer.
type StriatumParams struct {
// GateThr is the threshold on layer Avg CaPMax for Matrix Go and BG Thal
// layers to count as having gated.
GateThr float32 `default:"0.05"`
// Index of other layer (D2 if we are D1 and vice-versa).
// Set during Build from BuildConfig OtherName.
OtherIndex int32 `edit:"-"`
// Index of PF parafasciculus layer to get gating output state from.
// Set during Build from BuildConfig PFName.
PFIndex int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used
ThalLay1Index int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used
ThalLay2Index int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used
ThalLay3Index int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used
ThalLay4Index int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used
ThalLay5Index int32 `edit:"-"`
// Index of thalamus layer that we gate; needed to get gating information.
// Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used
ThalLay6Index int32 `edit:"-"`
pad, pad1, pad2 float32
}
func (mp *StriatumParams) Defaults() {
mp.GateThr = 0.05
}
func (mp *StriatumParams) Update() {
}
//////// GP
// GPLayerTypes is a GPLayer axon-specific layer type enum.
type GPLayerTypes int32 //enums:enum
// The GPLayer types
const (
// GPePr is the set of prototypical GPe neurons, mediating classical NoGo
GPePr GPLayerTypes = iota
// GPeAk is arkypallidal layer of GPe neurons, receiving inhibition from GPePr
// and projecting inhibition to Mtx
GPeAk
// GPi is the inner globus pallidus, functionally equivalent to SNr,
// receiving from MtxGo and GPePr, and sending inhibition to VThal
GPi
)
// GPParams has parameters for a globus pallidus (GP) layer, including:
// GPePr, GPeAk (arkypallidal), and GPi (see GPType for type).
// Typically just a single unit per Pool representing a given stripe.
type GPParams struct {
// type of GP Layer -- must set during config using SetBuildConfig of GPType.
GPType GPLayerTypes
pad, pad1, pad2 uint32
}
func (gp *GPParams) Defaults() {
}
func (gp *GPParams) Update() {
}
// MatrixGated is called after the standard PlusPhase.
// It uses Pool state to set the Gated flag based on CaPMax activity.
func (ly *LayerParams) MatrixGated(ctx *Context) {
lpi := ly.PoolIndex(0)
if ly.Learn.NeuroMod.DAMod != D1Mod {
oly := Layers[ly.Striatum.OtherIndex]
olpi := oly.PoolSt
// note: NoGo layers don't track gating at the sub-pool level!
for di := uint32(0); di < ctx.NData; di++ {
PoolsInt.Set(PoolsInt.Value(int(olpi), int(di), int(PoolGated)), int(lpi), int(di), int(PoolGated))
}
return
}
for di := uint32(0); di < ctx.NData; di++ {
mtxGated := PoolsInt.Value(int(lpi), int(di), int(PoolGated)) > 0
thalGated := false
if ly.Striatum.ThalLay1Index >= 0 {
tly := Layers[ly.Striatum.ThalLay1Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
if ly.Striatum.ThalLay2Index >= 0 {
tly := Layers[ly.Striatum.ThalLay2Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
if ly.Striatum.ThalLay3Index >= 0 {
tly := Layers[ly.Striatum.ThalLay3Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
if ly.Striatum.ThalLay4Index >= 0 {
tly := Layers[ly.Striatum.ThalLay4Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
if ly.Striatum.ThalLay5Index >= 0 {
tly := Layers[ly.Striatum.ThalLay5Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
if ly.Striatum.ThalLay6Index >= 0 {
tly := Layers[ly.Striatum.ThalLay6Index]
tlpi := tly.PoolSt
gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
thalGated = thalGated || gt > 0
}
mtxGated = mtxGated && thalGated
// note: in principle with multi-pool GP, could try to establish
// a correspondence between thal and matrix pools, such that
// a failure to gate at the thal level for a given pool would veto
// just the one corresponding pool. However, we're not really sure
// that this will make sense and not doing yet..
if !mtxGated { // nobody gated if the thalamus didn't
for spi := uint32(0); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
}
}
if ctx.PlusPhase.IsTrue() && ly.Type == VSMatrixLayer {
GlobalScalars.Set(num.FromBool[float32](mtxGated), int(GvVSMatrixJustGated), int(di))
if mtxGated {
poolIndex := int32(-1)
for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
if poolIndex < 0 && PoolsInt.Value(int(pi), int(di), int(PoolGated)) > 0 {
poolIndex = int32(spi)
}
}
if poolIndex > 0 {
GlobalVectors.Set(float32(1.0), int(GvVSMatrixPoolGated), int(poolIndex), int(di))
}
}
}
}
}
// GatedFromCaPMax updates the Gated state in Pools of given layer,
// based on Avg CaPMax being above given threshold.
func (ly *LayerParams) GatedFromCaPMax(ctx *Context, di uint32) {
anyGated := false
lpi := ly.PoolIndex(0)
thr := ly.Striatum.GateThr
if ly.Indexes.NPools > 1 {
for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
pi := ly.PoolIndex(spi)
spkavg := PoolAvgMax(AMCaPMax, AMCycle, Avg, pi, di)
gthr := spkavg > thr
if gthr {
anyGated = true
PoolsInt.Set(1, int(pi), int(di), int(PoolGated))
} else {
PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
}
}
} else {
spkavg := PoolAvgMax(AMCaPMax, AMCycle, Avg, lpi, di)
if spkavg > thr {
anyGated = true
}
}
if anyGated {
PoolsInt.Set(1, int(lpi), int(di), int(PoolGated))
} else {
PoolsInt.Set(0, int(lpi), int(di), int(PoolGated))
}
}
// AnyGated returns true if the layer-level pool Gated flag is true,
// which indicates whether any of the pools in the layer gated.
func (ly *LayerParams) AnyGated(di uint32) bool {
lpi := ly.PoolIndex(0)
return PoolsInt.Value(int(lpi), int(di), int(PoolGated)) > 0
}
// CyclePostDSPatchLayer grabs the PF activation into the pool ModAct value.
func (ly *LayerParams) CyclePostDSPatchLayer(ctx *Context, pi, di uint32, spi int32) {
pf := Layers[ly.Striatum.PFIndex]
pfact := PoolAvgMax(AMCaP, AMCycle, Avg, pf.PoolIndex(uint32(spi)), di) // must be CaP, not CaD
Pools.Set(pfact, int(pi), int(di), int(fsfffb.ModAct))
}
// CyclePostDSMatrixLayer sets the pool-specific DA dopamine signal based on
// PF activity and the DSPatch D1 and D2 values.
func (ly *LayerParams) CyclePostDSMatrixLayer(ctx *Context, pi, di uint32, spi int32) {
pf := Layers[ly.Striatum.PFIndex]
patchD1 := Layers[ly.DSMatrix.PatchD1Index]
patchD2 := Layers[ly.DSMatrix.PatchD2Index]
pfact := PoolAvgMax(AMCaP, AMCycle, Avg, pf.PoolIndex(uint32(spi)), di) // must be CaP
ptD1act := PoolAvgMax(AMCaP, AMCycle, Avg, patchD1.PoolIndex(uint32(spi)), di)
ptD2act := PoolAvgMax(AMCaP, AMCycle, Avg, patchD2.PoolIndex(uint32(spi)), di)
Pools.Set(ly.DSMatrix.PatchD1Range.NormValue(ptD1act), int(pi), int(di), int(fsfffb.DAD1))
Pools.Set(ly.DSMatrix.PatchD2Range.NormValue(ptD2act), int(pi), int(di), int(fsfffb.DAD2))
Pools.Set(pfact, int(pi), int(di), int(fsfffb.ModAct))
}
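// For example (hypothetical values, assuming NormValue performs a linear
// normalization within the range): with PatchD1Range = {0.1, 0.3} and a
// PatchD1 pool CaP average of 0.2, the DAD1 pool value is
// (0.2 - 0.1) / (0.3 - 0.1) = 0.5.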
//gosl:end
func (lly *Layer) MatrixDefaults() {
ly := lly.Params
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1 // prevent carryover of NMDA
ly.Acts.Kir.Gk = 10
ly.Acts.GabaB.Gk = 0 // Kir replaces GabaB
// ly.Acts.NMDA.Ge = 0 // Matrix needs nmda, default is fine
ly.Inhib.Layer.FB = 0 // pure FF
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Pool.On.SetBool(true) // needs both pool and layer if has pools
ly.Inhib.Pool.FB = 0 // pure FF
ly.Inhib.Pool.Gi = 0.5
ly.Inhib.ActAvg.Nominal = 0.25 // pooled should be lower
ly.Learn.RLRate.On.SetBool(true) // key: sig deriv used outside of rew trials
ly.Learn.RLRate.Diff.SetBool(false)
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // major effect
// ly.Learn.NeuroMod.DAMod needs to be set via BuildConfig
ly.Learn.NeuroMod.DALRateSign.SetBool(true) // critical
ly.Learn.NeuroMod.DALRateMod = 1
ly.Learn.NeuroMod.DAModGain = 0
ly.Learn.NeuroMod.AChLRateMod = 0
ly.Learn.NeuroMod.BurstGain = 0.1
ly.Learn.RLRate.SigmoidMin = 0.001
// important: user needs to adjust wt scale of some PFC inputs vs others:
// drivers vs. modulators
for _, pj := range lly.RecvPaths {
pj.Params.SWts.Init.SPct = 0
if pj.Send.Type == GPLayer { // GPeAkToMtx
pj.Params.SetFixedWts()
pj.Params.PathScale.Abs = 3
pj.Params.SWts.Init.Mean = 0.75
pj.Params.SWts.Init.Var = 0.0
}
}
}
func (lly *Layer) DSMatrixDefaults() {
ly := lly.Params
lly.MatrixDefaults()
ly.Inhib.Layer.On.SetBool(false)
ly.Acts.Dend.ModBase = 1
ly.Acts.Dend.ModGain = 0
ly.Learn.NeuroMod.AChDisInhib = 0
ly.Learn.NeuroMod.DAModGain = 0.0 // DS
for _, pj := range lly.RecvPaths {
if pj.Send.Type == GPLayer { // GPeAkToMtx
if strings.Contains(lly.Name, "No") {
pj.Params.PathScale.Abs = 4
}
}
}
}
func (lly *Layer) VSMatrixDefaults() {
ly := lly.Params
lly.MatrixDefaults()
ly.Inhib.Layer.On.SetBool(true)
ly.Acts.Dend.ModBase = 0
ly.Acts.Dend.ModGain = 2 // for VS case -- otherwise irrelevant
ly.Learn.NeuroMod.AChDisInhib = 5
ly.Learn.NeuroMod.BurstGain = 1
}
func (lly *Layer) MatrixPostBuild() {
ly := lly.Params
ly.Striatum.ThalLay1Index = lly.BuildConfigFindLayer("ThalLay1Name", false) // optional
ly.Striatum.ThalLay2Index = lly.BuildConfigFindLayer("ThalLay2Name", false) // optional
ly.Striatum.ThalLay3Index = lly.BuildConfigFindLayer("ThalLay3Name", false) // optional
ly.Striatum.ThalLay4Index = lly.BuildConfigFindLayer("ThalLay4Name", false) // optional
ly.Striatum.ThalLay5Index = lly.BuildConfigFindLayer("ThalLay5Name", false) // optional
ly.Striatum.ThalLay6Index = lly.BuildConfigFindLayer("ThalLay6Name", false) // optional
ly.Striatum.OtherIndex = lly.BuildConfigFindLayer("OtherName", true)
dm, err := lly.BuildConfigByName("DAMod")
if err == nil {
errors.Log(ly.Learn.NeuroMod.DAMod.SetString(dm))
}
}
func (lly *Layer) DSMatrixPostBuild() {
ly := lly.Params
lly.MatrixPostBuild()
ly.Striatum.PFIndex = lly.BuildConfigFindLayer("PFName", true)
ly.DSMatrix.PatchD1Index = lly.BuildConfigFindLayer("PatchD1Name", true)
ly.DSMatrix.PatchD2Index = lly.BuildConfigFindLayer("PatchD2Name", true)
}
func (lly *Layer) VSMatrixPostBuild() {
lly.MatrixPostBuild()
}
func (lly *Layer) DSPatchDefaults() {
ly := lly.Params
ly.VSPatchDefaults()
ly.Acts.Dend.ModBase = 1
ly.Acts.Dend.ModGain = 0
ly.Learn.NeuroMod.AChLRateMod = 1
}
func (lly *Layer) PatchPostBuild() {
ly := lly.Params
ly.Striatum.OtherIndex = lly.BuildConfigFindLayer("OtherName", true)
ly.Striatum.PFIndex = lly.BuildConfigFindLayer("PFName", true)
dm, err := lly.BuildConfigByName("DAMod")
if err == nil {
errors.Log(ly.Learn.NeuroMod.DAMod.SetString(dm))
}
}
//////// GP
func (lly *Layer) GPDefaults() {
ly := lly.Params
// GP is tonically self-active and has no FFFB inhibition.
// Defaults are for GPePr; GPeAk has special values below.
ly.Acts.Init.GeBase = 0.4
ly.Acts.Init.GeVar = 0.2
ly.Acts.Init.GiVar = 0.1
ly.Acts.Decay.Act = 0
ly.Acts.Decay.Glong = 1
ly.Acts.NMDA.Ge = 0 // carryover of NMDA was causing issues!
ly.Acts.GabaB.Gk = 0
ly.Inhib.ActAvg.Nominal = 1 // very active!
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(false)
if ly.GP.GPType == GPeAk {
ly.Acts.Init.GeBase = 0.2 // definitely lower in bio data, necessary
ly.Acts.Init.GeVar = 0.1
}
for _, pj := range lly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.SWts.Init.Mean = 0.75 // 0.75 -- very similar -- maybe a bit more reliable with 0.8 / 0
pj.Params.SWts.Init.Var = 0.25 // 0.25
switch ly.GP.GPType {
case GPePr:
switch pj.Send.Type {
case VSMatrixLayer, DSMatrixLayer:
pj.Params.PathScale.Abs = 1 // MtxNoToGPePr -- primary NoGo pathway
case GPLayer:
pj.Params.PathScale.Abs = 4.5 // 4.5 best for DS; GPePrToGPePr -- must be very strong
case STNLayer:
pj.Params.PathScale.Abs = 0.5 // STNToGPePr
}
case GPeAk:
switch pj.Send.Type {
case VSMatrixLayer, DSMatrixLayer:
pj.Params.PathScale.Abs = 0.6 // MtxGoToGPeAk
case GPLayer:
pj.Params.PathScale.Abs = 1 // GPePrToGPeAk
case STNLayer:
pj.Params.PathScale.Abs = 0.1 // STNToGPAk
}
}
}
if ly.GP.GPType == GPi {
lly.GPiDefaults()
}
}
func (lly *Layer) GPiDefaults() {
ly := lly.Params
ly.Acts.Init.GeBase = 0.3
ly.Acts.Init.GeVar = 0.1
ly.Acts.Init.GiVar = 0.1
// note: GPLayer took care of STN input paths
for _, pj := range lly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.SWts.Init.Mean = 0.75 // 0.75 see above
pj.Params.SWts.Init.Var = 0.25 // 0.25
if pj.Send.Type == VSMatrixLayer { // MatrixGoToGPi
pj.Params.PathScale.Abs = 0.2
} else if pj.Send.Type == DSMatrixLayer {
pj.Params.PathScale.Abs = 1
} else if pj.Send.Type == GPLayer { // GPePrToGPi
pj.Params.PathScale.Abs = 1
} else if pj.Send.Type == STNLayer { // STNToGPi
pj.Params.PathScale.Abs = 0.2
}
}
}
func (lly *Layer) GPPostBuild() {
ly := lly.Params
gpnm, err := lly.BuildConfigByName("GPType")
if err == nil {
errors.Log(ly.GP.GPType.SetString(gpnm))
}
}
//////// STN
func (lly *Layer) STNDefaults() {
ly := lly.Params
// STN is tonically self-active and has no FFFB inhibition
ly.Acts.Init.GeBase = 0.1 // was 0.3
ly.Acts.Init.GeVar = 0.1
ly.Acts.Init.GiVar = 0.1
ly.Acts.SKCa.Gk = 2
ly.Acts.SKCa.CaRDecayTau = 150 // 150 > 80 for longer theta windows
ly.Acts.Kir.Gk = 10 // 10 > 5 -- key for pause
ly.Acts.Decay.Act = 0
ly.Acts.Decay.Glong = 0
ly.Acts.Decay.LearnCa = 1 // key for non-spaced trials, to refresh immediately
ly.Acts.Dend.SSGi = 0
ly.Acts.NMDA.Ge = 0 // fine with 0
ly.Acts.GabaB.Gk = 0
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Layer.FB = 0
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Pool.Gi = 0.5
ly.Inhib.Pool.FB = 0
ly.Inhib.ActAvg.Nominal = 0.15
ly.Learn.NeuroMod.AChDisInhib = 0 // was 2,
// if ly.Cls == "VSTNLayer" {
// ly.Inhib.Layer.On.SetBool(false)
// } else {
// ly.Inhib.Layer.On.SetBool(true)
// }
for _, pj := range lly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.SWts.Init.Mean = 0.75
pj.Params.SWts.Init.Var = 0.25
if pj.Send.Type == GPLayer { // GPePrToSTN
pj.Params.PathScale.Abs = 0.4
} else {
pj.Params.PathScale.Abs = 2.0 // pfc inputs
}
}
}
//////// BGThal
func (lly *Layer) BGThalDefaults() {
ly := lly.Params
// note: not tonically active
// ly.Acts.NMDA.Ge = 0 // needs NMDA
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 0.6
ly.Acts.Dend.SSGi = 0
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.6
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Pool.Gi = 0.6
ly.Learn.NeuroMod.AChDisInhib = 1
for _, pj := range lly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.SWts.Init.Mean = 0.75
pj.Params.SWts.Init.Var = 0.0
if strings.HasSuffix(pj.Send.Name, "GPi") { // GPiToBGThal
pj.Params.PathScale.Abs = 5 // can now be much stronger with PTMaint mod and maint dynamics
pj.AddClass("GPiToBGThal")
}
}
}
//////// VSGated
func (ly *LayerParams) VSGatedDefaults() {
ly.Inhib.ActAvg.Nominal = 0.5
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Pool.Gi = 1
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"github.com/emer/emergent/v2/paths"
)
var (
GPePrDoc = "GPePr is a prototypical GPe (globus pallidus, externa), which is tonically active and sends inhibition to the SNr/GPi output, so that Matrix No pathway disinhibition results in more output inhibition and thus less final output activity (No). It also inhibits other GPePr neurons, the GPeAk neurons, and the STN. The STN inhibition initiates the initial excitation of the GPe neurons."
GPeAkDoc = "GPeAk is the arkypallidal GPe (globus pallidus, externa) which is tonically active and receives inhibition from the MatrixGo D1 pathway and sends inhibition to the striatum Matrix layers. Thus inhibition of GPeAk disinhibits the striatum, while disinhibition (net excitation) of GPeAk via GPePr and the No pathway further inhibits the striatum. This dynamic allows the GPePr, GPeAk and Matrix pathways to dynamically interact over time to resolve the Go vs. No balance, acting like a drift-diffusion integrator."
GPiDoc = "SNr (substantia nigra pars reticulata) / GPi (globus pallidus interna) are the major output pathways from BG, with tonic levels of activity that can be inhibited to disinhibit the downstream targets of BG output"
)
// AddVentralBG adds Ventral Basal Ganglia layers, using the PCore Pallidal Core
// framework where GPe plays a central role.
// Returns VMatrixGo, VMatrixNo, VGPePr, VGPeAk, VSTN, VGPi layers,
// with given optional prefix.
// Only the Matrix layers have a pool-based 4D shape by default -- use pools for "role"-like
// elements where matches need to be detected.
// All GP / STN layers have gpNeurY x gpNeurX neurons.
// Appropriate connections are made between layers, using standard styles.
// space is the spacing between layers (2 typical).
func (net *Network) AddVentralBG(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX, gpNeurY, gpNeurX int, space float32) (matrixGo, matrixNo, gpePr, gpeAk, stn, gpi *Layer) {
bglay := "VBG"
gpi = net.AddGPiLayer2D(prefix+"VGPi", bglay, gpNeurY, gpNeurX)
gpePr = net.AddGPeLayer2D(prefix+"VGPePr", bglay, gpNeurY, gpNeurX)
gpePr.SetBuildConfig("GPType", "GPePr")
gpePr.Doc = GPePrDoc
gpeAk = net.AddGPeLayer2D(prefix+"VGPeAk", bglay, gpNeurY, gpNeurX)
gpeAk.SetBuildConfig("GPType", "GPeAk")
gpeAk.Doc = GPeAkDoc
stn = net.AddSTNLayer2D(prefix+"VSTN", "VSTNLayer", gpNeurY, gpNeurX)
matrixGo = net.AddVMatrixLayer(prefix+"VMatrixGo", nPoolsY, nPoolsX, nNeurY, nNeurX, D1Mod)
matrixNo = net.AddVMatrixLayer(prefix+"VMatrixNo", nPoolsY, nPoolsX, nNeurY, nNeurX, D2Mod)
matrixGo.SetBuildConfig("OtherName", matrixNo.Name)
matrixNo.SetBuildConfig("OtherName", matrixGo.Name)
mp := func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 / float32(nPoolsX*nPoolsY)
ly.Acts.Dend.ModACh.SetBool(true)
}
matrixGo.AddDefaultParams(mp)
matrixNo.AddDefaultParams(mp)
full := paths.NewFull()
p1to1 := paths.NewPoolOneToOne()
net.ConnectLayers(matrixNo, gpePr, full, InhibPath)
pt := net.ConnectLayers(matrixNo, matrixGo, p1to1, InhibPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Rel = 0.05
})
bgclass := "VBGInhib"
net.ConnectLayers(gpePr, gpePr, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, gpeAk, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, stn, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, gpi, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(matrixGo, gpi, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(matrixGo, gpeAk, full, InhibPath).AddClass(bgclass)
// this doesn't make much difference -- RT is a bit cleaner without it:
// net.ConnectLayers(matrixGo, gpePr, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpeAk, matrixGo, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpeAk, matrixNo, full, InhibPath).AddClass(bgclass)
stnclass := "VSTNExcite"
net.ConnectLayers(stn, gpePr, full, ForwardPath).AddClass(stnclass)
net.ConnectLayers(stn, gpeAk, full, ForwardPath).AddClass(stnclass)
net.ConnectLayers(stn, gpi, full, ForwardPath).AddClass(stnclass)
gpeAk.PlaceBehind(gpi, space)
gpePr.PlaceRightOf(gpeAk, space)
stn.PlaceRightOf(gpi, space)
matrixGo.PlaceBehind(gpePr, space)
matrixNo.PlaceRightOf(matrixGo, space)
return
}
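// exampleVentralBGUsage is a minimal usage sketch, not part of the generated API:
// it adds the ventral BG circuit and wires a hypothetical CS input layer into the
// Matrix Go / No layers via ConnectToVSMatrix. The layer name, sizes, and spacing
// are illustrative assumptions only.
func exampleVentralBGUsage(net *Network) {
	// 2x2 pools of 4x4 Matrix neurons, 3x3 GP / STN neurons, spacing 2.
	matrixGo, matrixNo, _, _, _, _ := net.AddVentralBG("", 2, 2, 4, 4, 3, 3, 2)
	cs := net.AddLayer2D("CS", InputLayer, 4, 4) // hypothetical CS input layer
	full := paths.NewFull()
	net.ConnectToVSMatrix(cs, matrixGo, full) // VSMatrixPath: trace-based, DA-modulated learning
	net.ConnectToVSMatrix(cs, matrixNo, full)
}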
// TODO: need to integrate Patch into net signal, send that to corresponding dorsal pools.
// The PF signal as GModSyn is also probably pretty variable relative to actual activity --
// would be good to have longer time-average signal.
// Probably just put in Pool directly instead of having these synaptic signals, or trying to
// stick them onto global.
// AddDorsalBG adds Dorsal Basal Ganglia layers, using the PCore Pallidal Core
// framework where GPe plays a central role.
// Returns DMatrixGo, DMatrixNo, DGPePr, DGPeAk, DSTN, DGPi, PF layers, with given optional prefix.
// Makes 4D pools throughout the GP layers, with Pools representing separable
// gating domains, i.e., action domains.
// All GP / STN layers have gpNeurY x gpNeurX neurons.
// Appropriate PoolOneToOne connections are made between layers, using standard styles.
// space is the spacing between layers (2 typical)
func (net *Network) AddDorsalBG(prefix string, poolSTN bool, nPoolsY, nPoolsX, nNeurY, nNeurX, gpNeurY, gpNeurX int, space float32) (matrixGo, matrixNo, patchD1, patchD2, gpePr, gpeAk, stn, gpi, pf *Layer) {
bglay := "DBG"
gpi = net.AddGPiLayer4D(prefix+"DGPi", bglay, nPoolsY, nPoolsX, gpNeurY, gpNeurX)
gpePr = net.AddGPeLayer4D(prefix+"DGPePr", bglay, nPoolsY, nPoolsX, gpNeurY, gpNeurX)
gpePr.SetBuildConfig("GPType", "GPePr")
gpePr.Doc = GPePrDoc
gpeAk = net.AddGPeLayer4D(prefix+"DGPeAk", bglay, nPoolsY, nPoolsX, gpNeurY, gpNeurX)
gpeAk.SetBuildConfig("GPType", "GPeAk")
gpeAk.Doc = GPeAkDoc
if poolSTN {
stn = net.AddSTNLayer4D(prefix+"DSTN", "DSTNLayer", nPoolsY, nPoolsX, gpNeurY, gpNeurX)
} else {
stn = net.AddSTNLayer2D(prefix+"DSTN", "DSTNLayer", gpNeurY, gpNeurX)
}
matrixGo = net.AddDMatrixLayer(prefix+"DMatrixGo", nPoolsY, nPoolsX, nNeurY, nNeurX, D1Mod)
matrixNo = net.AddDMatrixLayer(prefix+"DMatrixNo", nPoolsY, nPoolsX, nNeurY, nNeurX, D2Mod)
patchD1, patchD2 = net.AddDSPatchLayers(prefix, nPoolsY, nPoolsX, nNeurY, nNeurX, space)
pfp := func(ly *LayerParams) {
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(false)
}
pf = net.AddLayer4D(prefix+"PF", SuperLayer, nPoolsY, nPoolsX, nNeurY, 1)
pf.AddDefaultParams(pfp)
pf.Doc = "PF is parafasicular thalamus, including CM (centromedian) and IL (intralaminar) areas, that provide feedback projections from the SNr / GPi BG output back into the striatum. It is critical for driving the striosomes (Patch) layers and the CIN (cholinergic interneurons) to provide localized dopaminergic training signals based on the final output activity of the each region of the BG."
matrixGo.SetBuildConfig("OtherName", matrixNo.Name)
matrixNo.SetBuildConfig("OtherName", matrixGo.Name)
matrixGo.SetBuildConfig("PFName", pf.Name)
matrixNo.SetBuildConfig("PFName", pf.Name)
matrixGo.SetBuildConfig("PatchD1Name", patchD1.Name)
matrixGo.SetBuildConfig("PatchD2Name", patchD2.Name)
matrixNo.SetBuildConfig("PatchD1Name", patchD1.Name)
matrixNo.SetBuildConfig("PatchD2Name", patchD2.Name)
patchD1.SetBuildConfig("OtherName", patchD2.Name)
patchD2.SetBuildConfig("OtherName", patchD1.Name)
patchD1.SetBuildConfig("PFName", pf.Name)
patchD2.SetBuildConfig("PFName", pf.Name)
p1to1 := paths.NewPoolOneToOne()
full := paths.NewFull()
var stnPath paths.Pattern
if poolSTN {
stnPath = p1to1
} else {
stnPath = full
}
net.ConnectLayers(matrixNo, gpePr, p1to1, InhibPath)
pt := net.ConnectLayers(matrixNo, matrixGo, p1to1, InhibPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Rel = 0.1
})
bgclass := "DBGInhib"
net.ConnectLayers(gpePr, gpePr, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, gpeAk, p1to1, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, stn, stnPath, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpePr, gpi, p1to1, InhibPath).AddClass(bgclass)
net.ConnectLayers(matrixGo, gpi, p1to1, InhibPath).AddClass(bgclass)
net.ConnectLayers(matrixGo, gpeAk, p1to1, InhibPath).AddClass(bgclass)
// not much diff with this: basically is an offset that can be learned
// net.ConnectLayers(matrixGo, gpePr, full, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpeAk, matrixGo, p1to1, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpeAk, matrixNo, p1to1, InhibPath).AddClass(bgclass)
net.ConnectLayers(gpi, pf, p1to1, InhibPath).AddClass(bgclass)
stnclass := "DSTNExcite"
net.ConnectLayers(stn, gpePr, stnPath, ForwardPath).AddClass(stnclass)
net.ConnectLayers(stn, gpeAk, stnPath, ForwardPath).AddClass(stnclass)
net.ConnectLayers(stn, gpi, stnPath, ForwardPath).AddClass(stnclass)
// direct PF pathways work better:
pfm := func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.Com.GType = ModulatoryG
pt.PathScale.Abs = 1
}
pt = net.ConnectLayers(pf, matrixGo, p1to1, ForwardPath).AddClass("PFToDMatrix")
pt.AddDefaultParams(pfm)
pt = net.ConnectLayers(pf, matrixNo, p1to1, ForwardPath).AddClass("PFToDMatrix")
pt.AddDefaultParams(pfm)
pt = net.ConnectLayers(pf, patchD1, p1to1, ForwardPath).AddClass("PFToDPatch")
pt.AddDefaultParams(pfm)
pt = net.ConnectLayers(pf, patchD2, p1to1, ForwardPath).AddClass("PFToDPatch")
pt.AddDefaultParams(pfm)
gpePr.PlaceBehind(gpi, space)
gpeAk.PlaceRightOf(gpePr, space)
stn.PlaceRightOf(gpi, space)
matrixGo.PlaceBehind(gpePr, space)
matrixNo.PlaceRightOf(matrixGo, space)
patchD1.PlaceBehind(matrixGo, space)
return
}
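// exampleDorsalBGUsage is a minimal usage sketch, not part of the generated API:
// it adds the dorsal BG circuit and drives the Matrix and Patch layers from a
// hypothetical state input layer. Names, sizes, and spacing are illustrative only.
func exampleDorsalBGUsage(net *Network) {
	mtxGo, mtxNo, patchD1, patchD2, _, _, _, _, _ := net.AddDorsalBG("", true, 2, 2, 4, 4, 3, 3, 2)
	state := net.AddLayer2D("State", InputLayer, 4, 4) // hypothetical state input
	full := paths.NewFull()
	net.ConnectToDSMatrix(state, mtxGo, mtxNo, full, "StateToDMatrix")
	net.ConnectToDSPatch(state, patchD1, patchD2, full, "StateToDSPatch")
}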
// AddBGThalLayer4D adds a BG gated thalamus (e.g., VA/VL/VM, MD) Layer
// of given size, with given name.
// This version has a 4D structure, with Pools representing separable gating domains.
func (net *Network) AddBGThalLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, BGThalLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
ly.AddClass("BG")
return ly
}
// AddBGThalLayer2D adds a BG gated thalamus (e.g., VA/VL/VM, MD) Layer
// of given size, with given name.
// This version has a 2D structure
func (net *Network) AddBGThalLayer2D(name string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, BGThalLayer, nNeurY, nNeurX)
ly.AddClass("BG")
return ly
}
// AddVMatrixLayer adds a Ventral MatrixLayer of given size, with given name.
// Assumes that a 4D structure will be used, with Pools representing separable gating domains.
// da gives the DaReceptor type (D1R = Go, D2R = NoGo)
func (net *Network) AddVMatrixLayer(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int, da DAModTypes) *Layer {
ly := net.AddLayer4D(name, VSMatrixLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
ly.SetBuildConfig("DAMod", da.String())
ly.AddClass("VSMatrixLayer")
return ly
}
// AddDMatrixLayer adds a Dorsal MatrixLayer of given size, with given name.
// Assumes that a 4D structure will be used, with Pools representing separable gating domains.
// da gives the DaReceptor type (D1R = Go, D2R = NoGo)
func (net *Network) AddDMatrixLayer(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int, da DAModTypes) *Layer {
ly := net.AddLayer4D(name, DSMatrixLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
ly.SetBuildConfig("DAMod", da.String())
ly.AddClass("DSMatrixLayer")
return ly
}
// ConnectToVSMatrix adds a VSMatrixPath from given sending layer to a matrix layer
func (net *Network) ConnectToVSMatrix(send, recv *Layer, pat paths.Pattern) *Path {
return net.ConnectLayers(send, recv, pat, VSMatrixPath)
}
// ConnectToDSMatrix adds a DSMatrixPath from given sending layer
// to matrix Go, No layers, adding given classes if present.
func (net *Network) ConnectToDSMatrix(send, matrixGo, matrixNo *Layer, pat paths.Pattern, class ...string) (*Path, *Path) {
gp := net.ConnectLayers(send, matrixGo, pat, DSMatrixPath)
np := net.ConnectLayers(send, matrixNo, pat, DSMatrixPath)
if len(class) > 0 {
gp.AddClass(class...)
np.AddClass(class...)
}
return gp, np
}
// AddGPeLayer2D adds a GPe layer (of GPLayer type) of given size, with given name.
// The GPType BuildConfig setting must be set to the appropriate GP layer type.
func (net *Network) AddGPeLayer2D(name, class string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, GPLayer, nNeurY, nNeurX)
ly.AddClass(class)
return ly
}
// AddGPiLayer2D adds an SNr / GPiLayer of given size, with given name.
func (net *Network) AddGPiLayer2D(name, class string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, GPLayer, nNeurY, nNeurX)
ly.Doc = GPiDoc
ly.SetBuildConfig("GPType", "GPi")
ly.AddClass(class)
return ly
}
// AddSTNLayer2D adds a subthalamic nucleus Layer of given size, with given name.
func (net *Network) AddSTNLayer2D(name, class string, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer2D(name, STNLayer, nNeurY, nNeurX)
// note: type based doc is fine
ly.AddClass(class)
return ly
}
// AddGPeLayer4D adds a GPe layer (of GPLayer type) of given size, with given name.
// Makes a 4D structure with Pools representing separable gating domains.
func (net *Network) AddGPeLayer4D(name, class string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, GPLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
ly.AddClass(class)
return ly
}
// AddGPiLayer4D adds a GPiLayer of given size, with given name.
// Makes a 4D structure with Pools representing separable gating domains.
func (net *Network) AddGPiLayer4D(name, class string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, GPLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
ly.Doc = GPiDoc
ly.SetBuildConfig("GPType", "GPi")
ly.AddClass(class)
return ly
}
// AddSTNLayer4D adds a subthalamic nucleus Layer of given size, with given name.
// Makes a 4D structure with Pools representing separable gating domains.
func (net *Network) AddSTNLayer4D(name, class string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
ly := net.AddLayer4D(name, STNLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
// note: type based doc is fine
ly.AddClass(class)
return ly
}
// AddVSGatedLayer adds a VSGatedLayer with given number of Y units
// and 2 pools: the first represents JustGated, the second HasGated.
func (net *Network) AddVSGatedLayer(prefix string, nYunits int) *Layer {
ly := net.AddLayer4D(prefix+"VSGated", VSGatedLayer, 1, 2, nYunits, 1)
return ly
}
// AddDSPatchLayers adds DSPatch (Pos, D1, D2)
func (nt *Network) AddDSPatchLayers(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX int, space float32) (d1, d2 *Layer) {
d1 = nt.AddLayer4D(prefix+"DSPatchD1", DSPatchLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
d1.SetBuildConfig("DAMod", "D1Mod")
d1.SetBuildConfig("Valence", "Positive")
d1.Doc = "DSPatch are dorsal striatum patch (striosome) neurons that provide a local critic reward-prediction-error (RPE) signal for the corresponding pool of Matrix neurons. D1 = learns from DA bursts."
d2 = nt.AddLayer4D(prefix+"DSPatchD2", DSPatchLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
d2.SetBuildConfig("DAMod", "D2Mod")
d2.SetBuildConfig("Valence", "Positive")
d2.Doc = "DSPatch are dorsal striatum patch (striosome) neurons that provide a local critic reward-prediction-error (RPE) signal for the corresponding pool of Matrix neurons. D2 = learns from DA dips."
d2.PlaceBehind(d1, space)
return
}
// ConnectToDSPatch adds a DSPatchPath from given sending layer
// to DSPatchD1, D2 layers, adding given classes if present.
func (nt *Network) ConnectToDSPatch(send, dspD1, dspD2 *Layer, pat paths.Pattern, class ...string) (*Path, *Path) {
d1 := nt.ConnectLayers(send, dspD1, pat, DSPatchPath)
d2 := nt.ConnectLayers(send, dspD2, pat, DSPatchPath)
if len(class) > 0 {
d1.AddClass(class...)
d2.AddClass(class...)
}
return d1, d2
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import "cogentcore.org/lab/gosl/slbool"
//gosl:start
// DSMatrixPathParams for trace-based learning in the MatrixPath.
// A trace of synaptic co-activity is formed, and then modulated by dopamine
// whenever it occurs. This bridges the temporal gap between gating activity
// and subsequent activity, and is based biologically on synaptic tags.
// Trace is applied to DWt and reset at the time of reward.
type DSMatrixPathParams struct {
// PatchDA is the proportion of the Credit trace factor for learning
// that is modulated by Patch DA, versus just the standard s*r activity factor.
PatchDA float32 `default:"0.5"`
// Credit is proportion of trace activity driven by the credit assignment factor
// based on the PF modulatory inputs, synaptic activity (send * recv),
// and Patch DA, which indicates the extent to which gating at this time is net
// associated with subsequent reward or not.
Credit float32 `default:"0.6"`
// Delta is weight for trace activity that is a function of the minus-plus delta
// activity signal on the receiving SPN neuron, independent of PF modulation.
// This should always be 1 except when disabling it for testing: adjust NonDelta
// relative to it, and the overall learning rate.
Delta float32 `default:"1"`
// D2Scale is a scaling factor for the DAD2 learning factor relative to
// the DAD1 contribution (which is 1 - DAD1).
D2Scale float32 `default:"1"`
// OffTrace is a multiplier on trace contribution when action output
// communicated by PF is not above threshold.
OffTrace float32 `default:"0.1"`
pad, pad1, pad2 float32
}
func (tp *DSMatrixPathParams) Defaults() {
tp.PatchDA = 0.5
tp.Credit = 0.6
tp.Delta = 1
tp.D2Scale = 1
tp.OffTrace = 0.1
}
func (tp *DSMatrixPathParams) Update() {
}
// VSMatrixPathParams for trace-based learning in the VSMatrixPath,
// for ventral striatum paths.
// A trace of synaptic co-activity is formed, and then modulated by dopamine
// whenever it occurs. This bridges the temporal gap between gating activity
// and subsequent activity, and is based biologically on synaptic tags.
// Trace is applied to DWt and reset at the time of reward.
type VSMatrixPathParams struct {
// RewActLearn makes learning based on activity at time of reward,
// in inverse proportion to the GoalMaint activity: i.e., if there was no
// goal maintenance, learn at reward to encourage goal engagement next time,
// but otherwise, do not further reinforce at time of reward, because the
// actual goal gating learning trace is a better learning signal.
// Otherwise, only uses accumulated trace but doesn't include rew-time activity,
// e.g., for testing cases that do not have GoalMaint.
RewActLearn slbool.Bool `default:"true"`
// Delta is weight for trace activity that is a function of the minus-plus delta
// activity signal on the receiving SPN neuron, independent of PF modulation.
// This should always be 1 except when disabling it for testing: adjust NonDelta
// relative to it, and the overall learning rate.
Delta float32 `default:"1"`
// Credit is proportion of trace activity driven by the credit assignment factor
// based on the PF modulatory inputs, synaptic activity (send * recv),
// and Patch DA, which indicates the extent to which gating at this time is net
// associated with subsequent reward or not.
Credit float32 `default:"0.6"`
pad float32
}
func (tp *VSMatrixPathParams) Defaults() {
tp.RewActLearn.SetBool(true)
tp.Delta = 1
tp.Credit = 0.6
}
func (tp *VSMatrixPathParams) Update() {
}
//gosl:end
func (pj *PathParams) MatrixDefaults() {
pj.SWts.Adapt.On.SetBool(false)
pj.SWts.Adapt.SigGain = 6 // not 1 -- though 1 could be appropriate in some cases
pj.SWts.Init.Sym.SetBool(false)
pj.SWts.Init.SPct = 0
pj.SWts.Init.Mean = 0.5
pj.SWts.Init.Var = 0.4
pj.Learn.LRate.Base = 0.01
pj.Learn.DWt.LearnThr = 0.1 // note: higher values prevent ability to learn to gate again after extinction
}
// Code generated by "goal build"; DO NOT EDIT.
//line pool.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/atomicx"
"github.com/emer/axon/v2/fsfffb"
"log/slog"
"sync/atomic"
)
//gosl:start
//gosl:import "github.com/emer/axon/v2/fsfffb"
type PoolIndexVars int32 //enums:enum
const (
// PoolNeurSt is the starting layer-wise index within the list
// of neurons in this pool.
// Add layer starting neuron index (NeurSt) to get index into global
// network neurons list.
PoolNeurSt PoolIndexVars = iota
// PoolNeurEd is the ending (exclusive) layer-wise index within the list
// of neurons in this pool.
// Add layer starting neuron index (NeurSt) to get index into global
// network neurons list.
PoolNeurEd
// PoolLayerIdx is the layer index for this pool.
PoolLayerIdx
// PoolIsLayer is true (> 0) if this pool represents the entire layer,
// which is always the first pool in the list of pools for a layer.
PoolIsLayer
)
// PoolIntVars are int32 pool variables, for computing fsfffb inhibition etc.
// Note that we use int32 instead of uint32 so that overflow errors can be detected.
// See [PoolVars] for float32 variables.
type PoolIntVars int32 //enums:enum
const (
// Clamped if true (!=0), this layer is hard-clamped and should
// use GeExts exclusively for PV.
Clamped PoolIntVars = iota
// PoolGated is true (> 0) if this pool gated (for [MatrixLayer], [BGThalLayer])
PoolGated
// FFsRawInt is the int32 atomic add compatible integration of [fsfffb.FFsRaw].
FFsRawInt
// FBsRawInt is the int32 atomic add compatible integration of [fsfffb.FBsRaw].
FBsRawInt
// GeExtRawInt is the int32 atomic add compatible integration of [fsfffb.GeExtRaw].
GeExtRawInt
// PoolIntAvgMaxStart is the starting point for int32 AvgMax variables.
// Use AvgMaxIntVarIndex to get the relevant variable index.
// There are only values for Cycle phase, for the different variables.
PoolIntAvgMaxStart
)
// AvgMax are Avg and Max
type AvgMax int32 //enums:enum
const (
Avg AvgMax = iota
Max
)
// AvgMaxPhases are the different Phases over which AvgMax values are tracked.
type AvgMaxPhases int32 //enums:enum -trim-prefix AM
const (
// Cycle is the current cycle, which is the source for the rest.
AMCycle AvgMaxPhases = iota
// Minus is at the end of the minus phase.
AMMinus
// Plus is at the end of the plus phase.
AMPlus
// Prev is at the end of the previous plus phase.
AMPrev
)
// AvgMaxVars are the different Neuron variables for which [AvgMaxPhases]
// is computed.
type AvgMaxVars int32 //enums:enum -trim-prefix AM
const (
// CaP is the primary variable for tracking overall pool activity
// over a recent timescale, integrated at roughly 40 msec time constant.
AMCaP AvgMaxVars = iota
// CaD is a slower moving activation signal, capable of reflecting
// activity over the entire trial.
AMCaD
// CaPMax is the maximum CaP over the trial of processing.
AMCaPMax
// Act is the computed rate-code equivalent of current spike rate.
AMAct
// GeInt is the integrated running-average value of excitatory conductance.
AMGeInt
// GiInt is the integrated running-average value of inhibitory conductance.
AMGiInt
// AvgDif is the integrated average difference between ActPct and TrgAvg.
// Only the Plus phase is used.
AMAvgDif
)
const (
// poolFloatAvgMaxStart is the starting index for AvgMax float32 variables.
poolFloatAvgMaxStart = fsfffb.InhibVarsN
PoolVarsTotal = poolFloatAvgMaxStart + fsfffb.InhibVars(int32(AvgMaxVarsN)*int32(AvgMaxN)*int32(AvgMaxPhasesN))
PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(int32(AvgMaxVarsN)*int32(AvgMaxN))
)
// avgMaxToNeuron is the mapping from AvgMaxVars to neuron vars.
var avgMaxToNeuron = [AMAvgDif]NeuronVars{CaP, CaD, CaPMax, Act, GeInt, GiInt}
// AvgMaxVarIndex returns the variable index for accessing
// [Pools] AvgMax float32 variables.
func AvgMaxVarIndex(vr AvgMaxVars, phase AvgMaxPhases, am AvgMax) uint32 {
return uint32(poolFloatAvgMaxStart) + uint32(vr)*uint32(AvgMaxN)*uint32(AvgMaxPhasesN) + uint32(phase)*uint32(AvgMaxN) + uint32(am)
}
// AvgMaxIntVarIndex returns the variable index for accessing
// [Pools] AvgMax int32 variables. Note that the Avg slot is accumulated as a sum.
// There are only values for the Cycle phase level.
func AvgMaxIntVarIndex(vr AvgMaxVars, am AvgMax) uint32 {
return uint32(PoolIntAvgMaxStart) + uint32(vr)*uint32(AvgMaxN) + uint32(am)
}
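// avgMaxIndexExample is a small illustrative sketch, not part of the API:
// given AvgMaxN = 2 and AvgMaxPhasesN = 4 from the enums above, the
// (CaD, Minus, Max) slot resolves to
// poolFloatAvgMaxStart + 1*(2*4) + 1*2 + 1 = poolFloatAvgMaxStart + 11.
func avgMaxIndexExample() uint32 {
	return AvgMaxVarIndex(AMCaD, AMMinus, Max)
}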
// PoolAvgMax returns an AvgMax value for given variable, phase,
// and Avg or Max, for given pool index and data index.
func PoolAvgMax(vr AvgMaxVars, phase AvgMaxPhases, am AvgMax, pi, di uint32) float32 {
return Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, phase, am)))
}
// PoolNNeurons returns the number of neurons in the given pool.
// pi = global pool index.
func PoolNNeurons(pi uint32) int32 {
return int32(PoolIxs.Value(int(pi), int(PoolNeurEd)) - PoolIxs.Value(int(pi), int(PoolNeurSt)))
}
// PoolAvgMaxInit initializes the AvgMax Int accumulators for Cycle vals
// at the start of an update. These are always left initialized, so this is generally unnecessary.
// pi = global pool index.
func PoolAvgMaxInit(pi, di uint32) {
for vr := range AMAvgDif {
PoolsInt.Set(0, int(pi), int(di), int(AvgMaxIntVarIndex(vr, Avg)))
PoolsInt.Set(0, int(pi), int(di), int(AvgMaxIntVarIndex(vr, Max)))
}
}
// PoolAvgMaxZero initializes all the AvgMax values to zero.
// pi = global pool index.
func PoolAvgMaxZero(pi, di uint32) {
PoolAvgMaxInit(pi, di)
for vr := range AMAvgDif {
for ph := range AvgMaxPhasesN {
Pools.Set(0.0, int(pi), int(di), int(AvgMaxVarIndex(vr, ph, Avg)))
Pools.Set(0.0, int(pi), int(di), int(AvgMaxVarIndex(vr, ph, Max)))
}
}
}
// PoolAvgMaxUpdateVar updates the AvgMax value based on given value.
// pi = global pool index.
func PoolAvgMaxUpdateVar(vr AvgMaxVars, pi, di uint32, val float32) {
n := float32(PoolNNeurons(pi))
floatToInt := float32(uint32(1) << 20)
floatToSum := floatToInt / n
vis := AvgMaxIntVarIndex(vr, Avg)
vim := AvgMaxIntVarIndex(vr, Max)
atomic.AddInt32(PoolsInt.ValuePtr(int(pi), int(di), int(vis)), int32(val*floatToSum))
atomicx.MaxInt32(PoolsInt.ValuePtr(int(pi), int(di), int(vim)), int32(val*floatToInt))
}
// PoolAvgMaxUpdateVarNonAtomic updates the AvgMax value based on given value.
// non-atomic version: only when explicitly looping over neurons.
// pi = global pool index.
func PoolAvgMaxUpdateVarNonAtomic(vr AvgMaxVars, pi, di uint32, val float32) {
n := float32(PoolNNeurons(pi))
floatToInt := float32(uint32(1) << 20)
floatToSum := floatToInt / n
vis := AvgMaxIntVarIndex(vr, Avg)
vim := AvgMaxIntVarIndex(vr, Max)
PoolsInt.SetAdd(int32(val*floatToSum), int(pi), int(di), int(vis))
PoolsInt.Set(max(PoolsInt.Value(int(pi), int(di), int(vim)), int32(val*floatToInt)), int(pi), int(di), int(vim))
}
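// poolFixedPointSketch is an illustrative sketch, not part of the API, of the
// fixed-point scheme used above: float32 values are scaled by 2^20 into int32
// so they can be accumulated with (atomic) integer adds, and the Avg slot
// pre-divides by the number of neurons so that scaling back down by 2^20
// yields the mean directly. The input slice is an arbitrary example of
// non-negative values (Abs is taken upstream in the actual usage).
func poolFixedPointSketch(vals []float32) (avg, mx float32) {
	if len(vals) == 0 {
		return 0, 0
	}
	floatToInt := float32(uint32(1) << 20)
	floatToSum := floatToInt / float32(len(vals))
	var sum, imax int32
	for _, v := range vals {
		sum += int32(v * floatToSum)
		if iv := int32(v * floatToInt); iv > imax {
			imax = iv
		}
	}
	floatFromInt := float32(1.0) / floatToInt
	return float32(sum) * floatFromInt, float32(imax) * floatFromInt
}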
// PoolAvgMaxUpdate updates the AvgMax values based on current neuron values.
// pi = global pool index.
func PoolAvgMaxUpdate(pi, di, ni uint32) {
PoolAvgMaxUpdateVar(AMCaP, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaP]))))
PoolAvgMaxUpdateVar(AMCaD, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaD]))))
PoolAvgMaxUpdateVar(AMCaPMax, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaPMax]))))
PoolAvgMaxUpdateVar(AMAct, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMAct]))))
PoolAvgMaxUpdateVar(AMGeInt, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMGeInt]))))
PoolAvgMaxUpdateVar(AMGiInt, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMGiInt]))))
}
// PoolAvgMaxCalcVar does Calc on Cycle level, and re-inits, for given Var
func PoolAvgMaxCalcVar(vr AvgMaxVars, pi, di uint32) {
floatFromInt := float32(1.0) / float32(uint32(1)<<20)
vis := AvgMaxIntVarIndex(vr, Avg)
sum := PoolsInt.Value(int(pi), int(di), int(vis))
if sum < 0 {
//gosl:end
slog.Warn("PoolAvgMaxCalc overflow in Sum", "pi:", pi, "di:", di, "sum:", sum)
//gosl:start
sum = int32(uint32(1) << 20)
}
Pools.Set(float32(sum)*floatFromInt, int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Avg)))
PoolsInt.Set(0, int(pi), int(di), int(vis))
vim := AvgMaxIntVarIndex(vr, Max)
mx := PoolsInt.Value(int(pi), int(di), int(vim))
if mx < 0 {
//gosl:end
slog.Warn("PoolAvgMaxCalc overflow in Max", "pi:", pi, "di:", di, "max:", mx)
//gosl:start
mx = int32(uint32(1) << 20)
}
PoolsInt.Set(0, int(pi), int(di), int(vim))
Pools.Set(float32(mx)*floatFromInt, int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Max)))
}
// PoolAvgMaxCalc does Calc on Cycle level, and re-inits
func PoolAvgMaxCalc(pi, di uint32) {
for vr := range AMAvgDif { // don't do AvgDif
PoolAvgMaxCalcVar(vr, pi, di)
}
}
// PoolAvgDifInit initializes the AvgMax AvgDif Int accumulators for Cycle vals
// at the start of an update. These are always left initialized, so this is generally unnecessary.
// pi = global pool index.
func PoolAvgDifInit(pi, di uint32) {
PoolsInt.Set(0, int(pi), int(di), int(AvgMaxIntVarIndex(AMAvgDif, Avg)))
PoolsInt.Set(0, int(pi), int(di), int(AvgMaxIntVarIndex(AMAvgDif, Max)))
}
// PoolAvgDifUpdate updates the AvgMax values for AvgDif Var.
// pi = global pool index.
func PoolAvgDifUpdate(pi, di uint32, avdif float32) {
PoolAvgMaxUpdateVarNonAtomic(AMAvgDif, pi, di, avdif)
}
// PoolAvgDifCalc does Calc on Cycle level, and re-inits
func PoolAvgDifCalc(pi, di uint32) {
PoolAvgMaxCalcVar(AMAvgDif, pi, di)
}
// PoolCycleToMinus grabs current Cycle values into the Minus phase values,
// and Plus values into Prev.
func PoolCycleToMinus(pi, di uint32) {
for vr := range AMAvgDif { // don't do AvgDif
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Avg))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMMinus, Avg)))
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Max))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMMinus, Max)))
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMPlus, Avg))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMPrev, Avg)))
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMPlus, Max))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMPrev, Max)))
}
}
// PoolCycleToPlus grabs current Cycle values into the Plus phase values.
func PoolCycleToPlus(pi, di uint32) {
for vr := range AMAvgDif { // don't do AvgDif
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Avg))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMPlus, Avg)))
Pools.Set(Pools.Value(int(pi), int(di), int(AvgMaxVarIndex(vr, AMCycle, Max))), int(pi), int(di), int(AvgMaxVarIndex(vr, AMPlus, Max)))
}
}
// PoolInit is called during InitActs
func PoolInit(pi, di uint32) {
PoolInhibInit(pi, di)
PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
PoolAvgMaxZero(pi, di)
}
// PoolPoolGi computes the total inhibitory conductance for the pool.
func PoolPoolGi(ctx *Context, pi, di uint32) {
if PoolIxs.Value(int(pi), int(PoolIsLayer)) > 0 {
return
}
li := PoolIxs.Value(int(pi), int(PoolLayerIdx))
PoolAvgMaxCalc(pi, di)
PoolInhibIntToRaw(pi, di)
ly := GetLayers(uint32(li))
giMult := LayerStates.Value(int(li), int(di), int(LayerGiMult))
lyIsOn := (ly.Inhib.Layer.On == 1)
lpi := ly.PoolIndex(uint32(0))
ly.SubPoolGiFromSpikes(ctx, lpi, pi, di, lyIsOn, giMult)
}
//gosl:end
// IndexToAvgMaxVar returns the AvgMaxVar indexes from overall Pool variable index.
func IndexToAvgMaxVar(vi uint32) (vr AvgMaxVars, phase AvgMaxPhases, am AvgMax) {
vi -= uint32(poolFloatAvgMaxStart)
vr = AvgMaxVars(vi / (uint32(AvgMaxN) * uint32(AvgMaxPhasesN)))
rmdr := vi % (uint32(AvgMaxN) * uint32(AvgMaxPhasesN))
phase = AvgMaxPhases(rmdr / uint32(AvgMaxN))
am = AvgMax(rmdr % uint32(AvgMaxN))
return
}
func PoolVarName(vi uint32) string {
if vi < uint32(fsfffb.InhibVarsN) {
return fsfffb.InhibVars(vi).String()
}
vr, phase, am := IndexToAvgMaxVar(vi)
return vr.String() + "_" + phase.String() + "_" + am.String()
}
// IndexToAvgMaxIntVar returns the AvgMaxVar indexes from overall PoolInt variable index.
func IndexToAvgMaxIntVar(vi uint32) (vr AvgMaxVars, am AvgMax) {
vi -= uint32(PoolIntAvgMaxStart)
vr = AvgMaxVars(vi / uint32(AvgMaxN))
am = AvgMax(vi % uint32(AvgMaxN))
return
}
func PoolIntVarName(vi uint32) string {
if vi < uint32(PoolIntAvgMaxStart) {
return PoolIntVars(vi).String()
}
vr, am := IndexToAvgMaxIntVar(vi)
return vr.String() + "_" + am.String()
}
// PoolTestValues adds the CaD Avg value to the given vals map, providing an
// integrated summary of pool activity for testing.
func PoolTestValues(pi, di uint32, layKey string, vals map[string]float32) {
vals[layKey+" CaD Avg"] = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
}
package axon
import (
"cogentcore.org/lab/gosl/slrand"
)
//gosl:start
type RandFunIndex uint32
// RandFunIndex stores a unique index for each function that requires
// random number generation. If you add a new function, add a new enum entry here.
// RandFunIndexN is the total number of random functions (maintained automatically by iota).
const (
RandFunActPGe RandFunIndex = iota
RandFunActPGi
RandFunActSMaintP
RandFunIndexN
)
// GetRandomNumber returns a random number that depends on the index,
// counter and function index.
// We increment the counter after each cycle, so that we get new random numbers.
// This whole scheme exists to ensure equal results under different multithreading settings.
func GetRandomNumber(index uint32, counter uint64, funIndex RandFunIndex) float32 {
return slrand.Float32(counter, uint32(funIndex), index)
}
//gosl:end
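// exampleGetRandomNumber is an illustrative sketch, not part of the API: the
// same (index, counter, function) triple always yields the same value,
// independent of thread scheduling, which is the point of this scheme.
func exampleGetRandomNumber(ni uint32, cycle uint64) float32 {
	// e.g., per-neuron Ge noise for the given cycle counter:
	return GetRandomNumber(ni, cycle, RandFunActPGe)
}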
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"cogentcore.org/core/math32/minmax"
)
//gosl:start
// RWPredParams parameterizes reward prediction for a simple Rescorla-Wagner
// learning dynamic (i.e., PV learning in the Rubicon framework).
type RWPredParams struct {
// PredRange is the range of predictions that can be represented (0.01..0.99 by default) -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance
PredRange minmax.F32
}
func (rp *RWPredParams) Defaults() {
rp.PredRange.Set(0.01, 0.99)
}
func (rp *RWPredParams) Update() {
}
// RWDaParams computes a dopamine (DA) signal using simple Rescorla-Wagner
// learning dynamic (i.e., PV learning in the Rubicon framework).
type RWDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
TonicGe float32
// idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName
RWPredLayIndex int32 `edit:"-"`
pad, pad1 uint32
}
func (rp *RWDaParams) Defaults() {
rp.TonicGe = 0.2
}
func (rp *RWDaParams) Update() {
}
// GeFromDA returns excitatory conductance from DA dopamine value
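// For example, with the default TonicGe = 0.2: da = +1 gives ge = 0.4,
// da = 0 gives 0.2, and da = -1 (or lower) is clamped to 0.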
func (rp *RWDaParams) GeFromDA(da float32) float32 {
ge := rp.TonicGe * (1.0 + da)
if ge < 0 {
ge = 0
}
return ge
}
// TDIntegParams are params for reward integrator layer
type TDIntegParams struct {
// discount factor -- how much to discount the future prediction from TDPred
Discount float32
// gain factor on TD rew pred activations
PredGain float32
// idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName
TDPredLayIndex int32 `edit:"-"`
pad uint32
}
func (tp *TDIntegParams) Defaults() {
tp.Discount = 0.9
tp.PredGain = 1
}
func (tp *TDIntegParams) Update() {
}
// TDDaParams are params for dopamine (DA) signal as the temporal difference (TD)
// between the TDIntegLayer activations in the minus and plus phase.
type TDDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
TonicGe float32
// idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName
TDIntegLayIndex int32 `edit:"-"`
pad, pad1 uint32
}
func (tp *TDDaParams) Defaults() {
tp.TonicGe = 0.3
}
func (tp *TDDaParams) Update() {
}
// GeFromDA returns excitatory conductance from DA dopamine value
func (tp *TDDaParams) GeFromDA(da float32) float32 {
return tp.TonicGe * (1.0 + da)
}
//gosl:end
// note: Defaults not called on GPU
func (ly *LayerParams) RWDefaults() {
ly.Inhib.ActAvg.Nominal = .5
}
func (ly *LayerParams) RWPredDefaults() {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Acts.Dt.GeTau = 40
}
// RWDaPostBuild does post-Build config
func (ly *Layer) RWDaPostBuild() {
ly.Params.RWDa.RWPredLayIndex = ly.BuildConfigFindLayer("RWPredLayName", true)
}
func (ly *LayerParams) TDDefaults() {
ly.Inhib.ActAvg.Nominal = .5
}
func (ly *LayerParams) TDPredDefaults() {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Acts.Dt.GeTau = 40
}
func (ly *Layer) LDTPostBuild() {
ly.Params.LDT.SrcLay1Index = ly.BuildConfigFindLayer("SrcLay1Name", false) // optional
ly.Params.LDT.SrcLay2Index = ly.BuildConfigFindLayer("SrcLay2Name", false) // optional
ly.Params.LDT.SrcLay3Index = ly.BuildConfigFindLayer("SrcLay3Name", false) // optional
ly.Params.LDT.SrcLay4Index = ly.BuildConfigFindLayer("SrcLay4Name", false) // optional
}
// TDIntegPostBuild does post-Build config
func (ly *Layer) TDIntegPostBuild() {
ly.Params.TDInteg.TDPredLayIndex = ly.BuildConfigFindLayer("TDPredLayName", true)
}
// TDDaPostBuild does post-Build config
func (ly *Layer) TDDaPostBuild() {
ly.Params.TDDa.TDIntegLayIndex = ly.BuildConfigFindLayer("TDIntegLayName", true)
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"github.com/emer/emergent/v2/paths"
"github.com/emer/emergent/v2/relpos"
)
// AddRewLayer adds a RewLayer of given name
func (nt *Network) AddRewLayer(name string) *Layer {
ly := nt.AddLayer2D(name, RewLayer, 1, 2)
return ly
}
// AddClampDaLayer adds a ClampDaLayer of given name
func (nt *Network) AddClampDaLayer(name string) *Layer {
da := nt.AddLayer2D(name, InputLayer, 1, 1)
return da
}
// AddTDLayers adds the standard TD (temporal differences) layers, generating a DA signal.
// Pathway from Rew to RewInteg is given class TDToInteg -- should
// have no learning and 1 weight.
func (nt *Network) AddTDLayers(prefix string, rel relpos.Relations, space float32) (rew, rp, ri, td *Layer) {
rew = nt.AddRewLayer(prefix + "Rew")
rp = nt.AddLayer2D(prefix+"RewPred", TDPredLayer, 1, 2)
ri = nt.AddLayer2D(prefix+"RewInteg", TDIntegLayer, 1, 2)
td = nt.AddLayer2D(prefix+"TD", TDDaLayer, 1, 1)
ri.SetBuildConfig("TDPredLayName", rp.Name)
td.SetBuildConfig("TDIntegLayName", ri.Name)
if rel == relpos.Behind {
rp.PlaceBehind(rew, space)
ri.PlaceBehind(rp, space)
td.PlaceBehind(ri, space)
} else {
rp.PlaceRightOf(rew, space)
ri.PlaceRightOf(rp, space)
td.PlaceRightOf(ri, space)
}
return
}
// AddRWLayers adds a simple Rescorla-Wagner (PV only) dopamine system, with a primary
// Reward layer, a RWPred prediction layer, and a dopamine layer that computes the difference.
// Only generates DA when the Rew layer has external input -- otherwise zero.
func (nt *Network) AddRWLayers(prefix string, rel relpos.Relations, space float32) (rew, rp, da *Layer) {
rew = nt.AddRewLayer(prefix + "Rew")
rp = nt.AddLayer2D(prefix+"RWPred", RWPredLayer, 1, 2)
da = nt.AddLayer2D(prefix+"DA", RWDaLayer, 1, 1)
da.SetBuildConfig("RWPredLayName", rp.Name)
if rel == relpos.Behind {
rp.PlaceBehind(rew, space)
da.PlaceBehind(rp, space)
} else {
rp.PlaceRightOf(rew, space)
da.PlaceRightOf(rp, space)
}
return
}
// ConnectToRWPath adds a RWPath from the given sending layer to a RWPred layer
func (nt *Network) ConnectToRWPath(send, recv *Layer, pat paths.Pattern) *Path {
return nt.ConnectLayers(send, recv, pat, RWPath)
}
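// exampleRWUsage is a minimal usage sketch, not part of the generated API:
// it adds the Rescorla-Wagner dopamine layers and drives the prediction layer
// from a hypothetical state input layer. Names and sizes are illustrative only.
func exampleRWUsage(net *Network) {
	_, rp, _ := net.AddRWLayers("", relpos.Behind, 2)
	state := net.AddLayer2D("State", InputLayer, 4, 4) // hypothetical input
	net.ConnectToRWPath(state, rp, paths.NewFull())    // RWPath: learns Da * Send.Act
}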
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
//gosl:start
// RLPredPathParams does dopamine-modulated learning for reward prediction: Da * Send.Act
// Used by RWPath and TDPredPath within corresponding RWPredLayer or TDPredLayer
// to generate reward predictions based on its incoming weights, using linear activation
// function. Has no weight bounds or limits on sign etc.
type RLPredPathParams struct {
// how much to learn on opposite DA sign coding neuron (0..1)
OppSignLRate float32
// tolerance on DA -- if below this absolute value, then DA goes to zero and there is no learning -- prevents the prediction from learning to exactly cancel out the reward value, retaining a residual valence signal
DaTol float32
pad, pad1 float32
}
func (pj *RLPredPathParams) Defaults() {
pj.OppSignLRate = 1.0
}
func (pj *RLPredPathParams) Update() {
}
//gosl:end
func (pj *PathParams) RLPredDefaults() {
pj.SWts.Adapt.SigGain = 1
pj.SWts.Init.Mean = 0
pj.SWts.Init.Var = 0
pj.SWts.Init.Sym.SetBool(false)
}
// Code generated by "goal build"; DO NOT EDIT.
//line rubicon-layer.goal:1
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/math32"
"cogentcore.org/lab/gosl/slbool"
)
//gosl:start
// LDTParams computes reward salience as a global ACh neuromodulatory signal,
// as a function of the MAX activation of its inputs from salience-detecting
// layers (e.g., the superior colliculus: SC), and whenever there is an external
// US outcome input (signalled by the global GvHasRew flag).
// ACh from salience inputs is discounted by GoalMaint activity,
// reducing distraction when pursuing a goal, but US ACh activity is not so reduced.
// ACh modulates excitability of goal-gating layers.
type LDTParams struct {
// SrcThr is the threshold per input source, on absolute value (magnitude),
// to count as a significant reward event, which then drives maximal ACh.
// Set to 0 to disable this nonlinear behavior.
SrcThr float32 `default:"0.05"`
// Rew uses the global Context.NeuroMod.HasRew flag to drive ACh:
// if there is some kind of external reward being given, then
// ACh goes to 1, else 0 for this component.
Rew slbool.Bool `default:"true"`
// MaintInhib is the extent to which active goal maintenance (via Global GoalMaint)
// inhibits ACh signals: when a goal is engaged, distractibility is lower.
MaintInhib float32 `default:"0.8" max:"1" min:"0"`
// index of Layer to get max activity from; set during Build from BuildConfig
// SrcLay1Name if present -- -1 if not used.
SrcLay1Index int32 `edit:"-"`
// index of Layer to get max activity from; set during Build from BuildConfig
// SrcLay2Name if present -- -1 if not used.
SrcLay2Index int32 `edit:"-"`
// index of Layer to get max activity from; set during Build from BuildConfig
// SrcLay3Name if present -- -1 if not used.
SrcLay3Index int32 `edit:"-"`
// index of Layer to get max activity from; set during Build from BuildConfig
// SrcLay4Name if present -- -1 if not used.
SrcLay4Index int32 `edit:"-"`
pad float32
}
func (lp *LDTParams) Defaults() {
lp.SrcThr = 0.05
lp.Rew.SetBool(true)
lp.MaintInhib = 0.8
}
func (lp *LDTParams) Update() {
}
// Thr applies SrcThr threshold to given value
func (lp *LDTParams) Thr(val float32) float32 {
vl := math32.Abs(val) // only abs makes sense -- typically positive anyway
if lp.SrcThr <= 0 {
return vl
}
if vl < lp.SrcThr {
return 0
}
return 1
}
// MaxSrcAct returns the updated maxSrcAct value from given
// source layer activity value.
func (lp *LDTParams) MaxSrcAct(maxSrcAct, srcLayAct float32) float32 {
act := lp.Thr(srcLayAct)
return max(act, maxSrcAct)
}
// ACh returns the computed ACh salience value based on given
// source layer activations and key values from the ctx Context.
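// For example, with the defaults (SrcThr = 0.05, MaintInhib = 0.8):
// source activities (0.03, 0.4) threshold to (0, 1), so maxSrcAct = 1;
// with GoalMaint = 0.5 the discount is 1 - 0.4 = 0.6; with no reward and
// Urgency = 0.2 the result is max(0.6, 0.2) = 0.6 (an external reward forces ACh = 1).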
func (lp *LDTParams) ACh(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act float32) float32 {
maxSrcAct := float32(0)
maxSrcAct = lp.MaxSrcAct(maxSrcAct, srcLay1Act)
maxSrcAct = lp.MaxSrcAct(maxSrcAct, srcLay2Act)
maxSrcAct = lp.MaxSrcAct(maxSrcAct, srcLay3Act)
maxSrcAct = lp.MaxSrcAct(maxSrcAct, srcLay4Act)
maintInh := lp.MaintInhib * GlobalScalars.Value(int(GvGoalMaint), int(di))
maintInh = min(1.0, maintInh)
maxSrcAct *= (1.0 - maintInh)
ach := maxSrcAct
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 {
ach = 1
} else {
ach = math32.Max(ach, GlobalScalars.Value(int(GvUrgency), int(di)))
}
return ach
}
// VTAParams are for computing overall VTA DA based on LHb PVDA
// (primary value -- at US time, computed at start of each trial
// and stored in LHbPVDA global value)
// and Amygdala (CeM) CS / learned value (LV) activations, which update
// every cycle.
type VTAParams struct {
// gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values
CeMGain float32 `default:"0.75"`
// gain on computed LHb DA (Burst - Dip) -- for controlling DA levels
LHbGain float32 `default:"1.25"`
// threshold on ACh level required to generate LV CS-driven dopamine burst
AChThr float32 `default:"0.5"`
pad float32
}
func (vt *VTAParams) Defaults() {
vt.CeMGain = 0.75
vt.LHbGain = 1.25
vt.AChThr = 0.5
}
func (vt *VTAParams) Update() {
}
// VTADA computes the final DA value from LHb values.
// ACh value from LDT is passed as a parameter.
func (vt *VTAParams) VTADA(ctx *Context, di uint32, ach float32, hasRew bool) {
pvDA := vt.LHbGain * GlobalScalars.Value(int(GvLHbPVDA), int(di))
csNet := GlobalScalars.Value(int(GvCeMpos), int(di)) - GlobalScalars.Value(int(GvCeMneg), int(di))
achMod := float32(0)
if ach >= vt.AChThr {
achMod = ach
}
vsPatch := GlobalScalars.Value(int(GvVSPatchPosThr), int(di)) // note: critical to use thresholded version
if csNet > 0 {
csNet = max(0.0, csNet-vsPatch) // VSPatch can shunt positive CS DA, but does not produce dips -- that is the LHb's role
}
csDA := achMod * vt.CeMGain * csNet
// note that ACh only applies to the CS-driven DA -- it should be 1 for PV events anyway
netDA := float32(0)
if hasRew {
netDA = pvDA
} else {
netDA = csDA
}
GlobalScalars.Set(netDA, int(GvVtaDA), int(di)) // note: keeping this separately just for semantics
GlobalScalars.Set(netDA, int(GvDA), int(di)) // general neuromod DA
}
//gosl:end
func (ly *Layer) BLADefaults() {
isAcq := strings.Contains(ly.Name, "Acq") || strings.Contains(ly.Name, "Novel")
lp := ly.Params
lp.Acts.Decay.Act = 0.2
lp.Acts.Decay.Glong = 0.6
lp.Acts.Dend.SSGi = 0
lp.Inhib.Layer.On.SetBool(true)
if isAcq {
lp.Inhib.Layer.Gi = 2 // acq has more input
} else {
lp.Inhib.Layer.Gi = 1.8
lp.Acts.Gbar.L = 25 // needed to not be active at start
}
lp.Inhib.Pool.On.SetBool(true)
lp.Inhib.Pool.Gi = 1
lp.Inhib.ActAvg.Nominal = 0.025
lp.Learn.RLRate.SigmoidMin = 1.0
lp.Learn.TrgAvgAct.RescaleOn.SetBool(false)
lp.Learn.RLRate.Diff.SetBool(true)
lp.Learn.RLRate.DiffThr = 0.01
lp.CT.DecayTau = 0
lp.CT.GeGain = 0.1 // 0.1 has effect, can go a bit lower if need to
lp.Learn.NeuroMod.DAModGain = 0.5
if isAcq {
lp.Learn.NeuroMod.DALRateMod = 0.5
lp.Learn.NeuroMod.BurstGain = 0.2
lp.Learn.NeuroMod.DipGain = 0
} else {
lp.Learn.NeuroMod.DAModGain = 0 // critical to be 0 here, otherwise penalizes CS onset activity!
lp.Learn.NeuroMod.BurstGain = 1
lp.Learn.NeuroMod.DipGain = 1
}
lp.Learn.NeuroMod.AChLRateMod = 1
lp.Learn.NeuroMod.AChDisInhib = 0 // needs to be always active
}
// RubiconPostBuild is used for BLA, VSPatch, and PVLayer types to set NeuroMod params
func (ly *Layer) RubiconPostBuild() {
dm, err := ly.BuildConfigByName("DAMod")
if err == nil {
errors.Log(ly.Params.Learn.NeuroMod.DAMod.SetString(dm))
}
vl, err := ly.BuildConfigByName("Valence")
if err == nil {
errors.Log(ly.Params.Learn.NeuroMod.Valence.SetString(vl))
}
}
func (ly *Layer) CeMDefaults() {
lp := ly.Params
lp.Acts.Decay.Act = 1
lp.Acts.Decay.Glong = 1
lp.Acts.Dend.SSGi = 0
lp.Inhib.Layer.On.SetBool(true)
lp.Inhib.Layer.Gi = 0.5
lp.Inhib.Pool.On.SetBool(true)
lp.Inhib.Pool.Gi = 0.3
lp.Inhib.ActAvg.Nominal = 0.15
lp.Learn.TrgAvgAct.RescaleOn.SetBool(false)
lp.Learn.RLRate.SigmoidMin = 1.0 // doesn't matter -- doesn't learn..
for _, pj := range ly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.PathScale.Abs = 1
}
}
func (ly *Layer) LDTDefaults() {
lp := ly.Params
lp.Inhib.ActAvg.Nominal = 0.1
lp.Inhib.Layer.On.SetBool(true)
lp.Inhib.Layer.Gi = 1 // todo: explore
lp.Inhib.Pool.On.SetBool(false)
lp.Acts.Decay.Act = 1
lp.Acts.Decay.Glong = 1
lp.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
lp.Learn.TrgAvgAct.RescaleOn.SetBool(false)
// lp.Rubicon.Thr = 0.2
// lp.Rubicon.Gain = 2
for _, pj := range ly.RecvPaths {
pj.Params.SetFixedWts()
pj.Params.PathScale.Abs = 1
}
}
func (ly *LayerParams) VSPatchDefaults() {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Layer.FB = 0
ly.Inhib.Pool.FB = 0
ly.Inhib.Pool.Gi = 0.5
ly.Inhib.ActAvg.Nominal = 0.2
ly.Learn.RLRate.Diff.SetBool(false)
ly.Learn.RLRate.SigmoidMin = 0.01 // 0.01 > 0.05
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
ly.Learn.TrgAvgAct.GiBaseInit = 0.5
// Learn.NeuroMod.DAMod needs to be set via BuildConfig
ly.Learn.NeuroMod.DALRateSign.SetBool(true)
ly.Learn.NeuroMod.AChLRateMod = 0.8 // ACh now active for extinction, so this is ok
ly.Learn.NeuroMod.AChDisInhib = 0 // essential: has to fire when expected but not present!
ly.Learn.NeuroMod.BurstGain = 1
ly.Learn.NeuroMod.DipGain = 1 // now must be balanced -- otherwise overshoots
}
func (ly *LayerParams) DrivesDefaults() {
ly.Inhib.ActAvg.Nominal = 0.01
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 0.5
ly.Acts.PopCode.On.SetBool(true)
ly.Acts.PopCode.MinAct = 0.2 // low activity for low drive -- also has special 0 case = nothing
ly.Acts.PopCode.MinSigma = 0.08
ly.Acts.PopCode.MaxSigma = 0.12
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
}
func (ly *LayerParams) UrgencyDefaults() {
ly.Inhib.ActAvg.Nominal = 0.2
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Pool.On.SetBool(false)
ly.Acts.PopCode.On.SetBool(true) // use only popcode
ly.Acts.PopCode.MinAct = 0
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
}
func (ly *LayerParams) USDefaults() {
ly.Inhib.ActAvg.Nominal = 0.05
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 0.5
ly.Acts.PopCode.On.SetBool(true)
ly.Acts.PopCode.MinAct = 0.2 // low activity for low val -- also has special 0 case = nothing
ly.Acts.PopCode.MinSigma = 0.08
ly.Acts.PopCode.MaxSigma = 0.12
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
}
func (ly *LayerParams) PVDefaults() {
ly.Inhib.ActAvg.Nominal = 0.2
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1
ly.Inhib.Pool.On.SetBool(false)
ly.Acts.PopCode.On.SetBool(true)
// note: may want to modulate rate code as well:
ly.Acts.PopCode.Ge = 0.4
// ly.Acts.PopCode.MinAct = 0.2
// ly.Acts.PopCode.MinSigma = 0.08
// ly.Acts.PopCode.MaxSigma = 0.12
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Learn.TrgAvgAct.RescaleOn.SetBool(false)
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"github.com/emer/emergent/v2/paths"
"github.com/emer/emergent/v2/relpos"
)
// AddLDTLayer adds a LDTLayer
func (nt *Network) AddLDTLayer(prefix string) *Layer {
ldt := nt.AddLayer2D(prefix+"LDT", LDTLayer, 1, 1)
return ldt
}
// AddBLALayers adds two BLA layers, acquisition and extinction, with D1 / D2
// assignments depending on positive or negative valence
func (nt *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX int, rel relpos.Relations, space float32) (acq, ext *Layer) {
if pos {
d1 := nt.AddLayer4D(prefix+"BLAposAcqD1", BLALayer, 1, nUs, nNeurY, nNeurX)
d1.SetBuildConfig("DAMod", "D1Mod")
d1.SetBuildConfig("Valence", "Positive")
d2 := nt.AddLayer4D(prefix+"BLAposExtD2", BLALayer, 1, nUs, nNeurY, nNeurX)
d2.SetBuildConfig("DAMod", "D2Mod")
d2.SetBuildConfig("Valence", "Positive")
acq = d1
ext = d2
} else {
d1 := nt.AddLayer4D(prefix+"BLAnegExtD1", BLALayer, 1, nUs, nNeurY, nNeurX)
d1.SetBuildConfig("DAMod", "D1Mod")
d1.SetBuildConfig("Valence", "Negative")
d2 := nt.AddLayer4D(prefix+"BLAnegAcqD2", BLALayer, 1, nUs, nNeurY, nNeurX)
d2.SetBuildConfig("DAMod", "D2Mod")
d2.SetBuildConfig("Valence", "Negative")
d2.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Layer.Gi = 1.2 // weaker
})
acq = d2
ext = d1
}
pt := nt.ConnectLayers(ext, acq, paths.NewPoolOneToOne(), InhibPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 0.5 // key param for efficacy of inhibition -- may need to tweak
})
pt.AddClass("BLAExtToAcq")
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Abs = 1 // 1 needed for inhibition
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
pt = nt.ConnectLayers(acq, ext, paths.NewOneToOne(), CTCtxtPath)
pt.AddClass("BLAAcqToExt")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 2
})
pt = nt.ConnectLayers(acq, acq, NewBLANovelPath(), InhibPath)
pt.AddClass("BLANovelInhib")
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Abs = 0.5
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
pt = nt.ConnectLayers(ext, acq, NewBLANovelPath(), InhibPath)
pt.AddClass("BLANovelInhib")
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Abs = 0.5
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
if rel == relpos.Behind {
ext.PlaceBehind(acq, space)
} else {
ext.PlaceRightOf(acq, space)
}
acq.AddClass("BLA")
ext.AddClass("BLA")
return
}
// AddAmygdala adds a full amygdala complex including BLA,
// CeM, and LDT. Inclusion of negative valence is optional with neg
// arg -- neg* layers are nil if not included.
// Uses the network Rubicon.NPosUSs and NNegUSs for number of pools --
// must be configured prior to calling this.
func (nt *Network) AddAmygdala(prefix string, neg bool, nNeurY, nNeurX int, space float32) (blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov *Layer) {
nUSpos := int(nt.Rubicon.NPosUSs)
nUSneg := int(nt.Rubicon.NNegUSs)
blaPosAcq, blaPosExt = nt.AddBLALayers(prefix, true, nUSpos, nNeurY, nNeurX, relpos.Behind, space)
if neg {
blaNegAcq, blaNegExt = nt.AddBLALayers(prefix, false, nUSneg, nNeurY, nNeurX, relpos.Behind, space)
blaPosAcq.SetBuildConfig("LayInhib1Name", blaNegAcq.Name)
blaNegAcq.SetBuildConfig("LayInhib1Name", blaPosAcq.Name)
}
cemPos = nt.AddLayer4D(prefix+"CeMPos", CeMLayer, 1, nUSpos, 1, nNeurX)
cemPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning
cemPos.SetBuildConfig("Valence", "Positive")
if neg {
cemNeg = nt.AddLayer4D(prefix+"CeMNeg", CeMLayer, 1, nUSneg, 1, nNeurX)
cemNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning
cemNeg.SetBuildConfig("Valence", "Negative")
}
blaNov = nt.AddLayer4D(prefix+"BLANovelCS", BLALayer, 1, 1, 4, 4)
blaNov.SetBuildConfig("DAMod", "D1Mod")
blaNov.SetBuildConfig("Valence", "Positive")
blaNov.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
ly.Inhib.Layer.Gi = 0.8
ly.Inhib.Pool.On.SetBool(false)
ly.Learn.NeuroMod.DAModGain = 0
ly.Learn.RLRate.On.SetBool(false)
})
p1to1 := paths.NewPoolOneToOne()
nt.ConnectLayers(blaPosAcq, cemPos, p1to1, ForwardPath).AddClass("BLAToCeM_Excite")
nt.ConnectLayers(blaPosExt, cemPos, p1to1, InhibPath).AddClass("BLAToCeM_Inhib")
// default Abs = 1 works for both of these
if neg {
nt.ConnectLayers(blaNegAcq, cemNeg, p1to1, ForwardPath).AddClass("BLAToCeM_Excite")
nt.ConnectLayers(blaNegExt, cemNeg, p1to1, InhibPath).AddClass("BLAToCeM_Inhib")
}
pt := nt.ConnectLayers(blaNov, blaPosAcq, p1to1, ForwardPath)
// dilutes everyone else, so make it weaker Rel, compensate with Abs
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.PathScale.Rel = 0.1
pt.PathScale.Abs = 2 // 3 competes with CS too strongly
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.4
})
pt.AddClass("BLAFromNovel")
cemPos.PlaceBehind(blaPosExt, space)
if neg {
blaNegAcq.PlaceBehind(blaPosExt, space)
cemPos.PlaceBehind(blaNegExt, space)
cemNeg.PlaceBehind(cemPos, space)
blaNov.PlaceBehind(cemNeg, space)
} else {
blaNov.PlaceBehind(cemPos, space)
}
return
}
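// exampleAddAmygdala is a minimal, hypothetical usage sketch (not part of the
// generated API): it assumes Rubicon.NPosUSs (and NNegUSs if neg is true)
// have already been configured, and that CS / US inputs are wired separately
// via ConnectCSToBLApos and ConnectUSToBLA.
func exampleAddAmygdala(nt *Network) {
	// positive valence only (neg = false), 4x4 neurons per US pool, spacing 2
	blaPosAcq, blaPosExt, _, _, cemPos, _, blaNov := nt.AddAmygdala("", false, 4, 4, 2)
	_, _, _, _ = blaPosAcq, blaPosExt, cemPos, blaNov
}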
// ConnectToBLAAcq adds a BLAPath from given sending layer to a BLA layer,
// and configures it for acquisition parameters. Sets class to BLAAcqPath.
// This is for any CS or contextual inputs that drive acquisition.
func (nt *Network) ConnectToBLAAcq(send, recv *Layer, pat paths.Pattern) *Path {
pt := nt.ConnectLayers(send, recv, pat, BLAPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.LRate.Base = 0.02
pt.BLA.NegDeltaLRate = 0.01 // slow for acq -- could be 0
})
pt.AddClass("BLAAcqPath")
return pt
}
// ConnectToBLAExt adds a BLAPath from given sending layer to a BLA layer,
// and configures it for extinction parameters. Sets class to BLAExtPath.
// This is for any CS or contextual inputs that drive extinction neurons to fire
// and override the acquisition ones.
func (nt *Network) ConnectToBLAExt(send, recv *Layer, pat paths.Pattern) *Path {
pt := nt.ConnectLayers(send, recv, pat, BLAPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 4
pt.Learn.LRate.Base = 0.05 // 0.02 for pvlv CS 50% balance
pt.BLA.NegDeltaLRate = 1 // fast for extinction unlearning -- could be slower
})
pt.AddClass("BLAExtPath")
return pt
}
// ConnectCSToBLApos connects the CS input to BLAposAcqD1, BLANovelCS layers
// using fixed, higher-variance weights, full pathway.
// Sets classes to: CSToBLApos, CSToBLANovel with default params
func (nt *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, novInhib *Path) {
toAcq = nt.ConnectLayers(cs, blaAcq, paths.NewFull(), BLAPath)
toAcq.AddDefaultParams(func(pt *PathParams) { // stronger
pt.PathScale.Abs = 1.5
pt.Learn.LRate.Base = 0.1 // faster learning
pt.BLA.NegDeltaLRate = 0.01 // slow for acq -- could be 0
})
toAcq.AddClass("CSToBLApos")
toNov = nt.ConnectLayers(cs, blaNov, paths.NewFull(), BLAPath)
// dilutes everyone else, so make it weaker Rel, compensate with Abs
toNov.AddDefaultParams(func(pt *PathParams) {
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.75
pt.SWts.Init.Var = 0.25
pt.SWts.Adapt.On.SetBool(false)
pt.Learn.Learn.SetBool(false)
})
toNov.AddClass("CSToBLANovel")
novInhib = nt.ConnectLayers(cs, blaNov, paths.NewFull(), InhibPath)
novInhib.AddDefaultParams(func(pt *PathParams) {
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.1
pt.SWts.Init.Var = 0.05
pt.SWts.Adapt.On.SetBool(false)
pt.Learn.LRate.Base = 0.01
pt.Learn.Hebb.On.SetBool(true)
pt.Learn.Hebb.Down = 0 // only goes up
})
novInhib.AddClass("CSToBLANovelInhib")
return
}
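// exampleConnectCSAndContext is a hedged wiring sketch (the layer arguments
// are assumptions, not part of the generated API): a CS input layer drives
// both the acquisition and novelty BLA layers via ConnectCSToBLApos, while a
// contextual input (e.g., an OFC PTp layer) drives the extinction layer via
// ConnectToBLAExt.
func exampleConnectCSAndContext(nt *Network, cs, blaPosAcq, blaPosExt, blaNov, context *Layer) {
	nt.ConnectCSToBLApos(cs, blaPosAcq, blaNov)
	nt.ConnectToBLAExt(context, blaPosExt, paths.NewPoolOneToOne())
}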
// ConnectUSToBLA connects the US input to BLApos(Neg)AcqD1(D2) and
// BLApos(Neg)ExtD2(D1) layers,
// using fixed, higher-variance weights, full pathway.
// Sets classes to: USToBLAAcq and USToBLAExt
func (nt *Network) ConnectUSToBLA(us, blaAcq, blaExt *Layer) (toAcq, toExt *Path) {
toAcq = nt.ConnectLayers(us, blaAcq, paths.NewPoolOneToOne(), BLAPath)
toAcq.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Rel = 0.5
pt.PathScale.Abs = 6
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.75
pt.SWts.Init.Var = 0.25
pt.Learn.LRate.Base = 0.001 // could be 0
pt.BLA.NegDeltaLRate = 0.01 // slow for acq -- could be 0
})
toAcq.AddClass("USToBLAAcq")
toExt = nt.ConnectLayers(us, blaExt, paths.NewPoolOneToOne(), InhibPath)
// actual US inhibits extinction -- must be strong enough to block ACh enhancement
toExt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 0.5 // note: key param
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0
pt.SWts.Adapt.On.SetBool(false)
pt.Learn.Learn.SetBool(false)
})
toExt.AddClass("USToBLAExtInhib")
return
}
// AddUSLayers adds USpos, USneg, and Cost layers for positive or negative valence
// unconditioned stimuli (USs), using a pop-code representation of US magnitude.
// These track the Global USpos, USneg, Cost for visualization and predictive learning.
// Actual US inputs are set in Rubicon.
// Uses the network Rubicon.NPosUSs, NNegUSs, and NCosts for number of pools --
// must be configured prior to calling this.
func (nt *Network) AddUSLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal *Layer) {
nUSpos := int(nt.Rubicon.NPosUSs)
nUSneg := int(nt.Rubicon.NNegUSs)
nCost := int(nt.Rubicon.NCosts)
usPos = nt.AddLayer4D("USpos", USLayer, 1, nUSpos, popY, popX)
usPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning
usPos.SetBuildConfig("Valence", "Positive")
usNeg = nt.AddLayer4D("USneg", USLayer, 1, nUSneg, popY, popX)
usNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning
usNeg.SetBuildConfig("Valence", "Negative")
cost = nt.AddLayer4D("Cost", USLayer, 1, nCost, popY, popX)
cost.SetBuildConfig("DAMod", "D1Mod") // d1mod = incremental current
cost.SetBuildConfig("Valence", "Cost")
costFinal = nt.AddLayer4D("CostFin", USLayer, 1, nCost, popY, popX)
costFinal.SetBuildConfig("DAMod", "D2Mod") // d2mod = final
costFinal.SetBuildConfig("Valence", "Cost")
cost.PlaceRightOf(usNeg, space*2)
costFinal.PlaceBehind(cost, space)
if rel == relpos.Behind {
usNeg.PlaceBehind(usPos, space)
} else {
usNeg.PlaceRightOf(usPos, space)
}
return
}
// AddUSPulvLayers adds USpos, USneg, and Cost layers for positive or negative valence
// unconditioned stimuli (USs), using a pop-code representation of US magnitude.
// These track the Global USpos, USneg, Cost, for visualization and predictive learning.
// Actual US inputs are set in Rubicon.
// Adds Pulvinar predictive layers for each.
func (nt *Network) AddUSPulvLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal, usPosP, usNegP, costP *Layer) {
usPos, usNeg, cost, costFinal = nt.AddUSLayers(popY, popX, rel, space)
usPosP = nt.AddPulvForLayer(usPos, space)
usPosP.SetBuildConfig("Valence", "Positive")
usNegP = nt.AddPulvForLayer(usNeg, space)
usNegP.SetBuildConfig("Valence", "Negative")
costP = nt.AddPulvForLayer(cost, space)
costP.SetBuildConfig("Valence", "Cost")
if rel == relpos.Behind {
costFinal.PlaceBehind(costP, space)
usNeg.PlaceBehind(usPosP, space)
}
usParams := func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 0.5
}
usPosP.AddDefaultParams(usParams)
usPosP.AddClass("USLayer")
usNegP.AddDefaultParams(usParams)
usNegP.AddClass("USLayer")
costP.AddDefaultParams(usParams)
costP.AddClass("USLayer")
costFinal.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.Pool.Gi = 1
ly.Acts.PopCode.Ge = 1.0
})
return
}
// AddPVLayers adds PVpos and PVneg layers for positive or negative valence
// primary value representations, representing the total drive and effort weighted
// USpos outcome, or total USneg outcome.
// Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over
// given numbers of neurons in the X and Y dimensions.
func (nt *Network) AddPVLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg *Layer) {
pvPos = nt.AddLayer2D("PVpos", PVLayer, nNeurY, nNeurX)
pvPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning
pvPos.SetBuildConfig("Valence", "Positive")
pvNeg = nt.AddLayer2D("PVneg", PVLayer, nNeurY, nNeurX)
pvNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning
pvNeg.SetBuildConfig("Valence", "Negative")
if rel == relpos.Behind {
pvNeg.PlaceBehind(pvPos, space)
} else {
pvNeg.PlaceRightOf(pvPos, space)
}
return
}
// AddPVPulvLayers adds PVpos and PVneg layers for positive or negative valence
// primary value representations, representing the total drive and effort weighted
// USpos outcomes, or total USneg outcomes.
// Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over
// given numbers of neurons in the X and Y dimensions.
// Adds Pulvinar predictive layers for each.
func (nt *Network) AddPVPulvLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg, pvPosP, pvNegP *Layer) {
pvPos, pvNeg = nt.AddPVLayers(nNeurX, nNeurY, rel, space)
pvPosP = nt.AddPulvForLayer(pvPos, space)
pvNegP = nt.AddPulvForLayer(pvNeg, space)
if rel == relpos.Behind {
pvNeg.PlaceBehind(pvPosP, space)
}
pvParams := func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.2
ly.Inhib.Layer.Gi = 0.5
}
pvPosP.AddDefaultParams(pvParams)
pvPosP.AddClass("PVLayer")
pvNegP.AddDefaultParams(pvParams)
pvNegP.AddClass("PVLayer")
return
}
// AddVSPatchLayers adds VSPatchD1 and VSPatchD2 layers (positive valence),
// with nUs pools of nNeurY x nNeurX units each.
func (nt *Network) AddVSPatchLayers(prefix string, nUs, nNeurY, nNeurX int, space float32) (d1, d2 *Layer) {
d1 = nt.AddLayer4D(prefix+"VSPatchD1", VSPatchLayer, 1, nUs, nNeurY, nNeurX)
d1.SetBuildConfig("DAMod", "D1Mod")
d1.SetBuildConfig("Valence", "Positive")
d2 = nt.AddLayer4D(prefix+"VSPatchD2", VSPatchLayer, 1, nUs, nNeurY, nNeurX)
d2.SetBuildConfig("DAMod", "D2Mod")
d2.SetBuildConfig("Valence", "Positive")
d2.PlaceBehind(d1, space)
return
}
// ConnectToVSPatch adds a VSPatchPath from given sending layer to VSPatchD1, D2 layers
func (nt *Network) ConnectToVSPatch(send, vspD1, vspD2 *Layer, pat paths.Pattern) (*Path, *Path) {
d1 := nt.ConnectLayers(send, vspD1, pat, VSPatchPath)
d2 := nt.ConnectLayers(send, vspD2, pat, VSPatchPath)
return d1, d2
}
// AddVTALHbLDTLayers adds VTA dopamine, LHb DA dipping, and LDT ACh layers
// which are driven by corresponding values in Global
func (nt *Network) AddVTALHbLDTLayers(rel relpos.Relations, space float32) (vta, lhb, ldt *Layer) {
vta = nt.AddLayer2D("VTA", VTALayer, 1, 1)
lhb = nt.AddLayer2D("LHb", LHbLayer, 1, 2)
ldt = nt.AddLDTLayer("")
if rel == relpos.Behind {
lhb.PlaceBehind(vta, space)
ldt.PlaceBehind(lhb, space)
} else {
lhb.PlaceRightOf(vta, space)
ldt.PlaceRightOf(lhb, space)
}
return
}
// AddSCLayer2D adds a superior colliculus 2D layer
// which computes stimulus onset via trial-delayed inhibition
// (Inhib.FFPrv) -- connect with fixed random input from sensory
// input layers. Sets base name and class name to SC.
// Must set Inhib.FFPrv > 0 and Act.Decay.* = 0
func (nt *Network) AddSCLayer2D(prefix string, nNeurY, nNeurX int) *Layer {
sc := nt.AddLayer2D(prefix+"SC", SuperLayer, nNeurY, nNeurX)
sc.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.2
ly.Inhib.Pool.On.SetBool(false)
ly.Acts.Decay.Act = 1 // key for rapid updating
ly.Acts.Decay.Glong = 0.0
ly.Acts.Decay.LearnCa = 1.0 // uses CaD as a readout -- clear
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.KNa.TrialSlow.SetBool(true)
ly.Acts.KNa.Slow.Gk = 0.05 // 0.1 enough to fully inhibit over several trials
})
sc.AddClass("SC")
return sc
}
// AddSCLayer4D adds a superior colliculus 4D layer
// which computes stimulus onset via trial-delayed inhibition
// (Inhib.FFPrv) -- connect with fixed random input from sensory
// input layers. Sets base name and class name to SC.
// Must set Inhib.FFPrv > 0 and Act.Decay.* = 0
func (nt *Network) AddSCLayer4D(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
sc := nt.AddLayer4D(prefix+"SC", SuperLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
sc.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.2
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 1.2
ly.Acts.Decay.Act = 1 // key for rapid updating
ly.Acts.Decay.Glong = 0.0
ly.Acts.Decay.LearnCa = 1.0 // uses CaD as a readout -- clear
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.KNa.TrialSlow.SetBool(true)
ly.Acts.KNa.Slow.Gk = 1
})
sc.AddClass("SC")
return sc
}
// ConnectToSC adds a ForwardPath from given sending layer to
// a SC layer, setting class as ToSC -- should set params
// as fixed random with more variance than usual.
func (nt *Network) ConnectToSC(send, recv *Layer, pat paths.Pattern) *Path {
pt := nt.ConnectLayers(send, recv, pat, ForwardPath)
pt.AddClass("ToSC")
return pt
}
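// exampleConnectToSC is a hedged sketch (parameter values are illustrative
// assumptions) of the fixed, higher-variance random weights typically set
// on a sensory input -> SC pathway, per the ConnectToSC docs above.
func exampleConnectToSC(nt *Network, input, sc *Layer) {
	pt := nt.ConnectToSC(input, sc, paths.NewFull())
	pt.AddDefaultParams(func(pt *PathParams) {
		pt.Learn.Learn.SetBool(false) // fixed random weights
		pt.SWts.Adapt.On.SetBool(false)
		pt.SWts.Init.SPct = 0
		pt.SWts.Init.Mean = 0.5
		pt.SWts.Init.Var = 0.4 // more variance than usual
	})
}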
// ConnectToSC1to1 adds a 1to1 ForwardPath from given sending layer to
// a SC layer, copying the geometry of the sending layer,
// setting class as ToSC. The connection weights are fixed and uniform.
func (nt *Network) ConnectToSC1to1(send, recv *Layer) *Path {
recv.Shape.CopyFrom(&send.Shape)
pt := nt.ConnectLayers(send, recv, paths.NewOneToOne(), ForwardPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Init.SPct = 0
pt.SWts.Adapt.On.SetBool(false)
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
})
pt.AddClass("ToSC")
return pt
}
// AddDrivesLayer adds Rubicon layer representing current drive activity,
// from Global Drive.Drives.
// Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over
// given numbers of neurons in the X and Y dimensions, per drive pool.
func (nt *Network) AddDrivesLayer(nNeurY, nNeurX int) *Layer {
nix := nt.NetIxs()
drv := nt.AddLayer4D("Drives", DrivesLayer, 1, int(nix.RubiconNPosUSs), nNeurY, nNeurX)
return drv
}
// AddDrivesPulvLayer adds Rubicon layer representing current drive activity,
// from Global Drive.Drives.
// Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over
// given numbers of neurons in the X and Y dimensions, per drive pool.
// Adds Pulvinar predictive layers for Drives.
func (nt *Network) AddDrivesPulvLayer(nNeurY, nNeurX int, space float32) (drv, drvP *Layer) {
drv = nt.AddDrivesLayer(nNeurY, nNeurX)
drvP = nt.AddPulvForLayer(drv, space)
drvP.AddDefaultParams(func(ly *LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.01
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 0.5
})
drvP.AddClass("DrivesLayer")
return
}
// AddUrgencyLayer adds Rubicon layer representing current urgency factor,
// from Global Urgency.Urge
// Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over
// given numbers of neurons in the X and Y dimensions.
func (nt *Network) AddUrgencyLayer(nNeurY, nNeurX int) *Layer {
urge := nt.AddLayer2D("Urgency", UrgencyLayer, nNeurY, nNeurX)
return urge
}
// AddRubiconPulvLayers adds Rubicon layers for PV-related information visualizing
// the internal states of the Global state, with Pulvinar prediction
// layers for training PFC layers.
// Uses the network Rubicon.NPosUSs, NNegUSs, NCosts for number of pools --
// must be configured prior to calling this.
// * drives = popcode representation of drive strength (no activity for 0)
// number of active drives comes from Context; popY, popX neurons per pool.
// * urgency = popcode representation of urgency Go bias factor, popY, popX neurons.
// * us = popcode per US, positive & negative, cost
// * pv = popcode representation of final primary value on positive and negative
// valences -- this is what the dopamine value ends up coding (pos - neg).
// Layers are organized in depth per type: USs in one column, PVs in the next,
// with Drives in the back; urgency behind that.
func (nt *Network) AddRubiconPulvLayers(nYneur, popY, popX int, space float32) (drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP *Layer) {
rel := relpos.Behind
usPos, usNeg, cost, costFinal, usPosP, usNegP, costP = nt.AddUSPulvLayers(popY, popX, rel, space)
pvPos, pvNeg, pvPosP, pvNegP = nt.AddPVPulvLayers(popY, popX, rel, space)
drives, drivesP = nt.AddDrivesPulvLayer(popY, popX, space)
urgency = nt.AddUrgencyLayer(popY, popX)
pvPos.PlaceRightOf(usPos, space)
drives.PlaceBehind(usNegP, space)
urgency.PlaceBehind(usNegP, space)
return
}
// AddOFCpos adds orbital frontal cortex positive US-coding layers,
// for given number of pos US pools (first is novelty / curiosity pool),
// with given number of units per pool.
func (nt *Network) AddOFCpos(nUSs, nY, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) {
ofc, ofcCT, ofcPT, ofcPTp, ofcMD = nt.AddPFC4D("OFCpos", "MD", 1, nUSs, ofcY, ofcX, true, true, space)
ofc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1 })
ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.02 })
ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.On.SetBool(true) })
// ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 2.0 })
ofcPT.AddDefaultParams(func(ly *LayerParams) {
ly.Acts.Dend.ModACh.SetBool(true)
ly.CT.OFCposPT.SetBool(true)
})
ofcPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1.0 })
ofcPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.1 })
return
}
// AddOFCneg adds orbital frontal cortex negative US-coding layers,
// for given number of neg US pools with given number of units per pool.
func (nt *Network) AddOFCneg(nUSs, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) {
ofc, ofcCT, ofcPT, ofcPTp, ofcMD = nt.AddPFC4D("OFCneg", "MD", 1, nUSs, ofcY, ofcX, true, true, space)
ofc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1 })
ofc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.1 })
ofc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.Gi = 1.2 })
// ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.2 })
// ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 3.0 })
ofcPT.AddDefaultParams(func(ly *LayerParams) { ly.Acts.Dend.ModACh.SetBool(true) })
ofcPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1.4 })
ofcPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.1 })
return
}
// AddACCost adds anterior cingulate cost coding layers,
// for given number of cost pools (typically 2: time, effort),
// with given number of units per pool.
func (nt *Network) AddACCost(nCosts, accY, accX int, space float32) (acc, accCT, accPT, accPTp, accMD *Layer) {
acc, accCT, accPT, accPTp, accMD = nt.AddPFC4D("ACCcost", "MD", 1, nCosts, accY, accX, true, true, space)
acc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.On.SetBool(false) }) // no match
acc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1 })
acc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.1 })
acc.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.Gi = 1.2 })
accCT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.On.SetBool(false) })
accCT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1.8 })
// accPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.2 })
// accPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 3.0 })
accPT.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.On.SetBool(false) })
accPT.AddDefaultParams(func(ly *LayerParams) { ly.Acts.Dend.ModACh.SetBool(true) })
accPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Layer.On.SetBool(false) })
accPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.Pool.Gi = 1.2 })
accPTp.AddDefaultParams(func(ly *LayerParams) { ly.Inhib.ActAvg.Nominal = 0.1 })
return
}
// AddRubiconOFCus builds a complete Rubicon network with OFCpos
// (orbital frontal cortex) US-coding layers,
// ILpos infralimbic abstract positive value,
// OFCneg for negative value inputs, and ILneg value layers,
// and ACCost cost prediction layers.
// Uses the network Rubicon.NPosUSs, NNegUSs, NCosts for number of pools --
// must be configured prior to calling this. Calls:
// * AddVTALHbLDTLayers
// * AddRubiconPulvLayers
// * AddVS
// * AddAmygdala
// * AddOFCpos
// * AddOFCneg
// Makes all appropriate interconnections and sets default parameters.
// Needs CS -> BLA, OFC connections to be made.
// Returns layers most likely to be used for remaining connections and positions.
func (nt *Network) AddRubiconOFCus(nYneur, popY, popX, bgY, bgX, ofcY, ofcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc *Layer) {
nUSpos := int(nt.Rubicon.NPosUSs)
nUSneg := int(nt.Rubicon.NNegUSs)
nCosts := int(nt.Rubicon.NCosts)
vta, lhb, ldt := nt.AddVTALHbLDTLayers(relpos.Behind, space)
_ = lhb
_ = ldt
drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP := nt.AddRubiconPulvLayers(nYneur, popY, popX, space)
_ = urgency
vSmtxGo, vSmtxNo, vSgpePr, vSgpeAk, vSstn, vSgpi := nt.AddVentralBG("", 1, nUSpos, bgY, bgX, bgY, bgX, space)
_, _ = vSgpeAk, vSgpePr
vSgated := nt.AddVSGatedLayer("", nYneur)
vSpatchD1, vSpatchD2 = nt.AddVSPatchLayers("", nUSpos, bgY, bgX, space)
vSpatchD1.PlaceRightOf(vSstn, space)
sc = nt.AddSCLayer2D("", ofcY, ofcX)
vSgated.PlaceRightOf(sc, space)
ldt.SetBuildConfig("SrcLay1Name", sc.Name)
blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov := nt.AddAmygdala("", true, ofcY, ofcX, space)
_, _, _, _, _ = blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov
ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ofcPosMD := nt.AddOFCpos(nUSpos, nYneur, ofcY, ofcX, space)
_ = ofcPosPT
ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ofcNegMD := nt.AddOFCneg(nUSneg, ofcY, ofcX, space)
_ = ofcNegPT
ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD = nt.AddPFC2D("ILpos", "MD", ofcY, ofcX, true, true, space)
_ = ilPosPT
ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD = nt.AddPFC2D("ILneg", "MD", ofcY, ofcX, true, true, space)
_ = ilNegPT
ilPosPT.AddDefaultParams(func(ly *LayerParams) { ly.Acts.Dend.ModACh.SetBool(true) })
ilNegPT.AddDefaultParams(func(ly *LayerParams) { ly.Acts.Dend.ModACh.SetBool(true) })
accCost, accCostCT, accCostPT, accCostPTp, accCostMD = nt.AddACCost(nCosts, ofcY, ofcX, space)
_ = accCostPT
p1to1 := paths.NewPoolOneToOne()
// p1to1rnd := paths.NewPoolUniformRand()
// p1to1rnd.PCon = 0.5
full := paths.NewFull()
var pt, bpj *Path
pathClass := "PFCPath"
vSmtxGo.SetBuildConfig("ThalLay1Name", ofcPosMD.Name)
vSmtxNo.SetBuildConfig("ThalLay1Name", ofcPosMD.Name)
nt.ConnectLayers(vSgpi, ofcPosMD, full, InhibPath) // BGThal sets defaults for this
vSmtxGo.SetBuildConfig("ThalLay2Name", ofcNegMD.Name)
vSmtxNo.SetBuildConfig("ThalLay2Name", ofcNegMD.Name)
nt.ConnectLayers(vSgpi, ofcNegMD, full, InhibPath)
vSmtxGo.SetBuildConfig("ThalLay3Name", ilPosMD.Name)
vSmtxNo.SetBuildConfig("ThalLay3Name", ilPosMD.Name)
nt.ConnectLayers(vSgpi, ilPosMD, full, InhibPath)
vSmtxGo.SetBuildConfig("ThalLay4Name", ilNegMD.Name)
vSmtxNo.SetBuildConfig("ThalLay4Name", ilNegMD.Name)
nt.ConnectLayers(vSgpi, ilNegMD, full, InhibPath) // BGThal configs
vSmtxGo.SetBuildConfig("ThalLay5Name", accCostMD.Name)
vSmtxNo.SetBuildConfig("ThalLay5Name", accCostMD.Name)
nt.ConnectLayers(vSgpi, accCostMD, full, InhibPath) // BGThal configs
pfc2m := func(pt *PathParams) { // contextual, not driving -- weaker
pt.PathScale.Rel = 1 // todo: was 0.1
}
// neg val goes to nogo
pt = nt.ConnectToVSMatrix(ilNeg, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
nt.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full)
//////// BLA
nt.ConnectUSToBLA(usPos, blaPosAcq, blaPosExt)
nt.ConnectUSToBLA(usNeg, blaNegAcq, blaNegExt)
pt = nt.ConnectLayers(blaPosAcq, ofcPos, p1to1, ForwardPath) // main driver strong input
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 2
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.4
})
pt.AddClass("BLAToOFC", pathClass)
pt = nt.ConnectLayers(blaNegAcq, ofcNeg, p1to1, ForwardPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 2
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.4
})
pt.AddClass("BLAToOFC", pathClass)
pt = nt.ConnectLayers(ofcPosPTp, blaPosExt, p1to1, BLAPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.Com.GType = ModulatoryG
pt.PathScale.Abs = 1
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.4
})
pt.AddClass("PTpToBLAExt", pathClass)
///////////////////////////////////////////
// VS
d1, d2 := nt.ConnectToVSPatch(drives, vSpatchD1, vSpatchD2, p1to1)
// modulatory -- critical that it drives full GeModSyn=1 in Matrix at max drive act
driveToVsp := func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Abs = 1
pt.PathScale.Rel = 1
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
pt.Com.GType = ModulatoryG
}
d1.AddDefaultParams(driveToVsp)
d2.AddDefaultParams(driveToVsp)
d1.AddClass("DrivesToVSPatch")
d2.AddClass("DrivesToVSPatch")
nt.ConnectToVSPatch(ofcPosPTp, vSpatchD1, vSpatchD2, p1to1)
nt.ConnectToVSPatch(ilPosPTp, vSpatchD1, vSpatchD2, full)
nt.ConnectToVSPatch(ofcNegPTp, vSpatchD1, vSpatchD2, full)
nt.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full)
nt.ConnectToVSPatch(pvPosP, vSpatchD1, vSpatchD2, full)
// same paths to stn as mtxgo
nt.ConnectToVSMatrix(usPos, vSmtxGo, p1to1)
// net.ConnectToVSMatrix(usPos, vSmtxNo, p1to1)
// pj.AddDefaultParams(func(pt *PathParams) {
// pt.PathScale.Abs = 2 // strong
// pt.PathScale.Rel = 0.2
// })
nt.ConnectToVSMatrix(blaPosAcq, vSmtxNo, p1to1)
pt = nt.ConnectToVSMatrix(blaPosAcq, vSmtxGo, p1to1)
pt.AddClass("BLAAcqToGo")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 2 // key strength driver
pt.PathScale.Rel = 1
})
// The usPos version is needed for US gating to clear the goal.
// It is not clear that usNeg should drive nogo directly.
// pj = net.ConnectToVSMatrix(usNeg, vSmtxNo, full)
// pj.AddDefaultParams(func(pt *PathParams) {
// pt.PathScale.Abs = 2 // strong
// pt.PathScale.Rel = 0.2
// })
nt.ConnectToVSMatrix(blaNegAcq, vSmtxNo, full).AddClass("BLAAcqToGo") // neg -> nogo
// pj.AddDefaultParams(func(pt *PathParams) {
// pt.PathScale.Abs = 2
// pt.PathScale.Rel = 1
// })
nt.ConnectLayers(blaPosAcq, vSstn, full, ForwardPath)
nt.ConnectLayers(blaNegAcq, vSstn, full, ForwardPath)
// todo: ofc -> STN?
pt = nt.ConnectToVSMatrix(blaPosExt, vSmtxNo, p1to1)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 0.1 // extinction is mostly within BLA
pt.PathScale.Rel = 1
})
pt.AddClass("BLAExtToNo")
// pt = net.ConnectToVSMatrix(blaNegExt, vSmtxGo, full) // no neg -> go
// Note: this impairs perf in basic examples/boa, and is questionable functionally
// pt.AddDefaultParams(func(pt *PathParams) {
// pt.PathScale.Abs = 0.1 // extinction is mostly within BLA
// pt.PathScale.Rel = 1
// })
// pt.AddClass("BLAExtToNo")
// modulatory -- critical that it drives full GeModSyn=1 in Matrix at max drive act
d2m := func(pt *PathParams) {
pt.Learn.Learn.SetBool(false)
pt.PathScale.Abs = 1
pt.PathScale.Rel = 1
pt.SWts.Init.SPct = 0
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
pt.Com.GType = ModulatoryG
}
pt = nt.ConnectToVSMatrix(drives, vSmtxGo, p1to1)
pt.AddDefaultParams(d2m)
pt.AddClass("DrivesToMtx")
pt = nt.ConnectToVSMatrix(drives, vSmtxNo, p1to1)
pt.AddDefaultParams(d2m)
pt.AddClass("DrivesToMtx")
pt = nt.ConnectToVSMatrix(ofcPos, vSmtxGo, p1to1)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(ofcPos, vSmtxNo, p1to1)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
nt.ConnectLayers(ofcPos, vSstn, full, ForwardPath)
pt = nt.ConnectToVSMatrix(ilPos, vSmtxGo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(ilPos, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
nt.ConnectLayers(ilPos, vSstn, full, ForwardPath)
pt = nt.ConnectToVSMatrix(ofcNeg, vSmtxGo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(ofcNeg, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(ilNeg, vSmtxGo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(ilNeg, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(accCost, vSmtxGo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(accCost, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
// pj = net.ConnectToVSMatrix(urgency, vSmtxGo, full)
// pj.AddDefaultParams(func(pt *PathParams) {
// pt.PathScale.Rel = 0.1 // don't dilute from others
// pt.PathScale.Abs = 4 // but make it strong
// pt.SWts.Init.SPct = 0
// pt.SWts.Init.Mean = 0.5
// pt.SWts.Init.Var = 0.4
// pt.Learn.Learn = false
// })
//////// OFCpos
// Drives -> ofcPos then activates ofcPos -> VS -- ofcPos needs to be strongly BLA dependent
// to reflect either current CS or maintained CS, but not just echo the drive state;
// thus drives are not connected to the deep layers.
pt = nt.ConnectLayers(drives, ofcPos, p1to1, ForwardPath)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Rel = 0.2 // weaker to not drive in absence of BLA
})
pt.AddClass("DrivesToOFC ", pathClass)
// net.ConnectCTSelf(ilPosCT, full, pathClass) // todo: test
nt.ConnectLayers(pvPos, ofcPos, full, BackPath).AddClass("OFCPath", pathClass)
nt.ConnectLayers(usPos, ofcPos, p1to1, BackPath).AddClass("OFCPath", pathClass)
// note: these are all very static, lead to static PT reps:
// need a more dynamic US / value representation to predict.
nt.ConnectToPulv(ofcPos, ofcPosCT, drivesP, p1to1, p1to1, "OFCPath")
nt.ConnectToPulv(ofcPos, ofcPosCT, usPosP, p1to1, p1to1, "OFCPath")
nt.ConnectToPulv(ofcPos, ofcPosCT, pvPosP, full, full, "OFCPath")
nt.ConnectPTpToPulv(ofcPosPTp, drivesP, p1to1, p1to1, "OFCPath")
nt.ConnectPTToPulv(ofcPosPT, ofcPosPTp, usPosP, p1to1, p1to1, "OFCPath")
nt.ConnectPTpToPulv(ofcPosPTp, pvPosP, p1to1, p1to1, "OFCPath")
nt.ConnectLayers(ofcPosPT, pvPos, full, ForwardPath)
///////// ILpos
// net.ConnectCTSelf(ilPosCT, full, pathClass) // todo: test
pt, bpj = nt.BidirConnectLayers(ofcPos, ilPos, full)
pt.AddClass("ILPath", pathClass)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 3 // val needs stronger input
})
bpj.AddClass("ILPath", pathClass)
// note: do *not* bidirectionally connect PTp layers -- too much sustained activity
nt.ConnectToPFC(pvPos, pvPosP, ilPos, ilPosCT, nil, ilPosPTp, full, "ILPath")
nt.ConnectPTpToPulv(ilPosPTp, pvPosP, full, full, "ILPath")
nt.BidirConnectLayers(ilPosPT, pvPos, full)
// note: not connecting deeper CT and PT layers to vSmtxGo at this point
// could explore that later
//////// OFCneg
// net.ConnectCTSelf(ofcNegValCT, full, pathClass) // todo: test
nt.ConnectLayers(pvNeg, ofcNeg, full, BackPath).AddClass("OFCPath", pathClass)
nt.ConnectLayers(usNeg, ofcNeg, p1to1, BackPath).AddClass("OFCPath", pathClass)
// note: these are all very static, lead to static PT reps:
// need a more dynamic US / value representation to predict.
nt.ConnectToPulv(ofcNeg, ofcNegCT, usNegP, p1to1, p1to1, "OFCPath")
nt.ConnectToPulv(ofcNeg, ofcNegCT, pvNegP, full, full, "OFCPath")
nt.ConnectPTToPulv(ofcNegPT, ofcNegPTp, usNegP, p1to1, p1to1, "OFCPath")
nt.ConnectPTpToPulv(ofcNegPTp, pvNegP, full, full, "OFCPath")
nt.ConnectLayers(ofcNegPT, pvNeg, full, ForwardPath)
///////////////////////////////////////////
// Costs
nt.ConnectLayers(pvNeg, accCost, full, BackPath).AddClass("ACCPath", pathClass)
nt.ConnectLayers(cost, accCost, p1to1, BackPath).AddClass("ACCPath", pathClass)
nt.ConnectToPulv(accCost, accCostCT, costP, p1to1, p1to1, "ACCPath")
nt.ConnectPTpToPulv(accCostPTp, costP, p1to1, p1to1, "ACCPath")
pt = nt.ConnectLayers(accCostPT, costFinal, p1to1, ForwardPath)
// pj, _ = net.BidirConnectLayers(accCostPT, costFinal, p1to1)
pt.AddClass("ACCCostToFinal")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = .2 // PT is too strong
})
//////// ILneg
// net.ConnectCTSelf(ilNegCT, full, "ILPath") // todo: test
pt, bpj = nt.BidirConnectLayers(ofcNeg, ilNeg, full)
pt.AddClass("ILPath", pathClass)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 3 // val needs stronger input
})
bpj.AddClass("ILPath", pathClass)
pt, bpj = nt.BidirConnectLayers(accCost, ilNeg, full)
pt.AddClass("ACCPath", pathClass)
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 3 // val needs stronger input
})
bpj.AddClass("ILPath", pathClass)
// note: do *not* bidirectionally connect PTp layers -- too much sustained activity
nt.ConnectToPFC(pvNeg, pvNegP, ilNeg, ilNegCT, nil, ilNegPTp, full, "ILPath")
nt.ConnectPTpToPulv(ilNegPTp, pvNegP, full, full, "ILPath")
nt.BidirConnectLayers(ilNegPT, pvNeg, full)
// note: not connecting deeper CT and PT layers to vSmtxGo at this point
// could explore that later
//////// position
vSgpi.PlaceRightOf(vta, space)
drives.PlaceBehind(vSmtxGo, space)
drivesP.PlaceBehind(vSmtxNo, space)
blaNov.PlaceBehind(vSgated, space)
sc.PlaceRightOf(vSpatchD1, space)
usPos.PlaceAbove(vta)
blaPosAcq.PlaceAbove(usPos)
ofcPos.PlaceRightOf(blaPosAcq, space)
ofcNeg.PlaceRightOf(ofcPos, space)
ilPos.PlaceRightOf(ofcNeg, space*3)
ilNeg.PlaceRightOf(ilPos, space)
accCost.PlaceRightOf(ilNeg, space)
return
}
// AddRubicon builds a complete Rubicon model for goal-driven decision making.
// Uses the network Rubicon.NPosUSs and NNegUSs for number of pools --
// must be configured prior to calling this. Calls:
// * AddRubiconOFCus -- Rubicon, and OFC us coding
// Makes all appropriate interconnections and sets default parameters.
// Needs CS -> BLA, OFC connections to be made.
// Returns layers most likely to be used for remaining connections and positions.
func (nt *Network) AddRubicon(nYneur, popY, popX, bgY, bgX, pfcY, pfcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, urgency, pvPos, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ilNeg, ilNegCT, ilNegPT, ilNegPTp, accCost, plUtil, sc *Layer) {
full := paths.NewFull()
var pt *Path
vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc := nt.AddRubiconOFCus(nYneur, popY, popX, bgY, bgX, pfcY, pfcX, space)
_, _, _, _, _, _, _ = usPos, usNeg, usNegP, pvNeg, pvNegP, ilPosCT, ilNegMD
_, _, _ = accCost, accCostCT, accCostPTp
_, _ = blaNegAcq, blaNegExt
_, _, _, _, _ = ofcPosPT, ofcNegPT, ilPosPT, ilNegPT, accCostPT
// ILposP is what PLutil predicts, in order to learn about value (reward)
ilPosP := nt.AddPulvForSuper(ilPos, space)
// ILnegP is what PLutil predicts, in order to learn about negative US
ilNegP := nt.AddPulvForSuper(ilNeg, space)
// ACCcostP is what PLutil predicts, in order to learn about cost
accCostP := nt.AddPulvForSuper(accCost, space)
pfc2m := func(pt *PathParams) { // contextual, not driving -- weaker
pt.PathScale.Rel = 0.1
}
plUtil, plUtilCT, plUtilPT, plUtilPTp, plUtilMD := nt.AddPFC2D("PLutil", "MD", pfcY, pfcX, true, true, space)
vSmtxGo.SetBuildConfig("ThalLay5Name", plUtilMD.Name)
vSmtxNo.SetBuildConfig("ThalLay5Name", plUtilMD.Name)
nt.ConnectLayers(vSgpi, plUtilMD, full, InhibPath)
plUtilPT.AddDefaultParams(func(ly *LayerParams) { ly.Acts.Dend.ModACh.SetBool(true) })
pt = nt.ConnectToVSMatrix(plUtil, vSmtxGo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
pt = nt.ConnectToVSMatrix(plUtil, vSmtxNo, full)
pt.AddDefaultParams(pfc2m)
pt.AddClass("PFCToVSMtx")
nt.ConnectToVSPatch(plUtilPTp, vSpatchD1, vSpatchD2, full)
//////// ILneg
// net.ConnectCTSelf(ilNegCT, full) // todo: test
// todo: ofcNeg
// net.ConnectToPFC(effort, effortP, ilNeg, ilNegCT, ilNegPT, ilNegPTp, full)
// note: can provide input from *other* relevant inputs not otherwise being predicted
// net.ConnectLayers(dist, ilNegPTPred, full, ForwardPath).AddClass("ToPTPred")
//////// PLutil
// net.ConnectCTSelf(plUtilCT, full) // todo: test
// util predicts OFCval and ILneg
pt, _ = nt.ConnectToPFCBidir(ilPos, ilPosP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 1 // not good to make this stronger actually
})
pt, _ = nt.ConnectToPFCBidir(ilNeg, ilNegP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 3 // drive pl stronger -- only this one works well
})
pt, _ = nt.ConnectToPFCBidir(accCost, accCostP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ACCToPL")
pt.AddDefaultParams(func(pt *PathParams) {
pt.PathScale.Abs = 3 // drive pl stronger?
})
// todo: try PTPred predicting the input layers to PT
ilPosP.PlaceBehind(ilPosMD, space)
ilNegP.PlaceBehind(ilNegMD, space)
accCostP.PlaceBehind(accCostMD, space)
plUtil.PlaceRightOf(accCost, space)
return
}
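// exampleAddRubicon is a hypothetical construction sketch (layer sizes are
// illustrative, not part of the generated API). Rubicon.NPosUSs, NNegUSs,
// and NCosts must be configured (via SetNUSs) before calling this; the
// returned layers are then used for simulation-specific CS -> BLA / OFC
// wiring, e.g. via ConnectCSToBLApos and ConnectToBLAAcq.
func exampleAddRubicon(nt *Network) {
	// nYneur, popY, popX, bgY, bgX, pfcY, pfcX, space
	nt.AddRubicon(1, 6, 6, 6, 6, 8, 8, 2)
}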
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
//gosl:start
// BLAPathParams has parameters for basolateral amygdala learning.
// Learning is driven by the Tr trace as function of ACh * Send Act
// recorded prior to US, and at US, recv unit delta: CaP - CaDPrev
// times normalized GeIntNorm for recv unit credit assignment.
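// Schematically (a hedged sketch with assumed variable names; the actual
// kernel lives in the pathway learning code):
//
//	tr  = ACh * sendAct                        // trace recorded prior to US
//	dwt = lrate * tr * (CaP - CaDPrev) * GeIntNorm
//	if dwt < 0 { dwt *= NegDeltaLRate }        // 0.01 acquisition, 1 extinction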
type BLAPathParams struct {
// negative delta learning rate multiplier;
// use 0.01 for acquisition (don't unlearn) and 1 for extinction.
NegDeltaLRate float32 `default:"0.01,1"`
// threshold on this layer's ACh level for trace learning updates
AChThr float32 `default:"0.1"`
// proportion of US-time stimulus activity to use for the trace component of learning
USTrace float32 `default:"0,0.5"`
pad float32
}
func (bp *BLAPathParams) Defaults() {
bp.NegDeltaLRate = 0.01
bp.AChThr = 0.1
bp.USTrace = 0.5
}
func (bp *BLAPathParams) Update() {
}
//gosl:end
func (pj *PathParams) BLADefaults() {
pj.SWts.Adapt.On.SetBool(false)
pj.SWts.Adapt.SigGain = 1
pj.SWts.Init.SPct = 0
pj.SWts.Init.Mean = 0.1
pj.SWts.Init.Var = 0.05
pj.SWts.Init.Sym.SetBool(false)
pj.Learn.DWt.Update()
pj.Learn.LRate.Base = 0.02
}
func (pj *PathParams) VSPatchDefaults() {
pj.PathScale.Abs = 4 // needs strong drive in general
pj.SWts.Adapt.On.SetBool(false)
pj.SWts.Adapt.SigGain = 1
pj.SWts.Init.SPct = 0
pj.SWts.Init.Mean = 0.5
pj.SWts.Init.Var = 0.25
pj.SWts.Init.Sym.SetBool(false)
pj.Learn.DWt.LearnThr = 0 // 0.3
pj.Learn.DWt.Update()
pj.Learn.LRate.Base = 0.02 // 0.02 needed for smooth integ on vspatch test
}
// Code generated by "goal build"; DO NOT EDIT.
//line rubicon.goal:1
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"log/slog"
"cogentcore.org/core/base/num"
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/randx"
"github.com/emer/emergent/v2/popcode"
)
// DriveParams manages the drive parameters for computing and updating drive state.
// Most of the params are for optional case where drives are automatically
// updated based on US consumption (which satisfies drives) and time passing
// (which increases drives).
type DriveParams struct {
// minimum effective drive value, which is an automatic baseline ensuring
// that a positive US results in at least some minimal level of reward.
// Unlike Base values, this is not reflected in the activity of the drive
// values, and applies at the time of reward calculation as a minimum baseline.
DriveMin float32
// baseline levels for each drive, which is what they naturally trend toward
// in the absence of any input. Set inactive drives to 0 baseline,
// active ones typically elevated baseline (0-1 range).
Base []float32
// time constants in ThetaCycle (trial) units for natural update toward
// Base values. 0 values means no natural update (can be updated externally).
Tau []float32
// decrement in drive value when US is consumed, thus partially satisfying
// the drive. Positive values are subtracted from current Drive value.
Satisfaction []float32
// 1/Tau
Dt []float32 `display:"-"`
}
func (dp *DriveParams) Alloc(nDrives int) {
if len(dp.Base) == nDrives {
return
}
dp.Base = make([]float32, nDrives)
dp.Tau = make([]float32, nDrives)
dp.Dt = make([]float32, nDrives)
dp.Satisfaction = make([]float32, nDrives)
}
func (dp *DriveParams) Defaults() {
dp.DriveMin = 0.5
for i := range dp.Satisfaction {
dp.Satisfaction[i] = 0
}
dp.Update()
}
func (dp *DriveParams) Update() {
for i, tau := range dp.Tau {
if tau <= 0 {
dp.Dt[i] = 0
} else {
dp.Dt[i] = 1 / tau
}
}
}
// VarToZero sets all values of given drive-sized variable to 0
func (dp *DriveParams) VarToZero(di uint32, gvar GlobalVectorVars) {
for i := range dp.Base {
GlobalVectors.Set(0, int(gvar), int(i), int(di))
}
}
// ToZero sets all drives to 0
func (dp *DriveParams) ToZero(di uint32) {
dp.VarToZero(di, GvDrives)
}
// ToBaseline sets all drives to their baseline levels
func (dp *DriveParams) ToBaseline(di uint32) {
for i := range dp.Base {
GlobalVectors.Set(dp.Base[i], int(GvDrives), int(i), int(di))
}
}
// AddTo increments drive by given amount, subject to 0-1 range clamping.
// Returns new val.
func (dp *DriveParams) AddTo(di uint32, drv uint32, delta float32) float32 {
dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di)) + delta
if dv > 1 {
dv = 1
} else if dv < 0 {
dv = 0
}
GlobalVectors.Set(dv, int(GvDrives), int(drv), int(di))
return dv
}
// SoftAdd increments drive by given amount, using soft-bounding to 0-1 extremes.
// if delta is positive, multiply by 1-val, else val. Returns new val.
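// For example (a worked illustration, not part of the API): from a current
// value of 0.8, delta = +0.5 yields 0.8 + (1-0.8)*0.5 = 0.9, while
// delta = -0.5 yields 0.8 + 0.8*(-0.5) = 0.4, so values approach but never
// overshoot the 0-1 bounds.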
func (dp *DriveParams) SoftAdd(di uint32, drv uint32, delta float32) float32 {
dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di))
if delta > 0 {
dv += (1 - dv) * delta
} else {
dv += dv * delta
}
if dv > 1 {
dv = 1
} else if dv < 0 {
dv = 0
}
GlobalVectors.Set(dv, int(GvDrives), int(drv), int(di))
return dv
}
// ExpStep updates drive with an exponential step with given dt value
// toward given baseline value.
func (dp *DriveParams) ExpStep(di uint32, drv uint32, dt, base float32) float32 {
dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di))
dv += dt * (base - dv)
if dv > 1 {
dv = 1
} else if dv < 0 {
dv = 0
}
GlobalVectors.Set(dv, int(GvDrives), int(drv), int(di))
return dv
}
// ExpStepAll updates given drives with an exponential step using dt values
// toward baseline values.
func (dp *DriveParams) ExpStepAll(di uint32) {
for i := range dp.Base {
dp.ExpStep(di, uint32(i), dp.Dt[i], dp.Base[i])
}
}
// EffectiveDrive returns the Max of Drives at given index and DriveMin.
// note that index 0 is the novelty / curiosity drive, which doesn't use DriveMin.
func (dp *DriveParams) EffectiveDrive(di uint32, i uint32) float32 {
if i == 0 {
return GlobalVectors.Value(int(GvDrives), int(0), int(di))
}
return math32.Max(GlobalVectors.Value(int(GvDrives), int(i), int(di)), dp.DriveMin)
}
//////// UrgencyParams
// UrgencyParams has urgency (increasing pressure to do something)
// and parameters for updating it.
// Raw urgency integrates effort when _not_ goal-engaged,
// while effort (negative US 0) integrates when a goal _is_ engaged.
type UrgencyParams struct {
// value of raw urgency where the urgency activation level is 50%
U50 float32
// exponent on the urge factor -- valid numbers are 1,2,4,6
Power int32 `default:"4"`
// threshold for urge -- cuts off small baseline values
Thr float32 `default:"0.2"`
// gain factor for driving tonic DA levels as a function of urgency
DAtonic float32 `default:"50"`
}
func (ur *UrgencyParams) Defaults() {
ur.U50 = 10
ur.Power = 4
ur.Thr = 0.2
ur.DAtonic = 50
}
func (ur *UrgencyParams) Update() {
}
// UrgeFun is the urgency function: urgency / (urgency + 1) where
// urgency = (Raw / U50)^Power
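// For example (a worked illustration), with the default U50 = 10 and
// Power = 4, Raw = 10 gives (10/10)^4 = 1 and an urge of 1/(1+1) = 0.5,
// while Raw = 20 gives 16/(1+16), about 0.94, approaching saturation.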
func (ur *UrgencyParams) UrgeFun(urgency float32) float32 {
urgency /= ur.U50
switch ur.Power {
case 2:
urgency *= urgency
case 4:
urgency *= urgency * urgency * urgency
case 6:
urgency *= urgency * urgency * urgency * urgency * urgency
}
return urgency / (1.0 + urgency)
}
// Reset resets the raw urgency back to zero -- at start of new gating event
func (ur *UrgencyParams) Reset(di uint32) {
GlobalScalars.Set(0, int(GvUrgencyRaw), int(di))
GlobalScalars.Set(0, int(GvUrgency), int(di))
}
// Urge computes normalized Urge value from Raw, and sets DAtonic from that
func (ur *UrgencyParams) Urge(di uint32) float32 {
urge := ur.UrgeFun(GlobalScalars.Value(int(GvUrgencyRaw), int(di)))
if urge < ur.Thr {
urge = 0
}
GlobalScalars.Set(urge, int(GvUrgency), int(di))
GlobalScalars.Set(ur.DAtonic*urge, int(GvDAtonic), int(di)) // simple equation for now
return urge
}
// AddEffort adds an effort increment of urgency and updates the Urge factor
func (ur *UrgencyParams) AddEffort(di uint32, inc float32) {
GlobalScalars.SetAdd(inc, int(GvUrgencyRaw), int(di))
ur.Urge(di)
}
//////// USParams
// RubiconNormFun is the normalizing function applied to the sum of all
// weighted raw values: 1 - (1 / (1 + usRaw.Sum()))
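// For example, a weighted raw sum of 1 normalizes to 1 - 1/2 = 0.5 and a sum
// of 3 to 0.75, mapping unbounded US magnitudes smoothly into the 0-1 range
// (a worked illustration).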
func RubiconNormFun(raw float32) float32 {
return 1.0 - (1.0 / (1.0 + raw))
}
// USParams control how positive and negative USs and Costs are
// weighted and integrated to compute an overall PV primary value.
type USParams struct {
// gain factor applied to sum of weighted, drive-scaled positive USs
// to compute PVpos primary summary value.
// This is multiplied prior to 1/(1+x) normalization.
// Use this to adjust the overall scaling of PVpos reward within 0-1
// normalized range (see also PVnegGain).
// Each USpos is assumed to be in 0-1 range, with a default of 1.
PVposGain float32 `default:"2"`
// gain factor applied to sum of weighted negative USs and Costs
// to compute PVneg primary summary value.
// This is multiplied prior to 1/(1+x) normalization.
// Use this to adjust overall scaling of PVneg within 0-1
// normalized range (see also PVposGain).
PVnegGain float32 `default:"1"`
// Negative US gain factor for encoding each individual negative US,
// within their own separate input pools, multiplied prior to 1/(1+x)
// normalization of each term for activating the USneg pools.
// These gains are _not_ applied in computing summary PVneg value
// (see PVnegWts), and generally must be larger than the weights to leverage
// the dynamic range within each US pool.
USnegGains []float32
// Cost gain factor for encoding the individual Time, Effort etc costs
// within their own separate input pools, multiplied prior to 1/(1+x)
// normalization of each term for activating the Cost pools.
// These gains are _not_ applied in computing summary PVneg value
// (see CostWts), and generally must be larger than the weights to use
// the full dynamic range within each US pool.
CostGains []float32
// weight factor applied to each separate positive US on the way to computing
// the overall PVpos summary value, to control the weighting of each US
// relative to the others. Each pos US is also multiplied by its dynamic
// Drive factor as well.
// Use PVposGain to control the overall scaling of the PVpos value.
PVposWts []float32
// weight factor applied to each separate negative US on the way to computing
// the overall PVneg summary value, to control the weighting of each US
// relative to the others, and to the Costs. These default to 1.
PVnegWts []float32
// weight factor applied to each separate Cost (Time, Effort, etc) on the
// way to computing the overall PVneg summary value, to control the weighting
// of each Cost relative to the others, and relative to the negative USs.
// The first pool is Time, second is Effort, and these are typically weighted
// lower (.02) than salient simulation-specific USs (1).
PVcostWts []float32
// computed estimated US values, based on OFCposPT and VSMatrix gating, in PVposEst
USposEst []float32 `edit:"-"`
}
func (us *USParams) Alloc(nPos, nNeg, nCost int) {
if len(us.PVposWts) != nPos {
us.PVposWts = make([]float32, nPos)
us.USposEst = make([]float32, nPos)
}
if len(us.PVnegWts) != nNeg {
us.USnegGains = make([]float32, nNeg)
us.PVnegWts = make([]float32, nNeg)
}
if len(us.PVcostWts) != nCost {
us.CostGains = make([]float32, nCost)
us.PVcostWts = make([]float32, nCost)
}
}
func (us *USParams) Defaults() {
us.PVposGain = 2
us.PVnegGain = 1
for i := range us.PVposWts {
us.PVposWts[i] = 1
}
for i := range us.USnegGains {
us.USnegGains[i] = 2
us.PVnegWts[i] = 1
}
for i := range us.CostGains {
us.CostGains[i] = 0.1
us.PVcostWts[i] = 0.02
}
}
func (us *USParams) Update() {
}
// USnegCostFromRaw sets normalized NegUS, Cost values from Raw values
func (us *USParams) USnegCostFromRaw(di uint32) {
for i, ng := range us.USnegGains {
raw := GlobalVectors.Value(int(GvUSnegRaw), int(i), int(di))
norm := RubiconNormFun(ng * raw)
GlobalVectors.Set(norm, int(GvUSneg), int(i), int(di))
}
for i, ng := range us.CostGains {
raw := GlobalVectors.Value(int(GvCostRaw), int(i), int(di))
norm := RubiconNormFun(ng * raw)
GlobalVectors.Set(norm, int(GvCost), int(i), int(di))
}
}
// USnegToZero sets all values of USneg, USnegRaw to zero
func (us *USParams) USnegToZero(di uint32) {
for i := range us.USnegGains {
GlobalVectors.Set(0, int(GvUSneg), int(i), int(di))
GlobalVectors.Set(0, int(GvUSnegRaw), int(i), int(di))
}
}
// CostToZero sets all values of Cost, CostRaw to zero
func (us *USParams) CostToZero(di uint32) {
for i := range us.CostGains {
GlobalVectors.Set(0, int(GvCost), int(i), int(di))
GlobalVectors.Set(0, int(GvCostRaw), int(i), int(di))
}
}
// USposToZero sets all values of USpos to zero
func (us *USParams) USposToZero(di uint32) {
for i := range us.PVposWts {
GlobalVectors.Set(0, int(GvUSpos), int(i), int(di))
}
}
//////// LHb & RMTg
// LHbParams has values for computing LHb & RMTg which drives dips / pauses in DA firing.
// LHb handles all US-related (PV = primary value) processing.
// Positive net LHb activity drives dips / pauses in VTA DA activity,
// e.g., when predicted pos > actual or actual neg > predicted.
// Negative net LHb activity drives bursts in VTA DA activity,
// e.g., when actual pos > predicted (redundant with LV / Amygdala)
// or "relief" burst when actual neg < predicted.
type LHbParams struct {
// threshold on VSPatch prediction during a non-reward trial
VSPatchNonRewThr float32 `default:"0.1"`
// gain on the VSPatchD1 - D2 difference to drive the net VSPatch DA
// prediction signal, which goes in VSPatchPos and RewPred global variables
VSPatchGain float32 `default:"4"`
// decay time constant for computing the temporal variance in VSPatch
// values over time
VSPatchVarTau float32 `default:"2"`
// threshold factor that multiplies integrated pvNeg value
// to establish a threshold for whether the integrated pvPos value
// is good enough to drive overall net positive reward.
// If pvPos wins, it is then multiplicatively discounted by pvNeg;
// otherwise, pvNeg is discounted by pvPos.
NegThr float32 `default:"1"`
// gain multiplier on PVpos for purposes of generating bursts
// (not for discounting negative dips).
BurstGain float32 `default:"1"`
// gain multiplier on PVneg for purposes of generating dips
// (not for discounting positive bursts).
DipGain float32 `default:"1"`
// 1/tau
VSPatchVarDt float32 `display:"-"`
}
func (lh *LHbParams) Defaults() {
lh.VSPatchNonRewThr = 0.1
lh.VSPatchGain = 4
lh.VSPatchVarTau = 2
lh.NegThr = 1
lh.BurstGain = 1
lh.DipGain = 1
}
func (lh *LHbParams) Update() {
lh.VSPatchVarDt = 1 / lh.VSPatchVarTau
}
// Reset resets all LHb vars back to 0
func (lh *LHbParams) Reset(di uint32) {
GlobalScalars.Set(0, int(GvLHbDip), int(di))
GlobalScalars.Set(0, int(GvLHbBurst), int(di))
GlobalScalars.Set(0, int(GvLHbPVDA), int(di))
GlobalScalars.Set(0, int(GvVSPatchPosRPE), int(di))
}
// DAFromPVs computes the overall PV DA in terms of LHb burst and dip
// activity from given pvPos, pvNeg, and vsPatchPos values.
// Also returns the net "reward" value as the discounted PV value,
// separate from the vsPatchPos prediction error factor.
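// For example (a worked illustration with the default NegThr, BurstGain,
// DipGain = 1): pvPos=0.8, pvNeg=0.2, vsPatchPos=0.3 gives net=0.6 > 0,
// rew = 0.8*(1-0.2) = 0.64, rpe = 0.34, so burst=0.34, dip=0, da=+0.34.
// Conversely pvPos=0.2, pvNeg=0.8 (with a smaller vsPatchPosSum) gives
// rew = -0.8*(1-0.2) = -0.64, dip=0.64, da=-0.64.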
func (lh *LHbParams) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) (burst, dip, da, rew float32) {
thr := lh.NegThr * pvNeg
net := pvPos - thr // if > 0, net positive outcome; else net negative (not worth it)
if net > 0 { // worth it
rew = lh.BurstGain * pvPos * (1 - pvNeg) // positive reward value: pos with mult neg discount factor
rpe := rew - vsPatchPos // prediction error relative to pos reward value
if rpe < 0 {
dip = -rpe // positive dip = negative value
} else {
burst = rpe
}
} else { // not worth it: net negative but moderated (discounted) by strength of positive
// also ensure that we at least get the accumulated expectation from vspatch
neg := max(lh.DipGain*pvNeg*(1-pvPos), vsPatchPosSum)
rew = -neg
dip = neg // magnitude
}
da = burst - dip
return
}
// DAforUS computes the overall LHb Dip or Burst (one is always 0),
// and PVDA ~= Burst - Dip, for case when there is a primary
// positive reward value or a give-up state has triggered.
// Returns the overall net reward magnitude, prior to VSPatch discounting.
func (lh *LHbParams) DAforUS(di uint32, pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) float32 {
burst, dip, da, rew := lh.DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum)
GlobalScalars.Set(dip, int(GvLHbDip), int(di))
GlobalScalars.Set(burst, int(GvLHbBurst), int(di))
GlobalScalars.Set(da, int(GvLHbPVDA), int(di))
GlobalScalars.Set(vsPatchPos, int(GvVSPatchPosThr), int(di)) // no thresholding for US
GlobalScalars.Set(da, int(GvVSPatchPosRPE), int(di))
return rew
}
// DAforNoUS computes the LHb response when there is _NOT_ a primary
// positive reward value or a give-up state.
// In this case, inhibition of VS via tonic ACh is assumed to prevent
// activity of PVneg (and there is no PVpos).
// Because the LHb only responds when it decides to GiveUp,
// there is no response in this case.
// DA is instead driven by CS-based computation, in rubicon_layers.go, VTAParams.VTADA
func (lh *LHbParams) DAforNoUS(di uint32) float32 {
GlobalScalars.Set(0, int(GvLHbDip), int(di))
GlobalScalars.Set(0, int(GvLHbBurst), int(di))
GlobalScalars.Set(0, int(GvLHbPVDA), int(di))
return 0
}
//////// GiveUpParams
// GiveUpParams are parameters for computing when to give up,
// based on Utility, Timing and Progress factors.
type GiveUpParams struct {
// threshold on GiveUp probability, below which no give up is triggered
ProbThr float32 `default:"0.5"`
// minimum GiveUpSum value, which is the denominator in the sigmoidal function.
// This minimum prevents division by zero and any other degenerate values.
MinGiveUpSum float32 `default:"0.1"`
// the factor multiplying utility values: cost and expected positive outcome
Utility float32 `default:"1"`
// the factor multiplying timing values from VSPatch
Timing float32 `default:"2"`
// the factor multiplying progress values based on time-integrated progress
// toward the goal
Progress float32 `default:"1"`
// minimum utility cost and reward estimate values -- when they are below
// these levels (at the start) then utility is effectively neutral,
// so the other factors take precedence.
MinUtility float32 `default:"0.2"`
// maximum VSPatchPosSum for normalizing the value for give-up weighing
VSPatchSumMax float32 `default:"1"`
// maximum VSPatchPosVar for normalizing the value for give-up weighing
VSPatchVarMax float32 `default:"0.5"`
// time constant for integrating the ProgressRate
// values over time
ProgressRateTau float32 `default:"2"`
// 1/tau
ProgressRateDt float32 `display:"-"`
}
func (gp *GiveUpParams) Defaults() {
gp.ProbThr = 0.5
gp.MinGiveUpSum = 0.1
gp.Utility = 1
gp.Timing = 2
gp.Progress = 1
gp.MinUtility = 0.2
gp.VSPatchSumMax = 1
gp.VSPatchVarMax = 0.5
gp.ProgressRateTau = 2
}
func (gp *GiveUpParams) Update() {
gp.ProgressRateDt = 1 / gp.ProgressRateTau
}
// SigmoidFun is the sigmoid function for computing give up probabilities
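// For example, equal continue and give-up sums yield 1/(1+1) = 0.5, while a
// give-up sum twice the continue sum yields 1/(1+0.5) = 2/3 (a worked
// illustration).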
func SigmoidFun(cnSum, guSum float32) float32 {
return 1 / (1 + (cnSum / guSum))
}
// Prob returns the probability and discrete bool give up for giving up based on
// given sums of continue and give up factors
func (gp *GiveUpParams) Prob(cnSum, guSum float32, rnd randx.Rand) (float32, bool) {
prob := SigmoidFun(cnSum, guSum)
giveUp := randx.BoolP32(prob, rnd)
if prob <= gp.ProbThr {
giveUp = false
}
return prob, giveUp
}
// Sums computes the summed weighting factors that drive continue and give up
// contributions to the probability function.
func (gp *GiveUpParams) Sums(di uint32) (cnSum, guSum float32) {
negSum := GlobalScalars.Value(int(GvPVnegSum), int(di))
guU := gp.Utility * max(gp.MinUtility, negSum)
cnU := gp.Utility * max(gp.MinUtility, GlobalScalars.Value(int(GvPVposEst), int(di))) // todo: var?
GlobalScalars.Set(guU, int(GvGiveUpUtility), int(di))
GlobalScalars.Set(cnU, int(GvContUtility), int(di))
vspSum := min(GlobalScalars.Value(int(GvVSPatchPosSum), int(di)), gp.VSPatchSumMax) / gp.VSPatchSumMax
vspVar := min(GlobalScalars.Value(int(GvVSPatchPosVar), int(di)), gp.VSPatchVarMax) / gp.VSPatchVarMax
guT := gp.Timing * vspSum * (1 - vspVar)
cnT := gp.Timing * (1 - vspSum) * vspVar
GlobalScalars.Set(guT, int(GvGiveUpTiming), int(di))
GlobalScalars.Set(cnT, int(GvContTiming), int(di))
prog := GlobalScalars.Value(int(GvProgressRate), int(di))
guP := -gp.Progress * prog
cnP := gp.Progress * prog
GlobalScalars.Set(guP, int(GvGiveUpProgress), int(di))
GlobalScalars.Set(cnP, int(GvContProgress), int(di))
guSum = guU + guT + guP
cnSum = cnU + cnT + cnP
GlobalScalars.Set(guSum, int(GvGiveUpSum), int(di))
GlobalScalars.Set(cnSum, int(GvContSum), int(di))
if guSum < gp.MinGiveUpSum {
guSum = gp.MinGiveUpSum
}
return
}
//////// Rubicon
// Rubicon implements core elements of the Rubicon goal-directed motivational
// model, representing the core brainstem-level (hypothalamus) bodily drives
// and resulting dopamine from US (unconditioned stimulus) inputs,
// subsuming the earlier PVLV model of primary value (PV)
// and learned value (LV), describing the functions of the Amygdala,
// Ventral Striatum, VTA and associated midbrain nuclei (LDT, LHb, RMTg).
// Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine
// are computed in equations using inputs from specialized network layers
// (LDTLayer driven by BLA, CeM layers, VSPatchLayer).
// The Drives, Effort, US and resulting LHb PV dopamine computation all happen
// at the start of each trial (NewState, Step). The LV / CS dopamine is computed
// cycle-by-cycle by the VTA layer, using the VTA layer's parameters.
// Renders USLayer, PVLayer, DrivesLayer representations based on state updated here.
type Rubicon struct {
// number of possible positive US states and corresponding drives.
// The first is always reserved for novelty / curiosity.
// Must be set programmatically via SetNUSs method,
// which allocates corresponding parameters.
NPosUSs uint32 `edit:"-"`
// number of possible phasic negative US states (e.g., shock, impact etc).
// Must be set programmatically via SetNUSs method, which allocates corresponding
// parameters.
NNegUSs uint32 `edit:"-"`
// number of possible costs, typically including accumulated time and effort costs.
// Must be set programmatically via SetNUSs method, which allocates corresponding
// parameters.
NCosts uint32 `edit:"-"`
// parameters and state for built-in drives that form the core motivations
// of the agent, controlled by lateral hypothalamus and associated
// body state monitoring such as glucose levels and thirst.
Drive DriveParams `display:"inline"`
// urgency (increasing pressure to do something) and parameters for
// updating it. Raw urgency is incremented by same units as effort,
// but is only reset with a positive US.
Urgency UrgencyParams `display:"inline"`
// controls how positive and negative USs are weighted and integrated to
// compute an overall PV primary value.
USs USParams `display:"add-fields"`
// lateral habenula (LHb) parameters and state, which drives
// dipping / pausing in dopamine when the predicted positive
// outcome > actual, or actual negative outcome > predicted.
// Can also drive bursting for the converse, and via matrix phasic firing.
LHb LHbParams `display:"inline"`
// parameters for giving up based on PV pos - neg difference
GiveUp GiveUpParams `display:"add-fields"`
// population code decoding parameters for estimates from layers
ValDecode popcode.OneD `display:"inline"`
decodeActs []float32
}
func (rp *Rubicon) Defaults() {
if rp.LHb.VSPatchGain != 0 { // already done
return
}
rp.Drive.Defaults()
rp.Urgency.Defaults()
rp.USs.Defaults()
rp.LHb.Defaults()
rp.GiveUp.Defaults()
rp.ValDecode.Defaults()
rp.Update()
}
func (rp *Rubicon) Update() {
rp.Drive.Update()
rp.Urgency.Update()
rp.USs.Update()
rp.LHb.Update()
rp.GiveUp.Update()
}
// USposIndex adds 1 to the given _simulation specific_ positive US index
// to get the actual US / Drive index, where the first pool is reserved
// for curiosity / novelty.
func (rp *Rubicon) USposIndex(simUsIndex int) int {
return simUsIndex + 1
}
// USnegIndex allows for the possibility of automatically managed
// negative USs, by adding those to the given _simulation specific_
// negative US index to get the actual US index.
func (rp *Rubicon) USnegIndex(simUsIndex int) int {
return simUsIndex
}
// SetNUSs sets the number of _additional_ simulation-specific
// phasic positive and negative USs (primary value outcomes).
// This must be called _before_ network Build, which allocates global values
// that depend on these numbers. Any change must also call network.BuildGlobals.
// 1 PosUS (curiosity / novelty) is managed automatically by the Rubicon code.
// Two costs (Time, Effort) are also automatically allocated and managed.
// The USs specified here need to be managed by the simulation via the SetUS method.
// Positive USs each have corresponding Drives.
func (rp *Rubicon) SetNUSs(nPos, nNeg int) {
nix := GetNetworkIxs(0)
nPos = rp.USposIndex(max(nPos, 1))
nNeg = rp.USnegIndex(max(nNeg, 1)) // ensure at least 1
rp.NPosUSs = uint32(nPos)
rp.NNegUSs = uint32(nNeg)
rp.NCosts = 2 // default
nix.RubiconNPosUSs = rp.NPosUSs
nix.RubiconNNegUSs = rp.NNegUSs
nix.RubiconNCosts = rp.NCosts
rp.Drive.Alloc(nPos)
rp.USs.Alloc(nPos, nNeg, int(rp.NCosts))
}
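// Usage sketch (hypothetical sim code; assumes the Network exposes its Rubicon
// instance as net.Rubicon): SetNUSs must be called before the network is built:
//
//	rp := &net.Rubicon
//	rp.SetNUSs(2, 1) // 2 sim-specific positive USs (+ curiosity), 1 negative US
//	// ... then build the network, which allocates the corresponding globals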
// Reset resets all Rubicon state
func (rp *Rubicon) Reset(di uint32) {
rp.Drive.ToBaseline(di)
rp.TimeEffortReset(di)
rp.Urgency.Reset(di)
rp.InitUS(di)
rp.LHb.Reset(di)
rp.Drive.VarToZero(di, GvVSPatchD1)
rp.Drive.VarToZero(di, GvVSPatchD2)
rp.ResetGoalState(di)
GlobalScalars.Set(0, int(GvVtaDA), int(di))
GlobalScalars.Set(0, int(GvVSMatrixJustGated), int(di))
GlobalScalars.Set(0, int(GvVSMatrixHasGated), int(di))
GlobalScalars.Set(0, int(GvHadRew), int(di))
// pp.HasPosUSPrev.SetBool(false) // key to not reset!!
}
// InitUS initializes all the USs to zero
func (rp *Rubicon) InitUS(di uint32) {
rp.USs.USposToZero(di)
rp.USs.USnegToZero(di)
rp.USs.CostToZero(di)
GlobalScalars.Set(0, int(GvHasRew), int(di))
GlobalScalars.Set(0, int(GvRew), int(di))
}
// InitDrives initializes all the Drives to baseline values (default = 0)
func (rp *Rubicon) InitDrives(di uint32) {
rp.Drive.ToBaseline(di)
}
// AddTimeEffort adds a unit of time and an increment of effort
func (rp *Rubicon) AddTimeEffort(di uint32, effort float32) {
GlobalScalars.SetAdd(1, int(GvTime), int(di))
tm := GlobalScalars.Value(int(GvTime), int(di))
GlobalVectors.Set(tm, int(GvCostRaw), int(0), int(di)) // time is neg 0
GlobalScalars.SetAdd(effort, int(GvEffort), int(di))
eff := GlobalScalars.Value(int(GvEffort), int(di))
GlobalVectors.Set(eff, int(GvCostRaw), int(1), int(di)) // effort is neg 1
}
// EffortUrgencyUpdate updates the Effort or Urgency based on
// given effort increment.
// Effort is incremented when VSMatrixHasGated (i.e., goal engaged)
// and Urgency updates otherwise (when not goal engaged)
// Call this at the start of the trial, in ApplyRubicon method,
// after NewState.
func (rp *Rubicon) EffortUrgencyUpdate(di uint32, effort float32) {
if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) > 0 {
rp.AddTimeEffort(di, effort)
} else {
rp.Urgency.AddEffort(di, effort)
}
}
// TimeEffortReset resets the raw time and effort back to zero,
// at start of new gating event
func (rp *Rubicon) TimeEffortReset(di uint32) {
GlobalScalars.Set(0, int(GvTime), int(di))
GlobalScalars.Set(0, int(GvEffort), int(di))
GlobalVectors.Set(0, int(GvCostRaw), int(0), int(di)) // time is neg 0
GlobalVectors.Set(0, int(GvCost), int(0), int(di))
}
// PVposFromDriveEffort returns the net primary value ("reward") based on
// given US value and drive for that value (typically in 0-1 range),
// and total effort, from which the effort discount factor is computed and applied:
// usValue * drive * Effort.DiscFun(effort).
// This is not called directly in the Rubicon code -- can be used to compute
// what the Rubicon code itself will compute -- see LHbPVDA
// todo: this is not very meaningful anymore
// func (pp *Rubicon) PVposFromDriveEffort(usValue, drive, effort float32) float32 {
// return usValue * drive * (1 - RubiconNormFun(pp.USs.PVnegWts[0]*effort))
// }
// SetDrive sets the given Drive to the given value.
func (rp *Rubicon) SetDrive(di uint32, dr uint32, val float32) {
GlobalVectors.Set(val, int(GvDrives), int(dr), int(di))
}
// SetDrives is used when directly controlling drive levels externally.
// curiosity sets the strength for the curiosity drive,
// and drives are strengths of the remaining sim-specified drives, in order.
// Any drives not so specified remain at the InitDrives baseline level.
func (rp *Rubicon) SetDrives(di uint32, curiosity float32, drives ...float32) {
rp.InitDrives(di)
rp.SetDrive(di, 0, curiosity)
for i, v := range drives {
rp.SetDrive(di, uint32(1+i), v)
}
}
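// Usage sketch (hypothetical values): set curiosity to 0.5 and two sim-specific
// drives to 1 and 0.25 for data parallel index di; any remaining drives stay
// at their InitDrives baseline:
//
//	rp.SetDrives(di, 0.5, 1, 0.25)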
// DriveUpdate is used when auto-updating drive levels based on US consumption,
// which partially satisfies (decrements) corresponding drive,
// and on time passing, where drives adapt to their overall baseline levels.
func (rp *Rubicon) DriveUpdate(di uint32) {
rp.Drive.ExpStepAll(di)
nd := rp.NPosUSs
for i := uint32(0); i < nd; i++ {
us := GlobalVectors.Value(int(GvUSpos), int(i), int(di))
nwdrv := GlobalVectors.Value(int(GvDrives), int(i), int(di)) - us*rp.Drive.Satisfaction[i]
if nwdrv < 0 {
nwdrv = 0
}
GlobalVectors.Set(nwdrv, int(GvDrives), int(i), int(di))
}
}
// SetUS sets the given _simulation specific_ unconditioned
// stimulus (US) state for Rubicon algorithm. usIndex = 0 is first US, etc.
// The US then drives activity of relevant Rubicon-rendered inputs, and dopamine,
// and sets the global HasRew flag, thus triggering a US learning event.
// Note that costs can be used to track negative USs that are not strong
// enough to trigger a US learning event.
func (rp *Rubicon) SetUS(di uint32, valence ValenceTypes, usIndex int, magnitude float32) {
GlobalScalars.Set(1, int(GvHasRew), int(di))
if valence == Positive {
usIndex = rp.USposIndex(usIndex)
GlobalVectors.Set(magnitude, int(GvUSpos), int(usIndex), int(di))
} else {
usIndex = rp.USnegIndex(usIndex)
GlobalVectors.Set(magnitude, int(GvUSnegRaw), int(usIndex), int(di))
GlobalScalars.Set(1, int(GvNegUSOutcome), int(di))
}
}
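// Usage sketch (hypothetical): deliver the first sim-specific positive US at
// magnitude 1 for data parallel index di, typically just before calling Step:
//
//	rp.SetUS(di, Positive, 0, 1)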
// ResetGoalState resets all the goal-engaged global values.
// Critically, this is only called after goal accomplishment,
// not after goal gating -- prevents "shortcutting" by re-gating.
func (rp *Rubicon) ResetGoalState(di uint32) {
GlobalScalars.Set(0, int(GvVSMatrixHasGated), int(di))
rp.Urgency.Reset(di)
rp.TimeEffortReset(di)
rp.USs.USnegToZero(di) // all negs restart
rp.USs.CostToZero(di)
rp.ResetGiveUp(di)
GlobalScalars.Set(0, int(GvVSPatchPos), int(di))
GlobalScalars.Set(0, int(GvVSPatchPosSum), int(di))
GlobalScalars.Set(0, int(GvVSPatchPosPrev), int(di))
GlobalScalars.Set(0, int(GvVSPatchPosVar), int(di))
GlobalScalars.Set(0, int(GvRewPred), int(di))
GlobalScalars.Set(0, int(GvGoalDistEst), int(di))
GlobalScalars.Set(0, int(GvGoalDistPrev), int(di))
GlobalScalars.Set(0, int(GvProgressRate), int(di))
nd := rp.NPosUSs
for i := uint32(0); i < nd; i++ {
GlobalVectors.Set(0, int(GvOFCposPTMaint), int(i), int(di))
GlobalVectors.Set(0, int(GvVSMatrixPoolGated), int(i), int(di))
}
}
// ResetGiveUp resets all the give-up related global values.
func (rp *Rubicon) ResetGiveUp(di uint32) {
GlobalScalars.Set(0, int(GvPVposEst), int(di))
GlobalScalars.Set(0, int(GvPVposVar), int(di))
GlobalScalars.Set(0, int(GvGiveUpProb), int(di))
GlobalScalars.Set(0, int(GvGiveUp), int(di))
}
// NewState is called at the very start of a new state (trial) of processing.
// Sets HadRew = HasRew from the last trial, which is then used to reset
// various things after reward.
func (rp *Rubicon) NewState(di uint32, rnd randx.Rand) {
hadRewF := GlobalScalars.Value(int(GvHasRew), int(di))
hadRew := num.ToBool(hadRewF)
GlobalScalars.Set(hadRewF, int(GvHadRew), int(di))
GlobalScalars.Set(GlobalScalars.Value(int(GvHasPosUS), int(di)), int(GvHadPosUS), int(di))
GlobalScalars.Set(GlobalScalars.Value(int(GvNegUSOutcome), int(di)), int(GvHadNegUSOutcome), int(di))
GlobalScalars.Set(GlobalScalars.Value(int(GvGiveUp), int(di)), int(GvGaveUp), int(di))
GlobalScalars.Set(0, int(GvHasRew), int(di))
GlobalScalars.Set(0, int(GvNegUSOutcome), int(di))
rp.VSPatchNewState(di)
if hadRew {
rp.ResetGoalState(di)
} else if GlobalScalars.Value(int(GvVSMatrixJustGated), int(di)) > 0 {
GlobalScalars.Set(1, int(GvVSMatrixHasGated), int(di))
rp.Urgency.Reset(di)
}
GlobalScalars.Set(0, int(GvVSMatrixJustGated), int(di))
rp.USs.USposToZero(di) // pos USs must be set fresh every time
}
// Step does one step (trial) after applying USs, Drives,
// and updating Effort. It should be the final call in ApplyRubicon.
// Calls PVDA which does all US, PV, LHb, GiveUp updating.
func (rp *Rubicon) Step(di uint32, rnd randx.Rand) {
rp.PVDA(di, rnd)
}
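// Sketch of a typical ApplyRubicon sequence (hypothetical sim code): after
// NewState has run at the start of the trial, the sim updates effort / urgency,
// applies any USs, drives, and goal distance estimates, and calls Step last:
//
//	rp.EffortUrgencyUpdate(di, effort)
//	// rp.SetUS(...), rp.SetDrives(...), rp.SetGoalDistEst(...) as needed
//	rp.Step(di, rnd)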
// SetGoalMaintFromLayer sets the GoalMaint global state variable
// from the average activity (CaD) of the given layer name.
// GoalMaint is normalized 0-1 based on the given max activity level,
// with anything out of range clamped to 0-1 range.
// Returns (and logs) an error if layer name not found.
func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string, maxAct float32) error {
ly := net.LayerByName(layName)
if ly == nil {
err := fmt.Errorf("SetGoalMaintFromLayer: layer named: %q not found", layName)
slog.Error(err.Error())
return err
}
lpi := ly.Params.PoolIndex(0)
act := PoolAvgMax(AMCaD, AMCycle, Avg, lpi, di)
gm := float32(0)
if act > maxAct {
gm = 1
} else {
gm = act / maxAct
}
GlobalScalars.Set(gm, int(GvGoalMaint), int(di))
return nil
}
// DecodeFromLayer decodes value and variance from the average activity (CaD)
// of the given layer name. Use for decoding PVposEst and Var, and PVnegEst and Var
func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val, vr float32, err error) {
ly := net.LayerByName(layName)
if ly == nil {
err = fmt.Errorf("DecodeFromLayer: layer named: %q not found", layName)
slog.Error(err.Error())
return
}
ly.UnitValues(&rp.decodeActs, "CaD", int(di))
val = ly.Params.Acts.PopCode.Decode(rp.decodeActs)
vr = ly.Params.Acts.PopCode.Uncertainty(val, rp.decodeActs)
return
}
// DecodePVEsts decodes estimated PV outcome values from PVposP and PVnegP
// prediction layers, saves in global PVposEst, Var and PVnegEst, Var
func (rp *Rubicon) DecodePVEsts(di uint32, net *Network) {
posEst, posVar, err := rp.DecodeFromLayer(di, net, "PVposP")
if err == nil {
GlobalScalars.Set(posEst, int(GvPVposEst), int(di))
GlobalScalars.Set(posVar, int(GvPVposVar), int(di))
}
negEst, negVar, err := rp.DecodeFromLayer(di, net, "PVnegP")
if err == nil {
GlobalScalars.Set(negEst, int(GvPVnegEst), int(di))
GlobalScalars.Set(negVar, int(GvPVnegVar), int(di))
}
}
// SetGoalDistEst sets the current estimated distance to the goal,
// in trial step units, which should decrease to 0 at the goal.
// This should be set at the start of every trial.
// Also computes the ProgressRate.
func (rp *Rubicon) SetGoalDistEst(di uint32, dist float32) {
if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) == 0 {
GlobalScalars.Set(dist, int(GvGoalDistPrev), int(di))
GlobalScalars.Set(dist, int(GvGoalDistEst), int(di))
GlobalScalars.Set(0, int(GvProgressRate), int(di))
return
}
prev := GlobalScalars.Value(int(GvGoalDistEst), int(di))
GlobalScalars.Set(prev, int(GvGoalDistPrev), int(di))
GlobalScalars.Set(dist, int(GvGoalDistEst), int(di))
rate := GlobalScalars.Value(int(GvProgressRate), int(di))
del := prev - dist
rate += rp.GiveUp.ProgressRateDt * (del - rate)
GlobalScalars.Set(rate, int(GvProgressRate), int(di))
}
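// Worked example of the progress rate update above (illustrative): with the
// default ProgressRateTau = 2 (so ProgressRateDt = 0.5), a previous distance
// of 10 and a new distance of 9 gives del = 1, so a rate of 0 moves to
// 0 + 0.5*(1-0) = 0.5; another equal step moves it to 0.75, converging toward
// the per-trial progress of 1.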
//////// methods below used in computing Rubicon state, not generally called from sims
// HasPosUS returns true if there is at least one non-zero positive US
func (rp *Rubicon) HasPosUS(di uint32) bool {
nd := rp.NPosUSs
for i := uint32(0); i < nd; i++ {
if GlobalVectors.Value(int(GvUSpos), int(i), int(di)) > 0 {
return true
}
}
return false
}
// PVpos returns the summed weighted positive value of
// current positive US state, where each US is multiplied by
// its current drive and weighting factor (pvPosSum),
// and the normalized version of this sum (PVpos = overall positive PV)
// as 1 - (1 / (1 + PVposGain * pvPosSum)).
func (rp *Rubicon) PVpos(di uint32) (pvPosSum, pvPos float32) {
nd := rp.NPosUSs
wts := rp.USs.PVposWts
for i := uint32(0); i < nd; i++ {
pvPosSum += wts[i] * GlobalVectors.Value(int(GvUSpos), int(i), int(di)) * rp.Drive.EffectiveDrive(di, i)
}
pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
return
}
// PVneg returns the summed weighted negative value of current
// costs and negative US state, where each US is multiplied
// by a weighting factor and summed (pvNegSum),
// and the normalized version of this sum (PVneg = overall negative PV)
// as 1 - (1 / (1 + PVnegGain * pvNegSum)).
func (rp *Rubicon) PVneg(di uint32) (pvNegSum, pvNeg float32) {
nn := rp.NNegUSs
wts := rp.USs.PVnegWts
for i := uint32(0); i < nn; i++ {
pvNegSum += wts[i] * GlobalVectors.Value(int(GvUSnegRaw), int(i), int(di))
}
nn = rp.NCosts
wts = rp.USs.PVcostWts
for i := uint32(0); i < nn; i++ {
pvNegSum += wts[i] * GlobalVectors.Value(int(GvCostRaw), int(i), int(di))
}
pvNeg = RubiconNormFun(rp.USs.PVnegGain * pvNegSum)
return
}
// PVsFromUSs updates the current PV summed, weighted, normalized values
// from the underlying US values.
func (rp *Rubicon) PVsFromUSs(di uint32) {
pvPosSum, pvPos := rp.PVpos(di)
GlobalScalars.Set(pvPosSum, int(GvPVposSum), int(di))
GlobalScalars.Set(pvPos, int(GvPVpos), int(di))
GlobalScalars.Set(num.FromBool[float32](rp.HasPosUS(di)), int(GvHasPosUS), int(di))
pvNegSum, pvNeg := rp.PVneg(di)
GlobalScalars.Set(pvNegSum, int(GvPVnegSum), int(di))
GlobalScalars.Set(pvNeg, int(GvPVneg), int(di))
}
// VSPatchNewState does VSPatch processing in NewState:
// updates global VSPatchPos and VSPatchPosSum, and sets RewPred.
// Uses the max across recorded VSPatch activity levels.
func (rp *Rubicon) VSPatchNewState(di uint32) {
prev := GlobalScalars.Value(int(GvVSPatchPos), int(di))
GlobalScalars.Set(prev, int(GvVSPatchPosPrev), int(di))
mx := float32(0)
nd := rp.NPosUSs
for i := uint32(0); i < nd; i++ {
vsD1 := GlobalVectors.Value(int(GvVSPatchD1), int(i), int(di))
vsD2 := GlobalVectors.Value(int(GvVSPatchD2), int(i), int(di))
vs := rp.LHb.VSPatchGain * (vsD1 - vsD2)
if vs > mx {
mx = vs
}
}
v := math32.Abs(mx - prev)
pv := GlobalScalars.Value(int(GvVSPatchPosVar), int(di))
if v > pv {
pv = v
} else {
pv += rp.LHb.VSPatchVarDt * (v - pv) // decay -- negative
}
GlobalScalars.Set(pv, int(GvVSPatchPosVar), int(di))
GlobalScalars.Set(mx, int(GvVSPatchPos), int(di))
GlobalScalars.Set(mx, int(GvRewPred), int(di))
thr := mx
if mx < rp.LHb.VSPatchNonRewThr {
thr = 0
}
GlobalScalars.Set(-mx, int(GvVSPatchPosRPE), int(di)) // default for non-us cases
GlobalScalars.Set(thr, int(GvVSPatchPosThr), int(di))
GlobalScalars.SetAdd(thr, int(GvVSPatchPosSum), int(di)) // key: use thresholded!
}
// PVposEstFromUSs returns the estimated positive PV value
// based on drives and given US values. This can be used
// to compute estimates to compare network performance.
func (rp *Rubicon) PVposEstFromUSs(di uint32, uss []float32) (pvPosSum, pvPos float32) {
nd := rp.NPosUSs
if len(uss) < int(nd) {
nd = uint32(len(uss))
}
wts := rp.USs.PVposWts
for i := uint32(0); i < nd; i++ {
pvPosSum += wts[i] * uss[i] * rp.Drive.EffectiveDrive(di, i)
}
pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
return
}
// PVposEstFromUSsDrives returns the estimated positive PV value
// based on given externally provided drives and US values.
// This can be used to compute estimates to compare network performance.
func (rp *Rubicon) PVposEstFromUSsDrives(uss, drives []float32) (pvPosSum, pvPos float32) {
nd := rp.NPosUSs
if len(uss) < int(nd) {
nd = uint32(len(uss))
}
wts := rp.USs.PVposWts
for i := uint32(0); i < nd; i++ {
pvPosSum += wts[i] * uss[i] * drives[i]
}
pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
return
}
// PVnegEstFromUSs returns the estimated negative PV value
// based on given externally provided US values.
// This can be used to compute estimates to compare network performance.
func (rp *Rubicon) PVnegEstFromUSs(uss []float32) (pvNegSum, pvNeg float32) {
nn := rp.NNegUSs
wts := rp.USs.PVnegWts
for i := uint32(0); i < nn; i++ {
pvNegSum += wts[i] * uss[i]
}
pvNeg = RubiconNormFun(rp.USs.PVnegGain * pvNegSum)
return
}
// PVcostEstFromCosts returns the estimated negative PV value
// based on given externally provided Cost values.
// This can be used to compute estimates to compare network performance.
func (rp *Rubicon) PVcostEstFromCosts(costs []float32) (pvCostSum, pvNeg float32) {
nn := rp.NCosts
wts := rp.USs.PVcostWts
for i := uint32(0); i < nn; i++ {
pvCostSum += wts[i] * costs[i]
}
pvNeg = RubiconNormFun(rp.USs.PVnegGain * pvCostSum)
return
}
// DAFromPVs computes the overall PV DA in terms of LHb burst and dip
// activity from given pvPos, pvNeg, and vsPatchPos values.
// Also returns the net "reward" value as the discounted PV value,
// separate from the vsPatchPos prediction error factor.
func (rp *Rubicon) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) (burst, dip, da, rew float32) {
return rp.LHb.DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum)
}
// GiveUpOnGoal determines whether to give up on current goal
// based on Utility, Timing, and Progress weight factors.
func (rp *Rubicon) GiveUpOnGoal(di uint32, rnd randx.Rand) bool {
cnSum, guSum := rp.GiveUp.Sums(di)
prob, giveUp := rp.GiveUp.Prob(cnSum, guSum, rnd)
GlobalScalars.Set(prob, int(GvGiveUpProb), int(di))
GlobalScalars.Set(num.FromBool[float32](giveUp), int(GvGiveUp), int(di))
return giveUp
}
// PVDA computes the PV (primary value) based dopamine
// based on current state information, at the start of a trial.
// PV DA is computed by the VS (ventral striatum) and the LHb / RMTg,
// and the resulting values are stored in global variables.
// Called after updating USs, Effort, Drives at start of trial step,
// in Step.
func (rp *Rubicon) PVDA(di uint32, rnd randx.Rand) {
rp.USs.USnegCostFromRaw(di)
rp.PVsFromUSs(di)
hasRew := (GlobalScalars.Value(int(GvHasRew), int(di)) > 0)
pvPos := GlobalScalars.Value(int(GvPVpos), int(di))
pvNeg := GlobalScalars.Value(int(GvPVneg), int(di))
vsPatchPos := GlobalScalars.Value(int(GvVSPatchPos), int(di))
vsPatchPosSum := GlobalScalars.Value(int(GvVSPatchPosSum), int(di))
if hasRew {
rp.ResetGiveUp(di)
rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual pos rew
GlobalScalars.Set(rew, int(GvRew), int(di))
return
}
if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) > 0 {
giveUp := rp.GiveUpOnGoal(di, rnd)
if giveUp {
GlobalScalars.Set(1, int(GvHasRew), int(di)) // key for triggering reset
rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual rew
GlobalScalars.Set(rew, int(GvRew), int(di))
return
}
}
// no US regular case
rp.LHb.DAforNoUS(di)
GlobalScalars.Set(0, int(GvRew), int(di))
}
// GlobalSetRew is a convenience function for setting the external reward
// state in Globals variables
func GlobalSetRew(di uint32, rew float32, hasRew bool) {
GlobalScalars.Set(num.FromBool[float32](hasRew), int(GvHasRew), int(di))
if hasRew {
GlobalScalars.Set(rew, int(GvRew), int(di))
} else {
GlobalScalars.Set(0, int(GvRew), int(di))
}
}
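// Usage sketch (hypothetical): directly set an external reward of 1 for data
// parallel index di on a reward trial, or clear it otherwise:
//
//	GlobalSetRew(di, 1, true)  // reward trial
//	GlobalSetRew(di, 0, false) // non-reward trial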
//gosl:start
// RubiconUSStimValue returns stimulus value for US at given index
// and valence (includes Cost). If US > 0.01, a full 1 US activation is returned.
func RubiconUSStimValue(di uint32, usIndex uint32, valence ValenceTypes) float32 {
nix := GetNetworkIxs(0)
us := float32(0)
switch valence {
case Positive:
if usIndex < nix.RubiconNPosUSs {
us = GlobalVectors.Value(int(GvUSpos), int(usIndex), int(di))
}
case Negative:
if usIndex < nix.RubiconNNegUSs {
us = GlobalVectors.Value(int(GvUSneg), int(usIndex), int(di))
}
case Cost:
if usIndex < nix.RubiconNCosts {
us = GlobalVectors.Value(int(GvCost), int(usIndex), int(di))
}
default:
}
return us
}
//gosl:end
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"math"
"reflect"
"strings"
"time"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/base/timer"
"cogentcore.org/core/enums"
"cogentcore.org/lab/matrix"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/metric"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/emergent/v2/looper"
)
// StatsNode returns tensorfs Dir Node for given mode, level.
func StatsNode(statsDir *tensorfs.Node, mode, level enums.Enum) *tensorfs.Node {
modeDir := statsDir.Dir(mode.String())
return modeDir.Dir(level.String())
}
// StatsLayerValues returns a tensor of unit values of the given variable
// for the given layer, for data parallel index di, stored in the current
// directory for the given mode.
func StatsLayerValues(net *Network, curDir *tensorfs.Node, mode enums.Enum, di int, layName, varName string) *tensor.Float64 {
curModeDir := curDir.Dir(mode.String())
ly := net.LayerByName(layName)
tsr := curModeDir.Float64(layName+"_"+varName, ly.Shape.Sizes...)
ly.UnitValuesTensor(tsr, varName, di)
return tsr
}
// LogFilename returns a standard log file name as netName_runName_logName.tsv
func LogFilename(netName, runName, logName string) string {
return netName + "_" + runName + "_" + logName + ".tsv"
}
// OpenLogFile, if on == true, sets the log file for given table using given
// netName, runName, and logName in order.
func OpenLogFile(on bool, dt *table.Table, netName, runName, logName string) {
if !on {
return
}
fnm := LogFilename(netName, runName, logName)
tensor.SetPrecision(dt, 4)
dt.OpenLog(fnm, tensor.Tab)
}
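// Usage sketch (hypothetical names; dt is an existing *table.Table): open a
// tab-separated log producing a file such as "MyNet_Base_trainepoch.tsv":
//
//	OpenLogFile(true, dt, "MyNet", "Base", "trainepoch")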
// OpenLogFiles opens the log files for modes and levels of the looper,
// based on the lists of level names, ordered by modes in numerical order.
// The netName and runName are used for naming the file, along with
// the mode_level in lower case.
func OpenLogFiles(ls *looper.Stacks, statsDir *tensorfs.Node, netName, runName string, modeLevels [][]string) {
modes := ls.Modes()
for i, mode := range modes {
if i >= len(modeLevels) {
return
}
levels := modeLevels[i]
st := ls.Stacks[mode]
for _, level := range st.Order {
on := false
for _, lev := range levels {
if lev == level.String() {
on = true
break
}
}
if !on {
continue
}
logName := strings.ToLower(mode.String() + "_" + level.String())
dt := tensorfs.DirTable(StatsNode(statsDir, mode, level), nil)
fnm := LogFilename(netName, runName, logName)
tensor.SetPrecision(dt, 4)
dt.OpenLog(fnm, tensor.Tab)
}
}
}
// CloseLogFiles closes all the log files for each mode and level of the looper,
// excluding the given level(s).
func CloseLogFiles(ls *looper.Stacks, statsDir *tensorfs.Node, exclude ...enums.Enum) {
modes := ls.Modes() // mode enum order
for _, mode := range modes {
st := ls.Stacks[mode]
for _, level := range st.Order {
if StatExcludeLevel(level, exclude...) {
continue
}
dt := tensorfs.DirTable(StatsNode(statsDir, mode, level), nil)
dt.CloseLog()
}
}
}
// StatExcludeLevel returns true if given level is among the list of levels to exclude.
func StatExcludeLevel(level enums.Enum, exclude ...enums.Enum) bool {
bail := false
for _, ex := range exclude {
if level == ex {
bail = true
break
}
}
return bail
}
// StatLoopCounters adds the counters from each stack, loop level for given
// looper Stacks to the given tensorfs stats. This is typically the first
// Stat to add, so these counters will be used for X axis values.
// The stat is run with start = true before returning, so that the stats
// are already initialized first before anything else.
// The first mode's counters (typically Train) are automatically added to all
// subsequent modes so they automatically track training levels.
// - currentDir is a tensorfs directory to store the current values of each counter.
// - trialLevel is the Trial level enum, which automatically handles the
// iteration over ndata parallel trials.
// - exclude is a list of loop levels to exclude (e.g., Cycle).
func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Network, trialLevel enums.Enum, exclude ...enums.Enum) func(mode, level enums.Enum, start bool) {
modes := ls.Modes() // mode enum order
fun := func(mode, level enums.Enum, start bool) {
for mi := range 2 {
st := ls.Stacks[mode]
prefix := ""
if mi == 0 {
if modes[mi].Int64() == mode.Int64() { // skip train in train..
continue
}
ctrMode := modes[mi]
st = ls.Stacks[ctrMode]
prefix = ctrMode.String()
}
for _, lev := range st.Order {
// don't record counter for levels above it
if level.Int64() > lev.Int64() {
continue
}
if StatExcludeLevel(lev, exclude...) {
continue
}
name := prefix + lev.String() // name of stat = level
ndata := int(net.Context().NData)
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Int(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
})
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
curModeDir.Int(name, ndata).SetInt1D(0, di)
}
}
metadata.SetDoc(tsr, "Loop counter for given level")
continue
}
ctr := st.Loops[lev].Counter.Cur
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
curModeDir.Int(name, ndata).SetInt1D(ctr, di)
tsr.AppendRowInt(ctr)
if lev.Int64() == trialLevel.Int64() {
ctr++
}
}
} else {
curModeDir.Int(name, 1).SetInt1D(ctr, 0)
tsr.AppendRowInt(ctr)
}
}
}
}
for _, md := range modes {
st := ls.Stacks[md]
for _, lev := range st.Order {
if StatExcludeLevel(lev, exclude...) {
continue
}
fun(md, lev, true)
}
}
return fun
}
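// Usage sketch (hypothetical wiring; Train, Epoch, Trial, Cycle are sim-defined
// enums): the returned closure is typically registered with the sim's stats
// machinery and called with start = true at the beginning of each loop level
// and start = false at its end:
//
//	counters := StatLoopCounters(statsDir, currentDir, ls, net, Trial, Cycle)
//	counters(Train, Epoch, false) // append current counter values at end of an epoch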
// StatRunName adds a "RunName" stat to every mode and level of looper,
// subject to exclusion list, which records the current value of the
// "RunName" string in ss.Current, which identifies the parameters and tag
// for this run.
func StatRunName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Network, trialLevel enums.Enum, exclude ...enums.Enum) func(mode, level enums.Enum, start bool) {
return func(mode, level enums.Enum, start bool) {
name := "RunName"
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
runNm := currentDir.StringValue(name, 1).String1D(0)
if start {
tsr.SetNumRows(0)
metadata.SetDoc(tsr, "Identifies the current parameters and tag label for current runs, can be used to sort different runs during interactive exploration")
return
}
if level.Int64() == trialLevel.Int64() {
for range ndata {
tsr.AppendRowString(runNm)
}
} else {
tsr.AppendRowString(runNm)
}
}
}
// StatTrialName adds a "TrialName" stat to the given Trial level in every mode of looper,
// which records the current value of the "TrialName" string in ss.Current, which
// contains a string description of the current trial.
func StatTrialName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Network, trialLevel enums.Enum) func(mode, level enums.Enum, start bool) {
return func(mode, level enums.Enum, start bool) {
if level.Int64() != trialLevel.Int64() {
return
}
name := "TrialName"
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
if start {
tsr.SetNumRows(0)
metadata.SetDoc(tsr, "Name of the current trial, generated by the environment to describe current input patterns")
return
}
for di := range ndata {
trlNm := curModeDir.StringValue(name, ndata).String1D(di)
tsr.AppendRowString(trlNm)
}
}
}
// StatPerTrialMSec returns a Stats function that reports the number of milliseconds
// per trial, for the given levels and training mode enum values.
// Stats will be recorded at levels above the given trial level.
func StatPerTrialMSec(statsDir *tensorfs.Node, trainMode enums.Enum, trialLevel enums.Enum) func(mode, level enums.Enum, start bool) {
var epcTimer timer.Time
levels := make([]enums.Enum, 10) // should be enough
levels[0] = trialLevel
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - trialLevel.Int64())
if mode.Int64() != trainMode.Int64() || levi <= 0 {
return
}
levels[levi] = level
name := "PerTrialMSec"
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
})
metadata.SetDoc(tsr, "Milliseconds per trial of wall-clock time, averaged over an epoch, to provide computational timing info")
return
}
switch levi {
case 1:
epcTimer.Stop()
subDir := modeDir.Dir(levels[0].String())
trls := errors.Ignore1(subDir.Values())[0] // must be a stat
epcTimer.N = trls.Len()
pertrl := float64(epcTimer.Avg()) / float64(time.Millisecond)
tsr.AppendRowFloat(pertrl)
epcTimer.ResetStart()
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
// StatLayerActGe returns a Stats function that computes layer activity
// and Ge (excitatory conductance; net input) stats, which are important targets
// of parameter tuning to ensure everything is in an appropriate dynamic range.
// It only runs for given trainMode at given trialLevel and above,
// with higher levels computing the Mean of lower levels.
func StatLayerActGe(statsDir *tensorfs.Node, net *Network, trainMode, trialLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
statNames := []string{"ActMAvg", "ActMMax", "MaxGeM"}
statDocs := map[string]string{
"ActMAvg": "Minus phase average activity in given layer. In general it is important for this to be somewhere around 0.1, depending on the type of layer. Adjust the Inhib.Gi parameter to control activity levels",
"ActMMax": "Minus phase maximum activity in given layer. If a layer is not very active, then activity will not propagate efficiently to other layers, so this should be above .5 and ideally closer to .8 or .9. See MaxGeM to increase excitatory conductance if activity is too low, or Inhib.Gi can be reduced (see ActMAvg)",
"MaxGeM": "Minus phase maximum Ge excitatory conductance in given layer. In general this should be above 1, but not greater than 3 or so, but this can vary depending on layer type. If too low, then ActMMax can be too low. If too high, then neurons can be too binary, or numerical integration can even go awry. The per-path PathScale.Abs or sending layer Inhib.ActAvg.Nominal parameters can be adjusted to alter this value.",
}
levels := make([]enums.Enum, 10) // should be enough
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - trialLevel.Int64())
if mode.Int64() != trainMode.Int64() || levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := net.Context().NData
for _, lnm := range layerNames {
for si, statName := range statNames {
ly := net.LayerByName(lnm)
lpi := ly.Params.PoolIndex(0)
name := lnm + "_" + statName
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
})
metadata.SetDoc(tsr, statDocs[statName])
continue
}
switch levi {
case 0:
for di := range ndata {
var stat float32
switch si {
case 0:
stat = PoolAvgMax(AMAct, AMMinus, Avg, lpi, di)
case 1:
stat = PoolAvgMax(AMAct, AMMinus, Max, lpi, di)
case 2:
stat = PoolAvgMax(AMGeInt, AMMinus, Max, lpi, di)
}
tsr.AppendRowFloat(float64(stat))
}
case int(runLevel.Int64() - trialLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
if levi == 1 && si == 0 { // use official longer timescale avg stat here
tsr.AppendRowFloat(float64(LayerStates.Value(int(ly.Index), 0, int(LayerActMAvg))))
} else {
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
}
// StatLayerGiMult returns a Stats function that records [LayerGiMult] stats,
// for given layer names. This should be computed at the epoch level or above
// (not the trial level, because this value is not per-ndata and will not sync
// with other trial level stats).
func StatLayerGiMult(statsDir *tensorfs.Node, net *Network, trainMode, epochLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
statNames := []string{"GiMult"}
statDocs := map[string]string{
"GiMult": "Adaptive multiplier on layer-level inhibitory conductance, when this is subject to adaptation via Inhib.ActAvg.AdaptGi parameters. Starts out at 1 and moves up or down to maintain target range of activity level (see ActMAvg stat for activity level).",
}
levels := make([]enums.Enum, 10) // should be enough
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - epochLevel.Int64())
if mode.Int64() != trainMode.Int64() || levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
for _, lnm := range layerNames {
for si, statName := range statNames {
ly := net.LayerByName(lnm)
li := ly.Params.Index
name := lnm + "_" + statName
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
})
metadata.SetDoc(tsr, statDocs[statName])
continue
}
switch levi {
case 0:
var stat float32
switch si {
case 0:
stat = LayerStates.Value(int(li), int(0), int(LayerGiMult))
}
tsr.AppendRowFloat(float64(stat))
case int(runLevel.Int64() - epochLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
// StatLayerState returns a Stats function that records layer state.
// It runs for the given mode and level, recording the given variable
// for the given layer names. If isTrialLevel is true, the level is a
// trial level that needs iterating over NData.
func StatLayerState(statsDir *tensorfs.Node, net *Network, smode, slevel enums.Enum, isTrialLevel bool, variable string, layerNames ...string) func(mode, level enums.Enum, start bool) {
return func(mode, level enums.Enum, start bool) {
if mode.Int64() != smode.Int64() || level.Int64() != slevel.Int64() {
return
}
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
if !isTrialLevel {
ndata = 1
}
for _, lnm := range layerNames {
ly := net.LayerByName(lnm)
name := lnm + "_" + variable
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
tsr := levelDir.Float64(name, sizes...)
if start {
tsr.SetNumRows(0)
metadata.SetDoc(tsr, "Recording of given layer's state for given variable. This is a tensor quantity so plots only show the first value by default.")
continue
}
for di := range ndata {
row := tsr.DimSize(0)
tsr.SetNumRows(row + 1)
rtsr := tsr.RowTensor(row)
ly.UnitValuesSampleTensor(rtsr, variable, di)
}
}
}
}
// PCAStrongThr is the threshold for counting PCA eigenvalues as "strong".
var PCAStrongThr = 0.01
// StatPCA returns a Stats function that computes PCA NStrong, Top5, Next5, and Rest
// stats, which are important for tracking hogging dynamics where the representational
// space is not efficiently distributed. Uses Sample units for layers, and SVD computation
// is reasonably efficient.
// It only runs for given trainMode, from given Trial level upward,
// with higher levels computing the Mean of lower levels.
// Trial level just records ActM values for layers in a separate PCA subdir,
// which are input to the next level of computation where PCA is computed.
func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, trainMode, trialLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool, epc int) {
statNames := []string{"PCA_NStrong", "PCA_Top5", "PCA_Next", "PCA_Rest"}
statDocs := map[string]string{
"PCA_NStrong": "The number of PCA components in the covariance matrix of layer activity across the epoch, which have an eigenvalue above the PCAStrongThr (0.01 default), indicating that a component is making at least some kind of reasonable contribution to the overall representational space. In general this indicates how much `information' present in the layer's representations, with higher values generally being better.",
"PCA_Top5": "The summed strength of the eigenvalues for the top 5 PCA components in the covariance matrix of layer activity across the epoch. If these are very strong, it can indicate that the layer's activity is insufficiently diverse, and is being dominated by a few strong representations (i.e., `hog' units). See PCA_Next and PCA_Rest for comparisons; These values are only meaningful in evaluating the effects of different parameters that alter the balance of these factors: absolute values are not well defined.",
"PCA_Next": "The summed strength of the next 5 eigenvalues (see PCA_Top5). The relative strengths of this compared with Top5 gives a better sense of the balance vs. `hog' unit dynamic.",
"PCA_Rest": "The summed strength of the remaining eigenvalues beyond the top 10, which is useful in the same way PCA_Next is relative to PCA_Top5.",
}
levels := make([]enums.Enum, 10) // should be enough
return func(mode, level enums.Enum, start bool, epc int) {
levi := int(level.Int64() - trialLevel.Int64())
if mode.Int64() != trainMode.Int64() || levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
curPCADir := curModeDir.Dir("PCA")
pcaDir := statsDir.Dir("PCA")
levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
for _, lnm := range layerNames {
ly := net.LayerByName(lnm)
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
vtsr := pcaDir.Float64(lnm, sizes...)
if levi == 0 {
ltsr := curPCADir.Float64(lnm+"_ActM", ly.GetSampleShape().Sizes...)
if start {
vtsr.SetNumRows(0)
} else {
for di := range ndata {
ly.UnitValuesSampleTensor(ltsr, "ActM", di)
vtsr.AppendRow(ltsr)
}
}
continue
}
var svals [4]float64 // in statNames order
hasNew := false
if !start && levi == 1 {
if interval > 0 && epc%interval == 0 {
hasNew = true
vals := curPCADir.Float64("Vals_" + lnm)
covar := curPCADir.Float64("Covar_" + lnm)
metric.CovarianceMatrixOut(metric.Covariance, vtsr, covar)
matrix.SVDValuesOut(covar, vals)
ln := vals.Len()
for i := range ln {
v := vals.Float1D(i)
if v < PCAStrongThr {
svals[0] = float64(i)
break
}
}
for i := range 5 {
if ln >= 5 {
svals[1] += vals.Float1D(i)
}
if ln >= 10 {
svals[2] += vals.Float1D(i + 5)
}
}
svals[1] /= 5
svals[2] /= 5
if ln > 10 {
sum := stats.Sum(vals).Float1D(0)
svals[3] = (sum - (svals[1] + svals[2])) / float64(ln-10)
}
}
}
for si, statName := range statNames {
name := lnm + "_" + statName
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
})
metadata.SetDoc(tsr, statDocs[statName])
continue
}
switch levi {
case 1:
var stat float64
nr := tsr.DimSize(0)
if nr > 0 {
stat = tsr.FloatRow(nr-1, 0)
}
if hasNew {
stat = svals[si]
}
tsr.AppendRowFloat(float64(stat))
case int(runLevel.Int64() - trialLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
// StatCorSim returns a Stats function that records 1 - [LayerPhaseDiff] stats,
// i.e., Correlation-based similarity, for given layer names.
func StatCorSim(statsDir, currentDir *tensorfs.Node, net *Network, trialLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
levels := make([]enums.Enum, 10) // should be enough
levels[0] = trialLevel
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - trialLevel.Int64())
if levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
for _, lnm := range layerNames {
ly := net.LayerByName(lnm)
li := ly.Params.Index
name := lnm + "_CorSim"
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
})
metadata.SetDoc(tsr, "The correlation-based similarity of the neural activity patterns between the minus and plus phase (1 = patterns are effectively identical). For target layers, this is good continuous, normalized measure of learning performance, which can be more sensitive than thresholded SSE measures. For hidden layers, it indicates the magnitude of the temporal derivative error signal, which will generally be larger for layers close to targets.")
continue
}
switch levi {
case 0: // trial
for di := range ndata {
stat := 1.0 - float64(LayerStates.Value(int(li), int(di), int(LayerPhaseDiff)))
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(float64(stat))
}
case int(runLevel.Int64() - trialLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
// StatPrevCorSim returns a Stats function that computes correlations
// between previous trial activity state and current minus phase and
// plus phase state. This is important for predictive learning.
func StatPrevCorSim(statsDir, currentDir *tensorfs.Node, net *Network, trialLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
statNames := []string{"PrevToM", "PrevToP"}
statDocs := map[string]string{
"PrevToM": "The correlation-based similarity of the neural activity patterns between the previous trial's final state, and the current trial's minus phase (1 = activity patterns are effectively identical). In predictive learning, this indicates the extent to which the layer is just predicting that the same thing will persist, which can indicate a failure to learn actual time-varying predictions. Compare this value against CorSim as a kind of baseline, and against PrevToP as another baseline indicating the actual level of per-trial variation over time.",
"PrevToP": "The correlation-based similarity of the neural activity patterns between the previous trial's final state, and the current trial's plus phase (1 = activity patterns are effectively identical). This indicates the extent of actual change over time in an input or target layer, as a point of reference for interpreting PrevToM and CorSim stats.",
}
levels := make([]enums.Enum, 10) // should be enough
levels[0] = trialLevel
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - trialLevel.Int64())
if levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
for _, lnm := range layerNames {
for si, statName := range statNames {
ly := net.LayerByName(lnm)
name := lnm + "_" + statName
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
})
metadata.SetDoc(tsr, statDocs[statName])
continue
}
switch levi {
case 0:
// note: current lnm + _var is standard reusable unit vals buffer
actM := curModeDir.Float64(lnm+"_ActM", ly.GetSampleShape().Sizes...)
actP := curModeDir.Float64(lnm+"_ActP", ly.GetSampleShape().Sizes...)
// note: CaD is sufficiently stable that it is fine to compare with ActM and ActP
prev := curModeDir.Float64(lnm+"_CaDPrev", ly.GetSampleShape().Sizes...)
for di := range ndata {
ly.UnitValuesSampleTensor(prev, "CaDPrev", di)
prev.SetShapeSizes(prev.Len()) // set to 1D -- inexpensive and faster for computation
var stat float64
switch si {
case 0:
ly.UnitValuesSampleTensor(actM, "ActM", di)
actM.SetShapeSizes(actM.Len())
stat = metric.Correlation(actM, prev).Float1D(0)
case 1:
ly.UnitValuesSampleTensor(actP, "ActP", di)
actP.SetShapeSizes(actP.Len())
stat = metric.Correlation(actP, prev).Float1D(0)
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case int(runLevel.Int64() - trialLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
// StatLevelAll returns a Stats function that copies stats from given mode
// and level, without resetting at the start, to accumulate all rows
// over time until reset manually. The styleFunc, if non-nil, does plot styling
// based on the current column.
func StatLevelAll(statsDir *tensorfs.Node, srcMode, srcLevel enums.Enum, styleFunc func(s *plot.Style, col tensor.Values)) func(mode, level enums.Enum, start bool) {
return func(mode, level enums.Enum, start bool) {
if srcMode.Int64() != mode.Int64() || srcLevel.Int64() != level.Int64() {
return
}
modeDir := statsDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
allDir := modeDir.Dir(level.String() + "All")
cols := levelDir.NodesFunc(nil) // all nodes
for _, cl := range cols {
clv := cl.Tensor.(tensor.Values)
if clv.NumDims() == 0 || clv.DimSize(0) == 0 {
continue
}
if start {
trg := tensorfs.ValueType(allDir, cl.Name(), clv.DataType(), clv.ShapeSizes()...)
if trg.Len() == 0 {
if styleFunc != nil {
plot.SetFirstStyler(trg, func(s *plot.Style) {
styleFunc(s, clv)
})
}
trg.SetNumRows(0)
}
} else {
trg := tensorfs.ValueType(allDir, cl.Name(), clv.DataType())
trg.AppendRow(clv.RowTensor(clv.DimSize(0) - 1))
}
}
}
}
// StatLearnNow returns a Stats function that records the mean
// and std deviation of the LearnNow signal in the given layers.
// This is useful for tracking the continuous learning mechanism.
func StatLearnNow(statsDir, currentDir *tensorfs.Node, net *Network, trialLevel, runLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
statNames := []string{"LrnNowMean", "LrnNowStDev", "TimeCycle"}
statDocs := map[string]string{
"LrnNowMean": "Mean LearnNow cycle, relative to the theta cycle (trial). Any ISICycles are shifted to the end, as if the structure was Minus, Plus, ISI.",
"LrnNowStdDev": "Standard deviation of LearnNow cycle.",
}
levels := make([]enums.Enum, 10) // should be enough
levels[0] = trialLevel
return func(mode, level enums.Enum, start bool) {
levi := int(level.Int64() - trialLevel.Int64())
if levi < 0 {
return
}
levels[levi] = level
modeDir := statsDir.Dir(mode.String())
curModeDir := currentDir.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
ctx := net.Context()
stCyc := ctx.CyclesTotal - ctx.ThetaCycles
isiCyc := ctx.ISICycles
pmCyc := ctx.ThetaCycles - isiCyc
nan := math.NaN()
for _, lnm := range layerNames {
for si, statName := range statNames {
ly := net.LayerByName(lnm)
name := lnm + "_" + statName
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
// plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.Range.SetMin(0).SetMax(1)
// })
metadata.SetDoc(tsr, statDocs[statName])
continue
}
switch levi {
case 0:
// note: current lnm + _var is standard reusable unit vals buffer
anow := curModeDir.Float64(lnm+"_LearnNow", ly.GetSampleShape().Sizes...)
for di := range ndata {
switch si {
case 2:
ly.UnitValuesSampleTensor(anow, "TimeCycle", di)
default:
ly.UnitValuesSampleTensor(anow, "LearnNow", di)
}
n := anow.Len()
anow.SetShapeSizes(n) // set to 1D -- faster
for i := range n {
v := int32(anow.Float1D(i)) - stCyc
if v < 0 {
anow.SetFloat1D(nan, i)
} else if isiCyc > 0 {
if v <= isiCyc {
anow.SetFloat1D(float64(v+pmCyc), i)
} else {
anow.SetFloat1D(float64(v-isiCyc), i)
}
} else {
anow.SetFloat1D(float64(v), i)
}
}
var stat float64
switch si {
case 1:
stat = stats.Std(anow).Float1D(0)
default:
stat = stats.Mean(anow).Float1D(0)
}
if si != 1 && stat == 0 {
stat = nan
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case int(runLevel.Int64() - trialLevel.Int64()):
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
subDir := modeDir.Dir(levels[levi-1].String())
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
// FieldValue holds the value of a field in a struct.
type FieldValue struct {
Path string
Field reflect.StructField
Value, Parent reflect.Value
}
// StructValues returns a list of [FieldValue]s for fields of given struct,
// including any sub-fields, subject to filtering from the given should function
// which returns true for anything to include and false to exclude.
// You must pass a pointer to the object, so that the values are addressable.
func StructValues(obj any, should func(parent reflect.Value, field reflect.StructField, value reflect.Value) bool) []*FieldValue {
var vals []*FieldValue
val := reflect.ValueOf(obj).Elem()
parName := ""
WalkFields(val, should,
func(parent reflect.Value, field reflect.StructField, value reflect.Value) {
fkind := field.Type.Kind()
fname := field.Name
if val.Addr().Interface() == parent.Addr().Interface() { // top-level
if fkind == reflect.Struct {
parName = fname
return
}
} else {
fname = parName + "." + fname
}
sv := &FieldValue{Path: fname, Field: field, Value: value, Parent: parent}
vals = append(vals, sv)
})
return vals
}
// WalkFields recursively walks the exported fields of the given struct value,
// calling should to filter fields and walk on each included field,
// descending into nested struct fields.
func WalkFields(parent reflect.Value, should func(parent reflect.Value, field reflect.StructField, value reflect.Value) bool, walk func(parent reflect.Value, field reflect.StructField, value reflect.Value)) {
typ := parent.Type()
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
if !field.IsExported() {
continue
}
value := parent.Field(i)
if !should(parent, field, value) {
continue
}
if field.Type.Kind() == reflect.Struct {
walk(parent, field, value)
WalkFields(value, should, walk)
} else {
walk(parent, field, value)
}
}
}
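// Usage sketch (hypothetical; params is a pointer to any parameter struct):
// collect float32 leaf fields, descending into nested structs:
//
//	vals := StructValues(params, func(parent reflect.Value, field reflect.StructField, value reflect.Value) bool {
//		return field.Type.Kind() == reflect.Struct || field.Type.Kind() == reflect.Float32
//	})
//	for _, fv := range vals {
//		fmt.Println(fv.Path, fv.Value.Interface())
//	}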
// Code generated by "goal build"; DO NOT EDIT.
//line stats.goal:1
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"log"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
)
// PctUnitErr returns the proportion of units where the thresholded value of
// Target (Target or Compare types) or ActP does not match that of ActM.
// If Act > ly.Params.Acts.Clamp.ErrThr, effective activity = 1, else 0,
// which makes the measure robust to noisy activations.
// Returns one result per data parallel index ([ctx.NData]).
func (ly *Layer) PctUnitErr(ctx *Context) []float64 {
nn := ly.NNeurons
if nn == 0 {
return nil
}
errs := make([]float64, ctx.NData)
thr := ly.Params.Acts.Clamp.ErrThr
for di := uint32(0); di < ctx.NData; di++ {
wrong := 0
n := 0
for lni := uint32(0); lni < nn; lni++ {
ni := ly.NeurStIndex + lni
if NeuronIsOff(ni) {
continue
}
trg := false
if ly.Type == CompareLayer || ly.Type == TargetLayer {
if Neurons.Value(int(ni), int(di), int(Target)) > thr {
trg = true
}
} else {
if Neurons.Value(int(ni), int(di), int(ActP)) > thr {
trg = true
}
}
if Neurons.Value(int(ni), int(di), int(ActM)) > thr {
if !trg {
wrong++
}
} else {
if trg {
wrong++
}
}
n++
}
if n > 0 {
errs[di] = float64(wrong) / float64(n)
}
}
return errs
}
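// Usage sketch (hypothetical; outLay is an output *Layer): average the
// per-data-parallel error proportions into a single trial statistic:
//
//	errs := outLay.PctUnitErr(net.Context())
//	sum := 0.0
//	for _, e := range errs {
//		sum += e
//	}
//	pctErr := sum / float64(len(errs)) // assumes len(errs) > 0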
// LocalistErr2D decodes a 2D layer with Y axis = redundant units, X = localist units
// returning the indexes of the max activated localist value in the minus and plus phase
// activities, and whether these are the same or different (err = different)
// returns one result per data parallel index ([ctx.NData])
func (ly *Layer) LocalistErr2D(ctx *Context) (err []bool, minusIndex, plusIndex []int) {
err = make([]bool, ctx.NData)
minusIndex = make([]int, ctx.NData)
plusIndex = make([]int, ctx.NData)
ydim := ly.Shape.DimSize(0)
xdim := ly.Shape.DimSize(1)
for di := uint32(0); di < ctx.NData; di++ {
var maxM, maxP float32
var mIndex, pIndex int
for xi := 0; xi < xdim; xi++ {
var sumP, sumM float32
for yi := 0; yi < ydim; yi++ {
lni := uint32(yi*xdim + xi)
ni := ly.NeurStIndex + lni
sumM += Neurons.Value(int(ni), int(di), int(ActM))
sumP += Neurons.Value(int(ni), int(di), int(ActP))
}
if sumM > maxM {
mIndex = xi
maxM = sumM
}
if sumP > maxP {
pIndex = xi
maxP = sumP
}
}
er := mIndex != pIndex
err[di] = er
minusIndex[di] = mIndex
plusIndex[di] = pIndex
}
return
}
// LocalistErr4D decodes a 4D layer with each pool representing a localist value.
// Returns the flat 1D indexes of the max activated localist value in the minus and plus phase
// activities, and whether these are the same or different (err = different)
func (ly *Layer) LocalistErr4D(ctx *Context) (err []bool, minusIndex, plusIndex []int) {
err = make([]bool, ctx.NData)
minusIndex = make([]int, ctx.NData)
plusIndex = make([]int, ctx.NData)
npool := ly.Shape.DimSize(0) * ly.Shape.DimSize(1)
nun := ly.Shape.DimSize(2) * ly.Shape.DimSize(3)
for di := uint32(0); di < ctx.NData; di++ {
var maxM, maxP float32
var mIndex, pIndex int
for xi := 0; xi < npool; xi++ {
var sumP, sumM float32
for yi := 0; yi < nun; yi++ {
lni := uint32(xi*nun + yi)
ni := ly.NeurStIndex + lni
sumM += Neurons.Value(int(ni), int(di), int(ActM))
sumP += Neurons.Value(int(ni), int(di), int(ActP))
}
if sumM > maxM {
mIndex = xi
maxM = sumM
}
if sumP > maxP {
pIndex = xi
maxP = sumP
}
}
er := mIndex != pIndex
err[di] = er
minusIndex[di] = mIndex
plusIndex[di] = pIndex
}
return
}
// AvgMaxVarByPool returns the average and maximum value of given variable
// for given pool index (0 = entire layer, 1.. are subpools for 4D only).
// Uses fast index-based variable access.
func (ly *Layer) AvgMaxVarByPool(varNm string, poolIndex, di int) minmax.AvgMax32 {
var am minmax.AvgMax32
vidx, err := ly.UnitVarIndex(varNm)
if err != nil {
log.Printf("axon.Layer.AvgMaxVar: %s\n", err)
return am
}
pi := ly.Params.PoolIndex(uint32(poolIndex))
nsi := PoolIxs.Value(int(pi), int(PoolNeurSt))
nei := PoolIxs.Value(int(pi), int(PoolNeurEd))
am.Init()
for lni := nsi; lni < nei; lni++ {
ni := ly.NeurStIndex + uint32(lni)
if NeuronIsOff(ni) {
continue
}
vl := ly.UnitValue1D(vidx, int(lni), di)
am.UpdateValue(vl, int32(lni))
}
am.CalcAvg()
return am
}
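// Example (editor's sketch): layer-wide (pool 0) average and max of the "Act"
// variable for data index 0 via AvgMaxVarByPool. An invalid variable name is
// logged inside the method and a zero-valued result is returned.
// The function name is illustrative only.
func exampleAvgMaxAct(ly *Layer) (avg, mx float32) {
	am := ly.AvgMaxVarByPool("Act", 0, 0)
	return am.Avg, am.Max
}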
//gosl:start
// PhaseDiffFromActs computes the phase-wise difference in the
// activity state between the minus [ActM] and plus [ActP] phases,
// measured using 1 minus the correlation (centered cosine aka
// normalized dot product). 0 = no difference, 2 = maximum difference.
func (ly *LayerParams) PhaseDiffFromActs(ctx *Context) {
li := ly.Index
for di := uint32(0); di < ctx.NData; di++ {
lpi := ly.PoolIndex(0)
avgM := PoolAvgMax(AMAct, AMMinus, Avg, lpi, di)
avgP := PoolAvgMax(AMAct, AMPlus, Avg, lpi, di)
cosv := float32(0)
ssm := float32(0)
ssp := float32(0)
nn := ly.Indexes.NNeurons
for lni := uint32(0); lni < nn; lni++ {
ni := ly.Indexes.NeurSt + lni
if NeuronIsOff(ni) {
continue
}
ap := Neurons.Value(int(ni), int(di), int(ActP)) - avgP // zero mean = correl
am := Neurons.Value(int(ni), int(di), int(ActM)) - avgM
cosv += ap * am
ssm += am * am
ssp += ap * ap
}
dist := math32.Sqrt(ssm * ssp)
if dist != 0 {
cosv /= dist
}
LayerStates.Set(1-cosv, int(li), int(di), int(LayerPhaseDiff))
avg := LayerStates.Value(int(li), int(di), int(LayerPhaseDiffAvg))
vr := LayerStates.Value(int(li), int(di), int(LayerPhaseDiffVar))
ly.Acts.Dt.AvgVarUpdate(&avg, &vr, 1-cosv)
LayerStates.Set(avg, int(li), int(di), int(LayerPhaseDiffAvg))
LayerStates.Set(vr, int(li), int(di), int(LayerPhaseDiffVar))
}
}
//gosl:end
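// Example (editor's sketch): reading back the statistic written by
// PhaseDiffFromActs for a given layer and data index. 0 means the minus and
// plus phase patterns were identical; values approach 2 as they anti-correlate.
// The function name and caller-held ly / di values are illustrative only.
func examplePhaseDiff(ly *Layer, di uint32) float32 {
	li := ly.Params.Index
	return LayerStates.Value(int(li), int(di), int(LayerPhaseDiff))
}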
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"strings"
)
//gosl:start
// SynapseVars are the synapse variables representing synaptic weights, etc.
// These do not depend on the data parallel index (di).
// See [SynapseTraceVars] for variables that do depend on di.
type SynapseVars int32 //enums:enum
const (
// Wt is the effective synaptic weight value, determining how much conductance
// one presynaptic spike drives into the receiving neuron. Biologically it represents
// the number of effective AMPA receptors in the synapse.
// Wt = [SWt] * WtSig([LWt]), where WtSig is the sigmoidal contrast enhancement
// function that produces values between 0-2 based on LWt, centered on 1.
Wt SynapseVars = iota
// LWt is the rapid, online learning, linear weight value. It learns on every
// trial according to the learning rate (LRate) parameter. Biologically,
// this represents the internal biochemical processes that drive the trafficking
// of AMPA receptors in the synaptic density.
LWt
// SWt is a slowly adapting structural weight value, which acts as a
// multiplicative scaling factor on net synaptic efficacy [Wt].
// Biologically it represents the physical size and efficacy of the dendritic spine.
// SWt values adapt in a slower outer loop along with synaptic scaling,
// with constraints to prevent runaway positive feedback loops and maintain
// variance and further capacity to learn. Initial weight variance is partially or
// fully captured in the SWt values, with LWt capturing the remainder.
SWt
// DWt is delta (change in) synaptic weight, from learning. This updates [LWt]
// on every trial. It is reset to 0 after it is applied, but the network view
// captures this value just prior to application.
DWt
// DSWt is the accumulated change in the [SWt] slow structural weight, computed
// as the accumulation of [DWt] values over the longer slow weight update window.
DSWt
)
// SynapseTraceVars are synaptic variables that depend on the data
// parallel index, for accumulating learning traces and weight changes per data.
type SynapseTraceVars int32 //enums:enum
const (
// Tr is trace of synaptic activity over time, which is used for
// credit assignment in learning.
// In MatrixPath this is a tag that is then updated later when US occurs.
Tr SynapseTraceVars = iota
// DTr is delta (change in) Tr trace of synaptic activity over time.
DTr
// DiDWt is delta weight for each data parallel index (Di).
// This is directly computed from the Ca values (in cortical version)
// and then aggregated into the overall DWt (which may be further
// integrated across MPI nodes), which then drives changes in Wt values.
DiDWt
)
// SynapseIndexVars are synapse-level indexes used to access neurons and paths
// from the individual synapse level of processing.
type SynapseIndexVars int32 //enums:enum
const (
// SynRecvIndex is receiving neuron index in network's global list of neurons
SynRecvIndex SynapseIndexVars = iota
// SynSendIndex is sending neuron index in network's global list of neurons
SynSendIndex
// SynPathIndex is pathway index in global list of pathways organized as [Layers][RecvPaths]
SynPathIndex
)
//gosl:end
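// Example (editor's sketch): the Wt = SWt * WtSig(LWt) relation documented on
// the SynapseVars above, using a generic sigmoidal contrast-enhancement function.
// The wtSig closure, its gain of 6, and the math32 import are illustrative
// assumptions, not the library's actual parameters.
func exampleEffectiveWt(swt, lwt float32) float32 {
	wtSig := func(lw float32) float32 {
		// maps a 0..1 linear weight onto 0..2, centered on 1 at lw = 0.5
		if lw <= 0 {
			return 0
		}
		if lw >= 1 {
			return 2
		}
		return 2 / (1 + math32.Pow((1-lw)/lw, 6))
	}
	return swt * wtSig(lwt)
}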
// SynapseVarProps has all of the display properties for synapse variables, including desc tooltips
var SynapseVarProps = map[string]string{
"Wt": `cat:"Wts"`,
"LWt": `cat:"Wts"`,
"SWt": `cat:"Wts"`,
"DWt": `cat:"Wts" auto-scale:"+"`,
"DSWt": `cat:"Wts" auto-scale:"+"`,
"Tr": `cat:"Wts" auto-scale:"+"`,
"DTr": `cat:"Wts" auto-scale:"+"`,
"DiDWt": `cat:"Wts" auto-scale:"+"`,
}
var (
SynapseVarNames []string
SynapseVarsMap map[string]int
)
func init() {
SynapseVarsMap = make(map[string]int, int(SynapseVarsN)+int(SynapseTraceVarsN))
for i := Wt; i < SynapseVarsN; i++ {
vnm := i.String()
SynapseVarNames = append(SynapseVarNames, vnm)
SynapseVarsMap[vnm] = int(i)
tag := SynapseVarProps[vnm]
SynapseVarProps[vnm] = tag + ` doc:"` + strings.ReplaceAll(i.Desc(), "\n", " ") + `"`
}
for i := Tr; i < SynapseTraceVarsN; i++ {
vnm := i.String()
SynapseVarNames = append(SynapseVarNames, vnm)
SynapseVarsMap[vnm] = int(SynapseVarsN) + int(i)
tag := SynapseVarProps[vnm]
SynapseVarProps[vnm] = tag + ` doc:"` + strings.ReplaceAll(i.Desc(), "\n", " ") + `"`
}
}
// SynapseVarByName returns the index of the variable in the Synapse, or error
func SynapseVarByName(varNm string) (int, error) {
i, ok := SynapseVarsMap[varNm]
if !ok {
return -1, fmt.Errorf("Synapse VarByName: variable name: %s not valid", varNm)
}
return i, nil
}
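// Example (editor's sketch): resolving a synapse variable name to its index with
// error handling; indexes for SynapseTraceVars follow all SynapseVars, as set up
// in init above. Assumes log is imported; the function name is illustrative only.
func exampleSynVarIndex(name string) int {
	idx, err := SynapseVarByName(name)
	if err != nil {
		log.Println(err)
		return -1
	}
	return idx
}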
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package axon
import (
"fmt"
"math"
"runtime"
"sort"
"time"
"cogentcore.org/core/base/timer"
"cogentcore.org/core/gpu"
)
// SetNThreads sets number of threads to use for CPU parallel processing.
// pass 0 to use a default heuristic number based on current GOMAXPROCS
// processors and the number of neurons in the network (call after building)
func (nt *Network) SetNThreads(nthr int) {
md := nt.NetIxs().MaxData
maxProcs := runtime.GOMAXPROCS(0) // query GOMAXPROCS
if nthr <= 0 {
nneur := nt.Neurons.Len()
nthr = int(math.Ceil(float64(nneur) / (float64(10000) / float64(md))))
if nthr < 1 { // shouldn't happen, but just in case
nthr = 1
}
}
nt.NThreads = min(maxProcs, nthr)
gpu.NumThreads = nt.NThreads
}
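// Example (editor's sketch): the default thread heuristic used above, extracted
// for illustration. With 40,000 neurons and MaxData = 4 this yields
// ceil(40000 / (10000 / 4)) = 16 threads, capped at GOMAXPROCS.
// The function name and arguments are illustrative only.
func exampleDefaultNThreads(nneur, maxData, maxProcs int) int {
	nthr := int(math.Ceil(float64(nneur) / (float64(10000) / float64(maxData))))
	if nthr < 1 {
		nthr = 1
	}
	return min(maxProcs, nthr)
}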
//////////////////////////////////////////////////////////////
// Timing reports
// TimerReport reports the amount of time spent in each function, and in each thread
func (nt *Network) TimerReport() {
fmt.Printf("TimerReport: %v %d threads\n", nt.Name, nt.NThreads)
fmt.Printf("\t%13s \t%7s\t%7s\n", "Function Name", "Secs", "Pct")
nfn := len(nt.FunTimes)
fnms := make([]string, nfn)
idx := 0
for k := range nt.FunTimes {
fnms[idx] = k
idx++
}
sort.StringSlice(fnms).Sort()
pcts := make([]float64, nfn)
tot := 0.0
for i, fn := range fnms {
pcts[i] = float64(nt.FunTimes[fn].Total) / float64(time.Second)
tot += pcts[i]
}
for i, fn := range fnms {
fmt.Printf("\t%13s \t%7.3f\t%7.1f\n", fn, pcts[i], 100*(pcts[i]/tot))
}
fmt.Printf("\t%13s \t%7.3f\n", "Total", tot)
}
// FunTimerStart starts function timer for given function name -- ensures creation of timer
func (nt *Network) FunTimerStart(fun string) {
if !nt.RecFunTimes {
return
}
ft, ok := nt.FunTimes[fun]
if !ok {
ft = &timer.Time{}
nt.FunTimes[fun] = ft
}
ft.Start()
}
// FunTimerStop stops function timer -- timer must already exist
func (nt *Network) FunTimerStop(fun string) {
if !nt.RecFunTimes {
return
}
ft := nt.FunTimes[fun]
ft.Stop()
}
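// Example (editor's sketch): bracketing a unit of work with the timers above so
// it appears in TimerReport when RecFunTimes is enabled. The "Cycle" name and
// the wrapper function are illustrative only.
func exampleTimedCycle(nt *Network, run func()) {
	nt.FunTimerStart("Cycle")
	run()
	nt.FunTimerStop("Cycle")
}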
// Code generated by "core generate -add-types -gosl"; DO NOT EDIT.
package axon
import (
"cogentcore.org/core/types"
"cogentcore.org/lab/gosl/slbool"
"cogentcore.org/lab/gosl/slrand"
)
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathGTypes", IDName: "path-g-types", Doc: "PathGTypes represents the conductance (G) effects of a given pathway,\nincluding excitatory, inhibitory, and modulatory."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SynComParams", IDName: "syn-com-params", Doc: "SynComParams are synaptic communication parameters:\nused in the Path parameters. Includes delay and\nprobability of failure, and Inhib for inhibitory connections,\nand modulatory pathways that have multiplicative-like effects.", Fields: []types.Field{{Name: "GType", Doc: "type of conductance (G) communicated by this pathway"}, {Name: "Delay", Doc: "additional synaptic delay in msec for inputs arriving at this pathway.\nMust be <= MaxDelay which is set during network building based on MaxDelay\nof any existing Path in the network. Delay = 0 means a spike reaches\nreceivers in the next Cycle, which is the minimum time (1 msec).\nBiologically, subtract 1 from biological synaptic delay values to set\ncorresponding Delay value."}, {Name: "MaxDelay", Doc: "maximum value of Delay, based on MaxDelay values when the BuildGBuf\nfunction was called during [Network.Build]. Cannot set it longer than this,\nexcept by calling BuildGBuf on network after changing MaxDelay to a larger\nvalue in any pathway in the network."}, {Name: "DelLen", Doc: "delay length = actual length of the GBuf buffer per neuron = Delay+1; just for speed"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathScaleParams", IDName: "path-scale-params", Doc: "PathScaleParams are pathway scaling parameters: modulates overall strength of pathway,\nusing both absolute and relative factors.", Fields: []types.Field{{Name: "Rel", Doc: "relative scaling that shifts balance between different pathways -- this is subject to normalization across all other pathways into receiving neuron, and determines the GScale.Target for adapting scaling"}, {Name: "Abs", Doc: "absolute multiplier adjustment factor for the path scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SpikeParams", IDName: "spike-params", Doc: "SpikeParams contains spiking activation function params.\nImplements a basic thresholded Vm model, and optionally\nthe AdEx adaptive exponential function.", Fields: []types.Field{{Name: "Thr", Doc: "Thr is the spiking threshold value Theta (Θ) for firing output activation,\nin mV (millivolts). See also ExpThr for the AdEx implementation,\nin which case this threshold is the V_t parameters for the exponential function."}, {Name: "VmR", Doc: "VmR is the post-spiking membrane potential to reset to, in mV.\nThis produces refractory effect if lower than VmInit.\n-70 is appropriate biologically based value for AdEx (Brette & Gurstner, 2005)\nparameters. See also RTau."}, {Name: "Tr", Doc: "Tr is the post-spiking explicit refractory period, in cycles.\nPrevents Vm updating for this number of cycles post firing.\nVm is reduced in exponential steps over this period according to RTau,\nbeing fixed at Tr to VmR exactly."}, {Name: "RTau", Doc: "RTau is the time constant for decaying Vm down to VmR. At end of Tr it is set\nto VmR exactly. This provides a more realistic shape of the post-spiking\nVm which is only relevant for more realistic channels that key off of Vm.\nDoes not otherwise affect standard computation."}, {Name: "Exp", Doc: "Exp turns on the AdEx exponential excitatory current that drives Vm rapidly\nupward for spiking as it gets past its nominal firing threshold (Thr).\nEfficiently captures the Hodgkin Huxley dynamics of Na and K channels\n(Brette & Gurstner 2005)."}, {Name: "ExpSlope", Doc: "ExpSlope is the slope in mV for extra exponential excitatory current in AdEx."}, {Name: "ExpThr", Doc: "ExpThr is the membrane potential threshold (mV) for actually triggering\na spike when using the exponential mechanism. Due to 1 ms time integration,\nthis doesn't have much impact as long as it is above nominal spike threshold,\nand inside the VmRange for clipping Vm."}, {Name: "MaxHz", Doc: "MaxHz is for translating spiking interval (rate) into rate-code activation\nequivalent, as the maximum firing rate associated with a maximum\nactivation value of 1."}, {Name: "ISITau", Doc: "ISITau is the time constant for integrating the spiking interval in\nestimating spiking rate."}, {Name: "ISIDt", Doc: "ISIDt = 1 / tau"}, {Name: "RDt", Doc: "RDt = 1 / tau"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DendParams", IDName: "dend-params", Doc: "DendParams are the parameters for updating dendrite-specific dynamics", Fields: []types.Field{{Name: "GExp", Doc: "GExp is the dendrite-specific strength multiplier of the exponential\nspiking drive on Vm. E.g., .5 makes it half as strong as at the soma."}, {Name: "GR", Doc: "GR is the dendrite-specific additional conductance of Kdr delayed\nrectifier currents, used to reset membrane potential for dendrite.\nApplied for Tr cycles (ms)."}, {Name: "SSGi", Doc: "SSGi is the SST+ somatostatin positive slow spiking inhibition level\nspecifically affecting dendritic Vm (VmDend). This is important for countering\na positive feedback loop from NMDA getting stronger over the course\nof learning. Also typically requires SubMean = 1 for TrgAvgAct and\nlearning to fully counter this feedback loop."}, {Name: "HasMod", Doc: "HasMod is set automatically based on whether this layer has any recv pathways\nthat have a GType conductance type of Modulatory.\nIf so, then multiply GeSyn etc by GModSyn."}, {Name: "ModGain", Doc: "ModGain is a multiplicative gain factor on the total modulatory input.\nThis can also be controlled by the PathScale.Abs factor on\nModulatoryG inputs, but it is convenient to be able to control\non the layer as well."}, {Name: "ModACh", Doc: "ModACh if true, modulatory signal also includes ACh multiplicative factor."}, {Name: "ModBase", Doc: "ModBase is the baseline modulatory level for modulatory effects.\nNet modulation is ModBase + ModGain * GModSyn"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ActInitParams", IDName: "act-init-params", Doc: "ActInitParams are initial values for key network state variables.\nInitialized in InitActs called by InitWeights, and provides target values\nfor DecayState.", Fields: []types.Field{{Name: "Vm", Doc: "Vm initial membrane potential in mV (millivolts).\nSee Erev.L for the resting potential, typically -70."}, {Name: "Act", Doc: "Act is the initial activation value. Typically 0."}, {Name: "GeBase", Doc: "GeBase is the baseline level of excitatory conductance (net input).\nGe is initialized to this value, and it is added in as a constant\nbackground level of excitatory input, to capture all the other\ninputs not represented in the model, and intrinsic excitability, etc."}, {Name: "GiBase", Doc: "GiBase baseline level of inhibitory conductance (net input)\nGi is initialized to this value, and it is added in as a constant\nbackground level of inhibitory input. Captures all the other inputs\nnot represented in the model."}, {Name: "GeVar", Doc: "GeVar is the variance (sigma) of gaussian distribution around baseline\nGe values, per neuron, to establish variability in intrinsic excitability.\nValue never goes < 0."}, {Name: "GiVar", Doc: "GiVar is the variance (sigma) of gaussian distribution around baseline\nGi values, per neuron, to establish variability in intrinsic excitability.\nValue never goes < 0"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DecayParams", IDName: "decay-params", Doc: "DecayParams control the decay of activation state in the DecayState function\ncalled in NewState when a new state is to be processed.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "Act", Doc: "Act is proportion to decay most activation state variables toward initial\nvalues at start of every ThetaCycle (except those controlled separately below).\nIf 1 it is effectively equivalent to full clear, resetting other derived values.\nISI is reset every AlphaCycle to get a fresh sample of activations\n(doesn't affect direct computation -- only readout)."}, {Name: "Glong", Doc: "Glong is proportion to decay long-lasting conductances, NMDA and GABA,\nand also the dendritic membrane potential -- when using random stimulus\norder, it is important to decay this significantly to allow a fresh start,\nbut set Act to 0 to enable ongoing activity to keep neurons in their\nsensitive regime."}, {Name: "AHP", Doc: "AHP is decay of afterhyperpolarization currents, including mAHP, sAHP,\nand KNa, Kir. Has a separate decay because often useful to have this\nnot decay at all even if decay is on."}, {Name: "LearnCa", Doc: "LearnCa is decay of Ca variables driven by spiking activity used in learning:\nCaSpike* and Ca* variables. These are typically not decayed but may\nneed to be in some situations."}, {Name: "OnRew", Doc: "OnRew means decay layer at end of ThetaCycle when there is a global reward.\ntrue by default for PTPred, PTMaint and PFC Super layers."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DtParams", IDName: "dt-params", Doc: "DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)", Fields: []types.Field{{Name: "Integ", Doc: "Integ is the overall rate constant for numerical integration, for all equations\nat the neuron level. All time constants are specified in ms millisecond units,\nwith one cycle = 1 ms. If you instead want to make one cycle = 2 ms, you can do\nthis globally by setting this integ value to 2 (etc).\nHowever, stability issues will likely arise if you go too high.\nFor improved numerical stability, you may even need to reduce this value\nto 0.5 or possibly even lower (typically however this is not necessary)."}, {Name: "VmC", Doc: "VmC is the membrane potential capacitance in pF (picofarads), which\ndetermines the rate of Vm updating over time."}, {Name: "VmDendC", Doc: "VmDendC is the effective dendritic membrane capacitance in pF (picofarads),\nwhich is typically slower than VmC (also reflecting other dendritic dynamics)."}, {Name: "VmSteps", Doc: "VmSteps are the number of integration steps to take in computing new Vm value.\nThis is the one computation that can be most numerically unstable\nso taking multiple steps with proportionally smaller dt is beneficial."}, {Name: "GeTau", Doc: "GeTau is the time constant for decay of excitatory AMPA receptor\nconductance in ms (milliseconds)."}, {Name: "GiTau", Doc: "GiTau is the time constant for decay of inhibitory GABA-A receptor\nconductance in ms (milliseconds)."}, {Name: "IntTau", Doc: "IntTau is a time constant for integrating values over timescale of an\nindividual input state (e.g., roughly the 200 msec theta cycle),\nused in computing ActInt, GeInt from Ge, and GiInt from GiSyn.\nThis is used for scoring performance, not for learning, in cycles,\nwhich should be milliseconds typically\n(Tau is roughly 2/3 of the way to asymptote)."}, {Name: "LongAvgTau", Doc: "LongAvgTau is a time constant for integrating slower long-time-scale averages,\nsuch as ActAvg, Pool.ActsMAvg, ActsPAvg. Computed in NewState\nwhen a new input state is present (i.e., not msec but in units\nof a theta cycle) (Tau is roughly 2/3 of the way to asymptote).\nSet lower for smaller models."}, {Name: "MaxCycStart", Doc: "maxCycStart is the cycle to start updating the CaPMaxCa, CaPMax values\nwithin a theta cycle. Early cycles often reflect prior state."}, {Name: "VmDt", Doc: "VmDT = Integ / VmC"}, {Name: "VmDendDt", Doc: "VmDendDt = Integ / VmDendC"}, {Name: "DtStep", Doc: "DtStep = 1 / VmSteps"}, {Name: "GeDt", Doc: "GeDt = Integ / GeTau"}, {Name: "GiDt", Doc: "GiDt = Integ / GiTau"}, {Name: "IntDt", Doc: "IntDt = Integ / IntTau"}, {Name: "LongAvgDt", Doc: "LongAvgDt = 1 / LongAvgTau"}, {Name: "MaxI", Doc: "MaxI = VmC * 100 nS nominal max conductance = maximum I current step."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SpikeNoiseParams", IDName: "spike-noise-params", Doc: "SpikeNoiseParams parameterizes background spiking activity impinging on the neuron,\nsimulated using a poisson spiking process.", Fields: []types.Field{{Name: "On", Doc: "On switch to add noise simulating background spiking levels."}, {Name: "GeHz", Doc: "GeHz is the mean frequency of excitatory spikes. Typically 50Hz but multiple\ninputs increase rate. This is a poisson lambda parameter, also the variance."}, {Name: "Ge", Doc: "Ge is the excitatory conductance per spike. 0.001 has minimal impact,\n0.01 can be strong, and .15 is needed to influence timing of clamped inputs."}, {Name: "GiHz", Doc: "GiHz is the mean frequency of inhibitory spikes. Typically 100Hz fast spiking\nbut multiple inputs increase rate. This is a poisson lambda parameter,\nalso the variance."}, {Name: "Gi", Doc: "Gi is the excitatory conductance per spike. 0.001 has minimal impact,\n0.01 can be strong, and .15 is needed to influence timing of clamped inputs."}, {Name: "MaintGe", Doc: "MaintGe adds Ge noise to GeMaintRaw instead of standard Ge.\nused for PTMaintLayer for example."}, {Name: "GeExpInt", Doc: "GeExpInt = Exp(-Interval) which is the threshold for GeNoiseP as it is updated."}, {Name: "GiExpInt", Doc: "GiExpInt = Exp(-Interval) which is the threshold for GiNoiseP as it is updated."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ClampParams", IDName: "clamp-params", Doc: "ClampParams specify how external inputs drive excitatory conductances\n(like a current clamp) -- either adds or overwrites existing conductances.\nNoise is added in either case.", Fields: []types.Field{{Name: "Ge", Doc: "Ge is the contribution to Ge(t) for clamped external input.\nGenerally use .8 for Target layers, 1.50 for Input layers.\nThis is later multiplied by overall gbar_e which converts to nS units."}, {Name: "Add", Doc: "Add external conductance on top of any existing.\nGenerally this is not a good idea for target layers\n(creates a main effect that learning can never match),\nbut may be ok for input layers."}, {Name: "ErrThr", Doc: "ErrThr is the threshold on neuron Act activity to count as active for\ncomputing the error relative to target in PctErr method."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SMaintParams", IDName: "s-maint-params", Doc: "SMaintParams for self-maintenance simulating a population of\nNMDA-interconnected spiking neurons", Fields: []types.Field{{Name: "On", Doc: "On switch for self maintenance."}, {Name: "NNeurons", Doc: "NNeurons is the number of neurons within the self-maintenance pool,\neach of which is assumed to have the same probability of spiking."}, {Name: "Ge", Doc: "Ge is the excitatory conductance multiplier for self maintenance synapses."}, {Name: "Inhib", Doc: "Inhib controls how much of the extra maintenance conductance goes\nto the GeExt, which drives extra proportional inhibition."}, {Name: "ISI", Doc: "ISI (inter spike interval) range. Min is used as min ISIAvg\nfor poisson spike rate expected from the population,\nand above Max, no additional maintenance conductance is added."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PopCodeParams", IDName: "pop-code-params", Doc: "PopCodeParams provides an encoding of scalar value using population code,\nwhere a single continuous (scalar) value is encoded as a gaussian bump\nacross a population of neurons (1 dimensional).\nIt can also modulate rate code and number of neurons active according to the value.\nThis is for layers that represent values as in the Rubicon system.\nBoth normalized activation values (1 max) and Ge conductance values can be generated.", Fields: []types.Field{{Name: "On", Doc: "On toggles use of popcode encoding of variable(s) that this layer represents."}, {Name: "Ge", Doc: "Ge multiplier for driving excitatory conductance based on PopCode.\nMultiplies normalized activation values and adds to total Ge(t)\nwhich is later multiplied by Gbar.E for pA unit scaling."}, {Name: "Min", Doc: "Min is the minimum value representable. For GaussBump, typically include\nextra to allow mean with activity on either side to represent\nthe lowest value you want to encode."}, {Name: "Max", Doc: "Max is the maximum value representable. For GaussBump, typically include\nextra to allow mean with activity on either side to represent\nthe lowest value you want to encode."}, {Name: "MinAct", Doc: "MinAct is an activation multiplier for values at Min end of range,\nwhere values at Max end have an activation of 1.\nIf this is < 1, then there is a rate code proportional\nto the value in addition to the popcode pattern. See also MinSigma, MaxSigma."}, {Name: "MinSigma", Doc: "MinSigma is the sigma parameter of a gaussian specifying the tuning width\nof the coarse-coded units, in normalized 0-1 range, for values at the Min\nend of the range. If MinSigma < MaxSigma then more units are activated\nfor Max values vs. Min values, proportionally."}, {Name: "MaxSigma", Doc: "MaxSigma is the sigma parameter of a gaussian specifying the tuning width\nof the coarse-coded units, in normalized 0-1 range, for values at the Max\nend of the range. If MinSigma < MaxSigma then more units are activated\nfor Max values vs. Min values, proportionally."}, {Name: "Clip", Doc: "Clip ensures that encoded and decoded value remains within specified range."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ActParams", IDName: "act-params", Doc: "ActParams contains all the neural activity computation params and functions\nfor Axon, at the neuron level. This is included in [LayerParams].", Fields: []types.Field{{Name: "Spikes", Doc: "Spikes are spiking function parameter, including the AdEx spiking function."}, {Name: "Dend", Doc: "Dend are dendrite-specific parameters, which more accurately approximate\nthe electrical dynamics present in dendrites vs the soma."}, {Name: "Init", Doc: "Init has initial values for key network state variables.\nInitialized in InitActs called by InitWeights, and provides target\nvalues for DecayState."}, {Name: "Decay", Doc: "Decay is the amount to decay between theta cycles, simulating the passage\nof time and effects of saccades etc. It is especially important for\nenvironments with random temporal structure (e.g., most standard neural net\ntraining corpora)."}, {Name: "Dt", Doc: "Dt has time and rate constants for temporal derivatives / updating of\nactivation state."}, {Name: "Gbar", Doc: "Gbar has maximal conductances levels for channels, in nS (nanosiemens).\nMost other conductances are computed as time-varying proportions of these\nvalues (strict 1 max is not enforced and can be exceeded)."}, {Name: "Erev", Doc: "Erev are reversal / driving potentials for each channel, in mV (millivolts).\nCurrent is a function of the difference between these driving potentials\nand the membrane potential Vm, and goes to 0 (and reverses sign) as it\ncrosses equality."}, {Name: "Clamp", Doc: "Clamp determines how external inputs drive excitatory conductance."}, {Name: "Noise", Doc: "Noise specifies how, where, when, and how much noise to add."}, {Name: "VmRange", Doc: "VmRange constrains the range of the Vm membrane potential,\nwhich helps to prevent numerical instability."}, {Name: "Mahp", Doc: "Mahp is the M-type medium time-scale afterhyperpolarization (mAHP) current.\nThis is the primary form of adaptation on the time scale of\nmultiple sequences of spikes."}, {Name: "Sahp", Doc: "Sahp is the slow time-scale afterhyperpolarization (sAHP) current.\nIt integrates CaD at theta cycle intervals and produces a hard cutoff\non sustained activity for any neuron."}, {Name: "KNa", Doc: "KNa has the sodium-gated potassium channel adaptation parameters.\nIt activates a leak-like current as a function of neural activity\n(firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)."}, {Name: "Kir", Doc: "Kir is the potassium (K) inwardly rectifying (ir) current, which\nis similar to GABA-B (which is a GABA modulated Kir channel).\nThis channel is off by default but plays a critical role in making medium\nspiny neurons (MSNs) relatively quiet in the striatum."}, {Name: "NMDA", Doc: "NMDA has channel parameters used in computing the Gnmda conductance\nthat is maximal for more depolarized neurons (due to unblocking of\nMg++ ions), and thus helps keep active neurons active, thereby promoting\noverall neural stability over time. 
See also Learn.LearnNMDA for\ndistinct parameters used for Ca++ influx driving learning, and\nMaintNMDA for specialized NMDA driven by maintenance pathways."}, {Name: "MaintNMDA", Doc: "MaintNMDA has channel parameters used in computing the Gnmda conductance\nbased on pathways of the MaintG conductance type, e.g., in the PT PFC neurons.\nThis is typically stronger and longer lasting than standard NMDA."}, {Name: "GabaB", Doc: "GabaB has GABA-B channel parameters for long-lasting inhibition\nthat is inwardly rectified (GIRK coupled) and maximal for more hyperpolarized\nneurons, thus keeping inactive neurons inactive. This is synergistic with\nNMDA for supporting stable activity patterns over the theta cycle."}, {Name: "VGCC", Doc: "VGCC are voltage gated calcium channels, which provide a key additional\nsource of Ca for learning and positive-feedback loop upstate for active\nneurons when they are spiking."}, {Name: "AK", Doc: "AK is the A-type potassium (K) channel that is particularly important\nfor limiting the runaway excitation from VGCC channels."}, {Name: "SKCa", Doc: "SKCa is the small-conductance calcium-activated potassium channel produces\nthe pausing function as a consequence of rapid bursting. These are not active\nby default but are critical for subthalamic nucleus (STN) neurons."}, {Name: "SMaint", Doc: "SMaint provides a simplified self-maintenance current for a population of\nNMDA-interconnected spiking neurons."}, {Name: "PopCode", Doc: "PopCode provides encoding population codes, used to represent a single\ncontinuous (scalar) value, across a population of units / neurons\n(1 dimensional)."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.BLANovelPath", IDName: "bla-novel-path", Doc: "BLANovelPath connects all other pools to the first, Novelty, pool in a BLA layer.\nThis allows the known US representations to specifically inhibit the novelty pool."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Context", IDName: "context", Doc: "Context contains all of the global context state info\nthat is shared across every step of the computation.\nIt is passed around to all relevant computational functions,\nand is updated on the CPU and synced to the GPU after every cycle.\nIt contains timing, Testing vs. Training mode, random number context, etc.\nThere is one canonical instance on the network as Ctx, always get it from\nthe network.Context() method.", Directives: []types.Directive{{Tool: "types", Directive: "add", Args: []string{"-setters"}}}, Fields: []types.Field{{Name: "NData", Doc: "number of data parallel items to process currently."}, {Name: "Mode", Doc: "current running mode, using sim-defined enum, e.g., Train, Test, etc."}, {Name: "Testing", Doc: "Testing is true if the model is being run in a testing mode,\nso no weight changes or other associated computations should be done.\nThis flag should only affect learning-related behavior."}, {Name: "MinusPhase", Doc: "MinusPhase is true if this is the minus phase, when a stimulus is present\nand learning is occuring. Could also be in a non-learning phase when\nno stimulus is present. This affects accumulation of CaBins values only."}, {Name: "PlusPhase", Doc: "PlusPhase is true if this is the plus phase, when the outcome / bursting\nis occurring, driving positive learning; else minus or non-learning phase."}, {Name: "PhaseCycle", Doc: "Cycle within current phase, minus or plus."}, {Name: "Cycle", Doc: "Cycle within Trial: number of iterations of activation updating (settling)\non the current state. This is reset at NewState."}, {Name: "ThetaCycles", Doc: "ThetaCycles is the length of the theta cycle (i.e., Trial),\nin terms of 1 msec Cycles. Some network update steps depend on doing something\nat the end of the theta cycle (e.g., CTCtxtPath).\nShould be ISICycles + MinusCycles + PlusCycles"}, {Name: "ISICycles", Doc: "ISICycles is the number of inter-stimulus-interval cycles,\nwhich happen prior to the minus phase (i.e., after the last plus phase)."}, {Name: "MinusCycles", Doc: "MinusCycles is the number of cycles in the minus phase. Typically 150,\nbut may be set longer if ThetaCycles is above default of 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the number of cycles in the plus phase. Typically 50,\nbut may be set longer if ThetaCycles is above default of 200."}, {Name: "CyclesTotal", Doc: "CyclesTotal is the accumulated cycle count, which increments continuously\nfrom whenever it was last reset. Typically this is the number of milliseconds\nin simulation time."}, {Name: "Time", Doc: "Time is the accumulated amount of time the network has been running,\nin simulation-time (not real world time), in seconds."}, {Name: "TrialsTotal", Doc: "TrialsTotal is the total trial count, which increments continuously in NewState\n_only in Train mode_ from whenever it was last reset. Can be used for synchronizing\nweight updates across nodes."}, {Name: "TimePerCycle", Doc: "TimePerCycle is the amount of Time to increment per cycle."}, {Name: "SlowInterval", Doc: "SlowInterval is how frequently in Trials to perform slow adaptive processes\nsuch as synaptic scaling, associated in the brain with sleep,\nvia the SlowAdapt method. This should be long enough for meaningful changes\nto accumulate. 100 is default but could easily be longer in larger models.\nBecause SlowCounter is incremented by NData, high NData cases (e.g. 
16) likely need to\nincrease this value, e.g., 400 seems to produce overall consistent results in various models."}, {Name: "SlowCounter", Doc: "SlowCounter increments for each training trial, to trigger SlowAdapt at SlowInterval.\nThis is incremented by NData to maintain consistency across different values of this parameter."}, {Name: "AdaptGiInterval", Doc: "AdaptGiInterval is how frequently in Trials to perform inhibition adaptation,\nwhich needs to be even slower than the SlowInterval."}, {Name: "AdaptGiCounter", Doc: "AdaptGiCounter increments for each training trial, to trigger AdaptGi at AdaptGiInterval.\nThis is incremented by NData to maintain consistency across different values of this parameter."}, {Name: "pad"}, {Name: "RandCounter", Doc: "RandCounter is the random counter, incremented by maximum number of\npossible random numbers generated per cycle, regardless of how\nmany are actually used. This is shared across all layers so must\nencompass all possible param settings."}}})
// SetNData sets the [Context.NData]:
// number of data parallel items to process currently.
func (t *Context) SetNData(v uint32) *Context { t.NData = v; return t }
// SetMode sets the [Context.Mode]:
// current running mode, using sim-defined enum, e.g., Train, Test, etc.
func (t *Context) SetMode(v int32) *Context { t.Mode = v; return t }
// SetTesting sets the [Context.Testing]:
// Testing is true if the model is being run in a testing mode,
// so no weight changes or other associated computations should be done.
// This flag should only affect learning-related behavior.
func (t *Context) SetTesting(v slbool.Bool) *Context { t.Testing = v; return t }
// SetMinusPhase sets the [Context.MinusPhase]:
// MinusPhase is true if this is the minus phase, when a stimulus is present
// and learning is occurring. Could also be in a non-learning phase when
// no stimulus is present. This affects accumulation of CaBins values only.
func (t *Context) SetMinusPhase(v slbool.Bool) *Context { t.MinusPhase = v; return t }
// SetPlusPhase sets the [Context.PlusPhase]:
// PlusPhase is true if this is the plus phase, when the outcome / bursting
// is occurring, driving positive learning; else minus or non-learning phase.
func (t *Context) SetPlusPhase(v slbool.Bool) *Context { t.PlusPhase = v; return t }
// SetPhaseCycle sets the [Context.PhaseCycle]:
// Cycle within current phase, minus or plus.
func (t *Context) SetPhaseCycle(v int32) *Context { t.PhaseCycle = v; return t }
// SetCycle sets the [Context.Cycle]:
// Cycle within Trial: number of iterations of activation updating (settling)
// on the current state. This is reset at NewState.
func (t *Context) SetCycle(v int32) *Context { t.Cycle = v; return t }
// SetThetaCycles sets the [Context.ThetaCycles]:
// ThetaCycles is the length of the theta cycle (i.e., Trial),
// in terms of 1 msec Cycles. Some network update steps depend on doing something
// at the end of the theta cycle (e.g., CTCtxtPath).
// Should be ISICycles + MinusCycles + PlusCycles
func (t *Context) SetThetaCycles(v int32) *Context { t.ThetaCycles = v; return t }
// SetISICycles sets the [Context.ISICycles]:
// ISICycles is the number of inter-stimulus-interval cycles,
// which happen prior to the minus phase (i.e., after the last plus phase).
func (t *Context) SetISICycles(v int32) *Context { t.ISICycles = v; return t }
// SetMinusCycles sets the [Context.MinusCycles]:
// MinusCycles is the number of cycles in the minus phase. Typically 150,
// but may be set longer if ThetaCycles is above default of 200.
func (t *Context) SetMinusCycles(v int32) *Context { t.MinusCycles = v; return t }
// SetPlusCycles sets the [Context.PlusCycles]:
// PlusCycles is the number of cycles in the plus phase. Typically 50,
// but may be set longer if ThetaCycles is above default of 200.
func (t *Context) SetPlusCycles(v int32) *Context { t.PlusCycles = v; return t }
// SetCyclesTotal sets the [Context.CyclesTotal]:
// CyclesTotal is the accumulated cycle count, which increments continuously
// from whenever it was last reset. Typically this is the number of milliseconds
// in simulation time.
func (t *Context) SetCyclesTotal(v int32) *Context { t.CyclesTotal = v; return t }
// SetTime sets the [Context.Time]:
// Time is the accumulated amount of time the network has been running,
// in simulation-time (not real world time), in seconds.
func (t *Context) SetTime(v float32) *Context { t.Time = v; return t }
// SetTrialsTotal sets the [Context.TrialsTotal]:
// TrialsTotal is the total trial count, which increments continuously in NewState
// _only in Train mode_ from whenever it was last reset. Can be used for synchronizing
// weight updates across nodes.
func (t *Context) SetTrialsTotal(v int32) *Context { t.TrialsTotal = v; return t }
// SetTimePerCycle sets the [Context.TimePerCycle]:
// TimePerCycle is the amount of Time to increment per cycle.
func (t *Context) SetTimePerCycle(v float32) *Context { t.TimePerCycle = v; return t }
// SetSlowInterval sets the [Context.SlowInterval]:
// SlowInterval is how frequently in Trials to perform slow adaptive processes
// such as synaptic scaling, associated in the brain with sleep,
// via the SlowAdapt method. This should be long enough for meaningful changes
// to accumulate. 100 is default but could easily be longer in larger models.
// Because SlowCounter is incremented by NData, high NData cases (e.g. 16) likely need to
// increase this value, e.g., 400 seems to produce overall consistent results in various models.
func (t *Context) SetSlowInterval(v int32) *Context { t.SlowInterval = v; return t }
// SetSlowCounter sets the [Context.SlowCounter]:
// SlowCounter increments for each training trial, to trigger SlowAdapt at SlowInterval.
// This is incremented by NData to maintain consistency across different values of this parameter.
func (t *Context) SetSlowCounter(v int32) *Context { t.SlowCounter = v; return t }
// SetAdaptGiInterval sets the [Context.AdaptGiInterval]:
// AdaptGiInterval is how frequently in Trials to perform inhibition adaptation,
// which needs to be even slower than the SlowInterval.
func (t *Context) SetAdaptGiInterval(v int32) *Context { t.AdaptGiInterval = v; return t }
// SetAdaptGiCounter sets the [Context.AdaptGiCounter]:
// AdaptGiCounter increments for each training trial, to trigger AdaptGi at AdaptGiInterval.
// This is incremented by NData to maintain consistency across different values of this parameter.
func (t *Context) SetAdaptGiCounter(v int32) *Context { t.AdaptGiCounter = v; return t }
// SetRandCounter sets the [Context.RandCounter]:
// RandCounter is the random counter, incremented by maximum number of
// possible random numbers generated per cycle, regardless of how
// many are actually used. This is shared across all layers so must
// encompass all possible param settings.
func (t *Context) SetRandCounter(v slrand.Counter) *Context { t.RandCounter = v; return t }
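// Example (editor's sketch): the generated setters return *Context, so they can
// be chained when configuring a context. The values here are illustrative only.
func exampleConfigContext(ctx *Context) {
	ctx.SetNData(4).SetMinusCycles(150).SetPlusCycles(50)
}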
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.BurstParams", IDName: "burst-params", Doc: "BurstParams determine how the 5IB Burst activation is computed from\nCaP integrated spiking values in Super layers -- thresholded.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "ThrRel", Doc: "Relative component of threshold on superficial activation value,\nbelow which it does not drive Burst (and above which, Burst = CaP).\nThis is the distance between the average and maximum activation values\nwithin layer (e.g., 0 = average, 1 = max). Overall effective threshold\nis MAX of relative and absolute thresholds."}, {Name: "ThrAbs", Doc: "Absolute component of threshold on superficial activation value,\nbelow which it does not drive Burst (and above which, Burst = CaP).\nOverall effective threshold is MAX of relative and absolute thresholds."}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.CTParams", IDName: "ct-params", Doc: "CTParams control the CT corticothalamic neuron special behavior", Fields: []types.Field{{Name: "GeGain", Doc: "GeGain is the gain factor for context excitatory input, which is\nconstant as compared to the spiking input from other pathways, so it\nmust be downscaled accordingly. This can make a difference\nand may need to be scaled up or down."}, {Name: "DecayTau", Doc: "DecayTau is the decay time constant for context Ge input.\nif > 0, decays over time so intrinsic circuit dynamics have to take over.\nFor single-step copy-based cases, set to 0, while longer-time-scale\ndynamics should use ~50 or more."}, {Name: "OFCposPT", Doc: "OFCposPT is set for the OFCposPT PTMaintLayer, which sets the\nGvOFCposPTMaint global variable."}, {Name: "DecayDt", Doc: "1 / tau"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PulvinarParams", IDName: "pulvinar-params", Doc: "PulvinarParams provides parameters for how the plus-phase (outcome)\nstate of Pulvinar thalamic relay cell neurons is computed from\nthe corresponding driver neuron Burst activation (or CaP if not Super)", Fields: []types.Field{{Name: "DriveScale", Doc: "DriveScale is the multiplier on driver input strength,\nwhich multiplies CaP from driver layer to produce Ge excitatory\ninput to CNiIO unit."}, {Name: "FullDriveAct", Doc: "FullDriveAct is the level of Max driver layer CaP at which the drivers\nfully drive the burst phase activation. If there is weaker driver input,\nthen (Max/FullDriveAct) proportion of the non-driver inputs remain and\nthis critically prevents the network from learning to turn activation\noff, which is difficult and severely degrades learning."}, {Name: "DriveLayIndex", Doc: "DriveLayIndex of layer that generates the driving activity into this one\nset via SetBuildConfig(DriveLayName) setting"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GlobalScalarVars", IDName: "global-scalar-vars", Doc: "GlobalScalarVars are network-wide scalar variables, such as neuromodulators,\nreward, etc including the state for the Rubicon phasic dopamine model.\nThese are stored in the Network.GlobalScalars tensor and corresponding global variable."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GlobalVectorVars", IDName: "global-vector-vars", Doc: "GlobalVectorVars are network-wide vector variables, such as drives,\ncosts, US outcomes, with [MaxGlobalVecN] values per variable.\nThese are stored in the Network.GlobalVectors tensor and\ncorresponding global variable."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GPUVars", IDName: "gpu-vars", Doc: "GPUVars is an enum for GPU variables, for specifying what to sync."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.HipConfig", IDName: "hip-config", Doc: "HipConfig have the hippocampus size and connectivity parameters", Fields: []types.Field{{Name: "EC2Size", Doc: "size of EC2"}, {Name: "EC3NPool", Doc: "number of EC3 pools (outer dimension)"}, {Name: "EC3NNrn", Doc: "number of neurons in one EC3 pool"}, {Name: "CA1NNrn", Doc: "number of neurons in one CA1 pool"}, {Name: "CA3Size", Doc: "size of CA3"}, {Name: "DGRatio", Doc: "size of DG / CA3"}, {Name: "EC3ToEC2PCon", Doc: "percent connectivity from EC3 to EC2"}, {Name: "EC2ToDGPCon", Doc: "percent connectivity from EC2 to DG"}, {Name: "EC2ToCA3PCon", Doc: "percent connectivity from EC2 to CA3"}, {Name: "CA3ToCA1PCon", Doc: "percent connectivity from CA3 to CA1"}, {Name: "DGToCA3PCon", Doc: "percent connectivity into CA3 from DG"}, {Name: "EC2LatRadius", Doc: "lateral radius of connectivity in EC2"}, {Name: "EC2LatSigma", Doc: "lateral gaussian sigma in EC2 for how quickly weights fall off with distance"}, {Name: "MossyDelta", Doc: "proportion of full mossy fiber strength (PathScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"}, {Name: "MossyDeltaTest", Doc: "proportion of full mossy fiber strength (PathScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"}, {Name: "ThetaLow", Doc: "low theta modulation value for temporal difference EDL -- sets PathScale.Rel on CA1 <-> EC paths consistent with Theta phase model"}, {Name: "ThetaHigh", Doc: "high theta modulation value for temporal difference EDL -- sets PathScale.Rel on CA1 <-> EC paths consistent with Theta phase model"}, {Name: "EC5Clamp", Doc: "flag for clamping the EC5 from EC5ClampSrc"}, {Name: "EC5ClampSrc", Doc: "source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available"}, {Name: "EC5ClampTest", Doc: "clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there"}, {Name: "EC5ClampThr", Doc: "threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.HipPathParams", IDName: "hip-path-params", Doc: "HipPathParams define behavior of hippocampus paths, which have special learning rules", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "Hebb", Doc: "Hebbian learning proportion"}, {Name: "Err", Doc: "EDL proportion"}, {Name: "SAvgCor", Doc: "proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)"}, {Name: "SAvgThr", Doc: "threshold of sending average activation below which learning does not occur (prevents learning when there is no input)"}, {Name: "SNominal", Doc: "sending layer Nominal (need to manually set it to be the same as the sending layer)"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ActAvgParams", IDName: "act-avg-params", Doc: "ActAvgParams represents the nominal average activity levels in the layer\nand parameters for adapting the computed Gi inhibition levels to maintain\naverage activity within a target range.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}, {Tool: "gosl", Directive: "import", Args: []string{"github.com/emer/axon/v2/fsfffb"}}}, Fields: []types.Field{{Name: "Nominal", Doc: "Nominal is the estimated average activity level in the layer, which is\nused in computing the scaling factor on sending pathways from this layer.\nIn general it should roughly match the layer ActAvg.ActMAvg value, which\ncan be logged using the axon.LogAddDiagnosticItems function.\nIf layers receiving from this layer are not getting enough Ge excitation,\nthen this Nominal level can be lowered to increase pathway strength\n(fewer active neurons means each one contributes more, so scaling factor\n\n\tgoes as the inverse of activity level), or vice-versa if Ge is too high.\n\nIt is also the basis for the target activity level used for the AdaptGi\n\n\toption: see the Offset which is added to this value."}, {Name: "RTThr", Doc: "RTThr is the reaction time (RT) threshold activity level in the layer,\nin terms of the maximum CaP level of any neuron in the layer. The\nLayerStates LayerRT value is recorded for the cycle at which this\nlevel is exceeded within a theta cycle, after Acts.Dt.MaxCycStart cycles."}, {Name: "AdaptGi", Doc: "AdaptGi enables adapting of layer inhibition Gi multiplier factor\n(stored in layer GiMult value) to maintain a target layer level of\nActAvg.Nominal. This generally works well and improves the long-term\nstability of the models. It is not enabled by default because it depends\non having established a reasonable Nominal + Offset target activity level."}, {Name: "Offset", Doc: "Offset is added to Nominal for the target average activity that drives\nadaptation of Gi for this layer. Typically the Nominal level is good,\nbut sometimes Nominal must be adjusted up or down to achieve desired Ge\nscaling, so this Offset can compensate accordingly."}, {Name: "HiTol", Doc: "HiTol is the tolerance for higher than Target target average activation\nas a proportion of that target value (0 = exactly the target, 0.2 = 20%\nhigher than target). Only once activations move outside this tolerance\n\n\tare inhibitory values adapted."}, {Name: "LoTol", Doc: "LoTol is the tolerance for lower than Target target average activation\nas a proportion of that target value (0 = exactly the target, 0.5 = 50%\nlower than target). Only once activations move outside this tolerance are\n\n\tinhibitory values adapted."}, {Name: "AdaptRate", Doc: "AdaptRate is the rate of Gi adaptation as function of\nAdaptRate * (Target - ActMAvg) / Target. This occurs at spaced intervals\ndetermined by Network.SlowInterval value. Slower values such as 0.05 may\nbe needed for large networks and sparse layers."}, {Name: "AdaptMax", Doc: "AdaptMax is the maximum adaptation step magnitude to take at any point."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.InhibParams", IDName: "inhib-params", Doc: "InhibParams contains all the inhibition computation params and functions for basic Axon.\nThis is included in LayerParams to support computation.\nAlso includes the expected average activation in the layer, which is used for\nG conductance rescaling and potentially for adapting inhibition over time.", Fields: []types.Field{{Name: "ActAvg", Doc: "ActAvg has layer-level and pool-level average activation initial values\nand updating / adaptation thereof.\nInitial values help determine initial scaling factors."}, {Name: "Layer", Doc: "Layer determines inhibition across the entire layer.\nInput layers generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers.\nIf the layer has sub-pools (4D shape) then this is effectively between-pool inhibition."}, {Name: "Pool", Doc: "Pool determines inhibition within sub-pools of units, for layers with 4D shape.\nThis is almost always necessary if the layer has sub-pools."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Layer", IDName: "layer", Doc: "Layer implements the basic Axon spiking activation function,\nand manages learning in the pathways.", Methods: []types.Method{{Name: "InitWeights", Doc: "InitWeights initializes the weight values in the network, i.e., resetting learning\nAlso calls InitActs", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"ctx", "nt"}}, {Name: "InitActs", Doc: "InitActs fully initializes activation state -- only called automatically during InitWeights", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"ctx"}}, {Name: "Defaults", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "UnLesionNeurons", Doc: "UnLesionNeurons unlesions (clears the Off flag) for all neurons in the layer", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "LesionNeurons", Doc: "LesionNeurons lesions (sets the Off flag) for given proportion (0-1) of neurons in layer\nreturns number of neurons lesioned. Emits error if prop > 1 as indication that percent\nmight have been passed", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Args: []string{"prop"}, Returns: []string{"int"}}}, Embeds: []types.Field{{Name: "LayerBase"}}, Fields: []types.Field{{Name: "Params", Doc: "Params are layer parameters (pointer to item in Network.LayerParams)."}, {Name: "Network", Doc: "our parent network, in case we need to use it to find\nother layers etc; set when added by network."}, {Name: "Type", Doc: "Type is the type of layer, which drives specialized computation as needed."}, {Name: "NNeurons", Doc: "NNeurons is the number of neurons in the layer."}, {Name: "NeurStIndex", Doc: "NeurStIndex is the starting index of neurons for this layer within\nthe global Network list."}, {Name: "NPools", Doc: "NPools is the number of inhibitory pools based on layer shape,\nwith the first one representing the entire set of neurons in the layer,\nand 4D shaped layers have sub-pools after that."}, {Name: "MaxData", Doc: "MaxData is the maximum amount of input data that can be processed in\nparallel in one pass of the network (copied from [NetworkIndexes]).\nNeuron, Pool, Values storage is allocated to hold this amount."}, {Name: "RecvPaths", Doc: "RecvPaths is the list of receiving pathways into this layer from other layers."}, {Name: "SendPaths", Doc: "SendPaths is the list of sending pathways from this layer to other layers."}, {Name: "BuildConfig", Doc: "BuildConfig has configuration data set when the network is configured,\nthat is used during the network Build() process via PostBuild method,\nafter all the structure of the network has been fully constructed.\nIn particular, the Params is nil until Build, so setting anything\nspecific in there (e.g., an index to another layer) must be done\nas a second pass. Note that Params are all applied after Build\nand can set user-modifiable params, so this is for more special\nalgorithm structural parameters set during ConfigNet() methods."}, {Name: "DefaultParams", Doc: "DefaultParams are closures that apply default parameters\nprior to user-set parameters. These are useful for specific layer\nfunctionality in specialized brain areas (e.g., Rubicon, BG etc)\nnot associated with a layer type, which otherwise is used to hard-code\ninitial default parameters."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerIndexes", IDName: "layer-indexes", Doc: "LayerIndexes contains index access into network global arrays for GPU.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "NPools", Doc: "NPools is the total number of pools for this layer, including layer-wide."}, {Name: "NeurSt", Doc: "start of neurons for this layer in global array (same as Layer.NeurStIndex)"}, {Name: "NNeurons", Doc: "number of neurons in layer"}, {Name: "RecvSt", Doc: "start index into RecvPaths global array"}, {Name: "RecvN", Doc: "number of recv pathways"}, {Name: "SendSt", Doc: "start index into RecvPaths global array"}, {Name: "SendN", Doc: "number of recv pathways"}, {Name: "ExtsSt", Doc: "starting neuron index in global Exts list of external input for this layer.\nOnly for Input / Target / Compare layer types"}, {Name: "ShpPlY", Doc: "layer shape Pools Y dimension -- 1 for 2D"}, {Name: "ShpPlX", Doc: "layer shape Pools X dimension -- 1 for 2D"}, {Name: "ShpUnY", Doc: "layer shape Units Y dimension"}, {Name: "ShpUnX", Doc: "layer shape Units X dimension"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerInhibIndexes", IDName: "layer-inhib-indexes", Doc: "LayerInhibIndexes contains indexes of layers for between-layer inhibition.", Fields: []types.Field{{Name: "Index1", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used"}, {Name: "Index2", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used"}, {Name: "Index3", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used"}, {Name: "Index4", Doc: "idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerParams", IDName: "layer-params", Doc: "LayerParams contains all of the layer parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a read-only data storage buffer.", Fields: []types.Field{{Name: "Type", Doc: "Type is the functional type of layer, which determines the code path\nfor specialized layer types, and is synchronized with [Layer.Type]."}, {Name: "Index", Doc: "Index of this layer in [Layers] list."}, {Name: "MaxData", Doc: "MaxData is the maximum number of data parallel elements."}, {Name: "PoolSt", Doc: "PoolSt is the start of pools for this layer; first one is always the layer-wide pool."}, {Name: "Acts", Doc: "Activation parameters and methods for computing activations"}, {Name: "Inhib", Doc: "Inhibition parameters and methods for computing layer-level inhibition"}, {Name: "LayInhib", Doc: "LayInhib has indexes of layers that contribute between-layer inhibition\nto this layer. Set these indexes via BuildConfig LayInhibXName (X = 1, 2...)."}, {Name: "Learn", Doc: "Learn has learning parameters and methods that operate at the neuron level."}, {Name: "Bursts", Doc: "Bursts has [BurstParams] that determine how the 5IB Burst activation\nis computed from CaP integrated spiking values in Super layers."}, {Name: "CT", Doc: "CT has params for the CT corticothalamic layer and PTPred layer that\ngenerates predictions over the Pulvinar using context. Uses the CtxtGe\nexcitatory input plus stronger NMDA channels to maintain context trace."}, {Name: "Pulvinar", Doc: "Pulvinar has parameters for how the plus-phase (outcome) state of Pulvinar\nthalamic relay cell neurons is computed from the corresponding driver\nneuron Burst activation (or CaP if not Super)."}, {Name: "DSMatrix", Doc: "DSMatrixParams has parameters for dorsal Matrix layers, for SPN / MSN\ndirect and indirect pathways."}, {Name: "Striatum", Doc: "Striatum has params and indexes for striatum layers: DSMatrix, VSMatrix, DSPatch."}, {Name: "GP", Doc: "GP has params for GP (globus pallidus) of the BG layers."}, {Name: "IO", Doc: "IOParams has parameters for the IO inferior olive neurons,\nwhich compute a temporal offset error signal between CNiIO inhibitory\npredictions and excitatory sensory input, contingent on initial\nabove-threshold efferent copy motor trigger input (modulatory)."}, {Name: "Nuclear", Doc: "Nuclear has parameters for learning in the cerebellum, according\nto the Nuclear model (not just nucleus neurons)."}, {Name: "LDT", Doc: "LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory\nsignal, driven by superior colliculus stimulus novelty, US input / absence,\nand OFC / ACC inhibition."}, {Name: "VTA", Doc: "VTA has parameters for ventral tegmental area dopamine (DA) based on\nLHb PVDA (primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value) and Amygdala (CeM) CS / learned\nvalue (LV) activations, which update every cycle."}, {Name: "RWPred", Doc: "RWPred has parameters for reward prediction using a simple Rescorla-Wagner\nlearning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "RWDa", Doc: "RWDa has parameters for reward prediction dopamine using a simple\nRescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "TDInteg", Doc: "TDInteg has parameters for temporal differences (TD) reward integration layer."}, {Name: "TDDa", Doc: "TDDa has parameters for dopamine (DA) signal as 
the temporal difference\n(TD) between the TDIntegLayer activations in the minus and plus phase."}, {Name: "Indexes", Doc: "Indexes has recv and send pathway array access info."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerTypes", IDName: "layer-types", Doc: "LayerTypes enumerates all the different types of layers,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerVars", IDName: "layer-vars", Doc: "LayerVars are layer-level state values."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnCaParams", IDName: "learn-ca-params", Doc: "LearnCaParams parameterizes the neuron-level calcium signals driving learning:\nLearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or\nuse the more complex and dynamaic VGCC channel directly.\nLearnCa is then integrated in a cascading manner at multiple time scales:\nCaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}, {Tool: "gosl", Directive: "import", Args: []string{"github.com/emer/axon/v2/kinase"}}}, Fields: []types.Field{{Name: "Norm", Doc: "Norm is the denominator used for normalizing [LearnCa], so the\nmax is roughly 1 - 1.5 or so, which works best in terms of previous\nstandard learning rules, and overall learning performance."}, {Name: "SpikeVGCC", Doc: "SpikeVGCC uses spikes to generate VGCC instead of actual VGCC current.\nSee SpikeVGCCa for calcium contribution from each spike."}, {Name: "SpikeVgccCa", Doc: "SpikeVgccCa is the multiplier on spike for computing Ca contribution\nto [LearnCa], in SpikeVGCC mode."}, {Name: "VgccTau", Doc: "VgccTau is the time constant of decay for VgccCa calcium.\nIt is highly transient around spikes, so decay and diffusion\nfactors are more important than for long-lasting NMDA factor.\nVgccCa is integrated separately in [VgccCaInt] prior to adding\ninto NMDA Ca in [LearnCa]."}, {Name: "PosBias", Doc: "PosBias is a multiplier on [LearnCaP] in computing [CaDiff] that drives learning.\nIn some rare cases this can be useful in adjusting overall weight dynamics."}, {Name: "ETraceTau", Doc: "ETraceTau is the time constant for integrating an eligibility trace factor,\nwhich computes an exponential integrator of local neuron-wise error gradients."}, {Name: "ETraceScale", Doc: "ETraceScale multiplies the contribution of the ETrace to learning, determining\nthe strength of its effect. This is definitely beneficial in cases that can\nbenefit from longer traces, such as the deep music sim.\nWhere beneficial, 0.1 or so is a useful value."}, {Name: "pad"}, {Name: "Dt", Doc: "Dt are time constants for integrating [LearnCa] across\nM, P and D cascading levels."}, {Name: "VgccDt", Doc: "VgccDt rate = 1 / tau"}, {Name: "ETraceDt", Doc: "ETraceDt rate = 1 / tau"}, {Name: "NormInv", Doc: "NormInv = 1 / Norm"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnTimingParams", IDName: "learn-timing-params", Doc: "LearnTimingParams parameterizes the timing of Ca-driven Kinase\nalgorithm learning, based on detecting the first major peak of\ndifferential fast - slow activity associated with the start of\nthe minus phases: [TimePeak]. Learning occurs a fixed number of\nCycles (ms) offset from the peak.", Fields: []types.Field{{Name: "SynCaCycles", Doc: "SynCaCycles is the number of cycles over which to integrate the synaptic\npre * post calcium trace, which provides the credit assignment factor.\nMust be a multiple of CaBinCycles (10). Used for all learning (timed or not)."}, {Name: "LearnThr", Doc: "LearnThr is the threshold on CaD that must be reached in order to be\neligible for learning. Applies to non-timing based learning too."}, {Name: "On", Doc: "On indicates whether to use the timing parameters to drive\nlearning timing, or instead just learn at the end of the trial\nautomatically."}, {Name: "Refractory", Doc: "Refractory makes new learning depend on dropping below the learning\nthreshold. Applies only to timing based learning."}, {Name: "Cycles", Doc: "Cycles is the number of cycles (ms) after the [TimePeak] before\nlearning occurs, or the peak detection is reset to start anew."}, {Name: "TimeDiffTau", Doc: "Time constant for integrating [TimeDiff] as the absolute value of\nCaDiff integrated over time to smooth out significant local bumps."}, {Name: "TimeDiffDt", Doc: "Dt is 1/Tau"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TrgAvgActParams", IDName: "trg-avg-act-params", Doc: "TrgAvgActParams govern the target and actual long-term average activity in neurons.\nTarget value is adapted by neuron-wise error and difference in actual vs. target.\ndrives synaptic scaling at a slow timescale (Network.SlowInterval).", Fields: []types.Field{{Name: "GiBaseInit", Doc: "GiBaseInit sets an initial [GiBase] value, as a proportion of TrgRange.Max - [TrgAvg].\nThis gives neurons differences in intrinsic inhibition / leak as a starting bias.\nThis is independent of using the target values to scale synaptic weights. Only used if > 0."}, {Name: "RescaleOn", Doc: "RescaleOn is whether to use target average activity mechanism to rescale\nsynaptic weights, so that activity tracks the target values."}, {Name: "ErrLRate", Doc: "ErrLRate is the learning rate for adjustments to [TrgAvg] value based on the\nneuron-level error signal. Population TrgAvg values are renormalized to\na fixed overall average, in TrgRange. Generally, deviating from the default value\nof this parameter doesn't make much difference."}, {Name: "SynScaleRate", Doc: "SynScaleRate is a rate parameter for how much to scale synaptic weights\nin proportion to the [AvgDif] between target and actual proportion activity.\nThis determines the effective strength of the constraint, and larger models\nmay need more than the weaker default value."}, {Name: "SubMean", Doc: "SubMean is the amount of the mean [TrgAvg] change to subtract when updating.\n1 = full zero sum changes. 1 works best in general, but in some cases it\nmay be better to start with 0 and then increase using network SetSubMean\nmethod at a later point."}, {Name: "Permute", Doc: "Permute the order of TrgAvg values within layer. Otherwise they are just\nassigned in order from highest to lowest for easy visualization.\nGenerally must be true if any topographic weights are being used."}, {Name: "Pool", Doc: "Pool means use pool-level target values if pool-level inhibition and\n4D pooled layers are present. If pool sizes are relatively small,\nthen may not be useful to distribute targets just within pool."}, {Name: "pad"}, {Name: "TrgRange", Doc: "TrgRange is the range of target normalized average activations.\nIndividual neuron [TrgAvg] values are assigned values within this range,\nand clamped within this range. This is a critical parameter and the default\nusually works best."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RLRateParams", IDName: "rl-rate-params", Doc: "RLRateParams are receiving neuron learning rate modulation parameters.\nHas two factors: the derivative of the sigmoid based on CaD\nactivity levels, and the max-normalized phase-wise differences in activity\n(Diff): |CaP - CaD| / max(CaP, CaD).", Fields: []types.Field{{Name: "On", Doc: "On toggles use of learning rate modulation."}, {Name: "SigmoidLinear", Doc: "SigmoidLinear uses a linear sigmoid function: if act > .5: 1-act; else act\notherwise use the actual sigmoid derivative which is squared: a(1-a).\nThis can improve learning in some cases but is generally not beneficial."}, {Name: "SigmoidMin", Doc: "SigmoidMin is the minimum learning rate multiplier for sigmoidal\nact (1-act) factor, which prevents lrate from going too low for extreme values.\nSet to 1 to disable Sigmoid derivative factor, which is default for Target layers."}, {Name: "Diff", Doc: "Diff modulates learning rate as a function of max-normalized plus - minus\ndifferences, which reduces learning for more active neurons and emphasizes\nit for less active ones. This is typically essential.\nDiff = |CaP - CaD| / max(CaP, CaD)."}, {Name: "SpikeThr", Doc: "SpikeThr is the threshold on Max(CaP, CaD) below which Min lrate applies.\nMust be > 0 to prevent div by zero."}, {Name: "DiffThr", Doc: "DiffThr is the threshold on recv neuron error delta, i.e., |CaP - CaD|\nbelow which lrate is at Min value."}, {Name: "Min", Doc: "Min is the minimum learning rate value when |CaP - CaD| Diff is below DiffThr."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnNeuronParams", IDName: "learn-neuron-params", Doc: "LearnNeuronParams manages learning-related parameters at the neuron-level.\nThis is mainly the running average activations that drive learning", Fields: []types.Field{{Name: "CaLearn", Doc: "CaLearn parameterizes the neuron-level calcium signals driving learning:\nLearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking\nor use the more complex and dynamic VGCC channel directly. LearnCa is then\nintegrated in a cascading manner at multiple time scales:\nLearnCaM (as in calmodulin), LearnCaP (ltP, CaMKII, plus phase),\nLearnCaD (ltD, DAPK1, minus phase)."}, {Name: "Timing", Doc: "LearnTimingParams parameterizes the timing of Ca-driven Kinase\nalgorithm learning, based on detecting the first major peak of\ndifferential fast - slow activity associated with the start of\nthe minus phases: [TimePeak]. Learning occurs a fixed number of\nCycles (ms) offset from the peak."}, {Name: "CaSpike", Doc: "CaSpike parameterizes the neuron-level spike-driven calcium signals:\nCaM (calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).\nThese values are used in various cases as a proxy for the activation (spiking)\nbased learning signal."}, {Name: "LearnNMDA", Doc: "NMDA channel parameters used for learning, vs. the ones driving activation.\nThis allows exploration of learning parameters independent of their effects\non active maintenance contributions of NMDA, and may be supported by different\nreceptor subtypes."}, {Name: "TrgAvgAct", Doc: "TrgAvgAct has the synaptic scaling parameters for regulating overall average\nactivity compared to neuron's own target level."}, {Name: "RLRate", Doc: "RLRate has the recv neuron learning rate modulation params: an additional\nerror-based modulation of learning for receiver side:\nRLRate = |CaP - CaD| / Max(CaP, CaD)"}, {Name: "NeuroMod", Doc: "NeuroMod parameterizes neuromodulation effects on learning rate and activity,\nas a function of layer-level DA and ACh values, which are updated from global\nContext values, and computed from reinforcement learning algorithms."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SWtInitParams", IDName: "s-wt-init-params", Doc: "SWtInitParams for initial SWt (slow, structural weight) values.", Fields: []types.Field{{Name: "SPct", Doc: "SPct is how much of the initial random weights to capture in the\nslow, structural SWt values, with the rest going into the online learning\nLWt values. 1 gives the strongest initial biasing effect, for larger\nmodels that need more structural support. 0.5 should work for most models\nwhere stronger constraints are not needed."}, {Name: "Mean", Doc: "Mean is the target mean weight value across receiving neuron's pathway.\nThe mean SWt values are constrained to remain at this value.\nSome pathways may benefit from lower mean of .4."}, {Name: "Var", Doc: "Var is the initial variance in weight values, prior to constraints."}, {Name: "Sym", Doc: "Sym symmetrizes the initial weight values with those in reciprocal pathway.\nTypically true for bidirectional excitatory connections."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SWtAdaptParams", IDName: "s-wt-adapt-params", Doc: "SWtAdaptParams manages adaptation of the [SWt] (slow, structural weight) values.", Fields: []types.Field{{Name: "On", Doc: "On enables adaptation of [SWt] values at a slower time scale. If false, SWt\nvalues are not updated, in which case it is generally good to set Init.SPct=0 too."}, {Name: "LRate", Doc: "LRate is the learning rate multiplier on the accumulated [DWt] values\n(which already have fast LRate applied), to drive updating of [SWt]\nduring slow outer loop updating. Lower values impose stronger constraints,\nfor larger networks that need more structural support, e.g., 0.001 is better\nafter 1,000 epochs in large models. 0.1 is fine for smaller models."}, {Name: "SubMean", Doc: "SubMean is the amount of the mean to subtract from [SWt] delta when updating,\nto impose a zero-sum constraint on overall structural weight strengths.\nGenerally best to set to 1. There is a separate SubMean factor for [LWt]."}, {Name: "HiMeanDecay", Doc: "HiMeanDecay specifies a decay factor applied across all [LWt] weights\nin proportion to the deviation of the average effective weight value [Wt]\nabove the HiMeanThr threshold. This is applied at the slow learning interval\nand should be very slow, for counteracting a gradual accumulation in overall\nweights that can occur even with SubMean factors (which only operate on weights\nthat are actually changing on the current trial)."}, {Name: "HiMeanThr", Doc: "HiMeanThr specifies a decay factor applied across all [LWt] weights\nin proportion to the deviation of the average effective weight value [Wt]\naway from SWt.Init.Mean. This is applied at the slow learning interval\nand should be very slow, for counteracting a gradual accumulation in overall\nweights that can occur even with SubMean factors, which only operate on weights\nthat are actually changing on the current trial."}, {Name: "SigGain", Doc: "SigGain is the gain of the sigmoidal constrast enhancement function\nused to transform learned, linear [LWt] values into [Wt] values.\nThis is critical to offset the damping effect of exponential soft bounding,\nbut some special cases with different learning rules may benefit by making\nthis linear (1) instead."}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SWtParams", IDName: "s-wt-params", Doc: "SWtParams manages structural, slowly adapting weight values [SWt],\nin terms of initialization and updating over course of learning.\nSWts impose initial and slowly adapting constraints on neuron connectivity\nto encourage differentiation of neuron representations and overall good behavior\nin terms of not hogging the representational space.\nThe [TrgAvg] activity constraint is not enforced through SWt: it needs to be\nmore dynamic and is supported by the regular learned weights [LWt].", Fields: []types.Field{{Name: "Init", Doc: "Init controls the initialization of [SWt] values."}, {Name: "Adapt", Doc: "Adapt controls adaptation of [SWt] values in response to [LWt] learning."}, {Name: "Limit", Doc: "Limit limits the range of [SWt] values, so that they do not fully\ndetermine the effective overall weight value."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LRateParams", IDName: "l-rate-params", Doc: "LRateParams manages learning rate parameters for scaling [DWt] delta\nweight values that then update [LWt] online learned weights.\nIt has two optional modulation factors on top of a Base learning rate.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "Base", Doc: "Base learning rate for this pathway, which can be modulated\nby the other factors below. Generally larger networks use slower rates."}, {Name: "Sched", Doc: "Sched is a scheduled learning rate multiplier, simulating reduction\nin plasticity over aging. Use the [Network.LRateSched] method to apply\na given value to all pathways in the network."}, {Name: "Mod", Doc: "Mod is a dynamic learning rate modulation factor, typically driven by\nneuromodulation (e.g., dopamine)."}, {Name: "Eff", Doc: "Eff is the net effective actual learning rate multiplier used in\ncomputing [DWt]: Eff = Mod * Sched * Base"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DWtParams", IDName: "d-wt-params", Doc: "DWtParams has misc parameters for computing weight changes ([DWt]) for the default\nkinase trace-based error-driven cortical learning rule, and for other specialized\nlearning rules.", Fields: []types.Field{{Name: "SubMean", Doc: "SubMean is the amount of the mean [dWt] to subtract for updating the online\nlearning [LWt] values, producing a zero-sum effect. 1.0 = full zero-sum dWt.\nOnly applies to non-zero DWts. There is a separate such factor for [SWt].\nTypically set to 0 for standard trace learning pathways, although some require it\nfor stability over the long haul. Can use [Network.SetSubMean] to set to 1 after\nsignificant early learning has occurred with 0.\nSome special path types (e.g., Hebb) benefit from SubMean = 1 always."}, {Name: "SynTraceTau", Doc: "SynTraceTau is the time constant for integrating the synaptic trace [Tr]\nas a function of the synaptic activity credit assignment factor at the end\nof the theta cycle learning timescale. Larger values (greater than 1)\nproduce longer time windows of integration, and should only be used when\nthere is temporal structure to be learned across these longer timescales.\nThis synaptic trace is beneficial in addition to the receiver-based\neligibility trace [ETrLearn]."}, {Name: "LearnThr", Doc: "LearnThr is the threshold for learning, applied to SynCa CaP and CaD for Kinase\ncortical learning rule.\nIn Matrix and VSPatch it applies to normalized GeIntNorm value: setting this relatively\nhigh encourages sparser representations."}, {Name: "SynCa20", Doc: "SynCa20 uses an effective 20msec time window for synaptic calcium computation\nfrom the [CaBins] values for send and recv neurons in computing the SynCa\nsynaptic calcium value. Only applicable for pathways to [TargetLayer] layers\n(including [PulvinarLayer]), which use synaptic CaP - CaD directly for learning.\nDefault of 10msec (1 bin), works well for most cases.\nInternal cortical layers use integrated CaD-like value directly, see SynCaCycles\nin [LearnTimingParams]. This is only used for long ThetaCycle window (> 250 ms)."}, {Name: "CaPScale", Doc: "CaPScale is a separate multiplier for the CaP component of synaptic calcium, to\nallow separate weighting of potentiation (CaP) vs. depression (CaD) factors.\nOnly applicable for pathways to [TargetLayer] layers (including [PulvinarLayer]).\nThe default of 1 works best in most cases -- only adjust in special cases.\nAn increased CaP level results in an overall potentiation bias, which acts\nlike a hebbian learning factor, whereas a lower value produces more negatively\nbiased synaptic weight changes."}, {Name: "SynTraceDt", Doc: "Dt rate = 1 / tau"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.HebbParams", IDName: "hebb-params", Doc: "HebbParams for optional hebbian learning that replaces the\ndefault learning rule, based on S = sending activity,\nR = receiving activity", Fields: []types.Field{{Name: "On", Doc: "On turns on the use of the Hebbian learning rule instead of the default."}, {Name: "Up", Doc: "Up is the strength multiplier for hebbian increases, based on R * S * (1-LWt)."}, {Name: "Down", Doc: "Down is the strength multiplier for hebbian decreases, based on R * (1 - S) * LWt."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnSynParams", IDName: "learn-syn-params", Doc: "LearnSynParams manages learning-related parameters at the synapse-level.", Fields: []types.Field{{Name: "Learn", Doc: "Learn enables learning for this pathway."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}, {Name: "LRate", Doc: "LRateParams manages learning rate parameters for scaling [DWt] delta\nweight values that then update [LWt] online learned weights.\nIt has two optional modulation factors on top of a Base learning rate."}, {Name: "DWt", Doc: "DWtParams has misc parameters for computing weight changes ([DWt]) for the default\ntrace-based cortical learning rule and for other specialized learning rules."}, {Name: "Hebb", Doc: "hebbian learning option, which overrides the default learning rules"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LRateMod", IDName: "l-rate-mod", Doc: "LRateMod implements global learning rate modulation, based on a performance-based\nfactor, for example error. Increasing levels of the factor = higher learning rate.\nThis can be added to a Sim and called prior to DWt() to dynamically change lrate\nbased on overall network performance. It is not used by default in the standard params.", Directives: []types.Directive{{Tool: "gosl", Directive: "end"}}, Fields: []types.Field{{Name: "On", Doc: "toggle use of this modulation factor"}, {Name: "Base", Doc: "baseline learning rate -- what you get for correct cases"}, {Name: "pad"}, {Name: "pad1"}, {Name: "Range", Doc: "defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ViewTimes", IDName: "view-times", Doc: "ViewTimes are the options for when the NetView can be updated."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NetViewUpdate", IDName: "net-view-update", Doc: "NetViewUpdate manages time scales for updating the NetView.\nUse one of these for each mode you want to control separately.", Fields: []types.Field{{Name: "On", Doc: "On toggles update of display on"}, {Name: "Time", Doc: "Time scale to update the network view (Cycle to Trial timescales)."}, {Name: "CounterFunc", Doc: "CounterFunc returns the counter string showing current counters etc."}, {Name: "View", Doc: "View is the network view."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NetworkIndexes", IDName: "network-indexes", Doc: "NetworkIndexes are indexes and sizes for processing network.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "MaxData", Doc: "MaxData is the maximum number of data inputs that can be processed\nin parallel in one pass of the network.\nNeuron storage is allocated to hold this amount during\nBuild process, and this value reflects that."}, {Name: "MaxDelay", Doc: "MaxDelay is the maximum synaptic delay across all pathways at the time of\n[Network.Build]. This determines the size of the spike sending delay buffers."}, {Name: "NCaBins", Doc: "NCaBins is the total number of [CaBins] in the neuron state variables.\nSet to [Context.ThetaCycles] / [Context.CaBinCycles] in Build."}, {Name: "NLayers", Doc: "NLayers is the number of layers in the network."}, {Name: "NNeurons", Doc: "NNeurons is the total number of neurons."}, {Name: "NPools", Doc: "NPools is the total number of pools."}, {Name: "NPaths", Doc: "NPaths is the total number of paths."}, {Name: "NSyns", Doc: "NSyns is the total number of synapses."}, {Name: "RubiconNPosUSs", Doc: "RubiconNPosUSs is the total number of Rubicon Drives / positive USs."}, {Name: "RubiconNCosts", Doc: "RubiconNCosts is the total number of Rubicon Costs."}, {Name: "RubiconNNegUSs", Doc: "RubiconNNegUSs is the total number of .Rubicon Negative USs."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Network", IDName: "network", Doc: "Network implements the Axon spiking model.\nMost of the fields are copied to the global vars, needed for GPU,\nvia the SetAsCurrent method, and must be slices or tensors so that\nthere is one canonical underlying instance of all such data.\nThere are also Layer and Path lists that are used to scaffold the\nbuilding and display of the network, but contain no data.", Directives: []types.Directive{{Tool: "gosl", Directive: "end"}}, Methods: []types.Method{{Name: "InitWeights", Doc: "InitWeights initializes synaptic weights and all other associated long-term state variables\nincluding running-average state values (e.g., layer running average activations etc)", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "InitActs", Doc: "InitActs fully initializes activation state -- not automatically called", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "ShowAllGlobals", Doc: "ShowAllGlobals shows a listing of all Global variables and values.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}}, {Name: "Build", Doc: "Build constructs the layer and pathway state based on the layer shapes\nand patterns of interconnectivity. Everything in the network must have been\nconfigured by this point, including key values in Context such as ThetaCycles\nand CaBinCycles which drive allocation of number of [CaBins] neuron\nvariables and corresponding [GvCaBinWts] global scalar variables.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Returns: []string{"error"}}}, Embeds: []types.Field{{Name: "NetworkBase"}}, Fields: []types.Field{{Name: "Rubicon", Doc: "Rubicon system for goal-driven motivated behavior,\nincluding Rubicon phasic dopamine signaling.\nManages internal drives, US outcomes. Core LHb (lateral habenula)\nand VTA (ventral tegmental area) dopamine are computed\nin equations using inputs from specialized network layers\n(LDTLayer driven by BLA, CeM layers, VSPatchLayer).\nRenders USLayer, PVLayer, DrivesLayer representations\nbased on state updated here."}, {Name: "Layers", Doc: "Layers is the array of layers, used for CPU initialization, not GPU computation."}, {Name: "Paths", Doc: "Paths has pointers to all pathways in the network, sender-based, for CPU initialization,\nnot GPU computation."}, {Name: "LayerClassMap", Doc: "LayerClassMap is a map from class name to layer names."}, {Name: "NThreads", Doc: "NThreads is number of threads to use for parallel processing."}, {Name: "RecFunTimes", Doc: "record function timer information."}, {Name: "FunTimes", Doc: "timers for each major function (step of processing)."}, {Name: "LayerParams", Doc: "LayerParams are all the layer parameters. [NLayers]"}, {Name: "PathParams", Doc: "PathParams are all the path parameters, in sending order. 
[NPaths]"}, {Name: "NetworkIxs", Doc: "NetworkIxs have indexes and sizes for entire network (one only)."}, {Name: "PoolIxs", Doc: "PoolIxs have index values for each Pool.\n[Layer * Pools][PoolIndexVars]"}, {Name: "NeuronIxs", Doc: "NeuronIxs have index values for each neuron: index into layer, pools.\n[Neurons][Indexes]"}, {Name: "SynapseIxs", Doc: "SynapseIxs have index values for each synapse:\nproviding index into recv, send neurons, path.\n[Indexes][NSyns]; NSyns = [Layer][SendPaths][SendNeurons][Syns]"}, {Name: "PathSendCon", Doc: "PathSendCon are starting offset and N cons for each sending neuron,\nfor indexing into the Syns synapses, which are organized sender-based.\n[NSendCon][StartNN]; NSendCon = [Layer][SendPaths][SendNeurons]"}, {Name: "RecvPathIxs", Doc: "RecvPathIxs indexes into Paths (organized by SendPath) organized\nby recv pathways. needed for iterating through recv paths efficiently on GPU.\n[NRecvPaths] = [Layer][RecvPaths]"}, {Name: "PathRecvCon", Doc: "PathRecvCon are the receiving path starting index and number of connections.\n[NRecvCon][StartNN]; NRecvCon = [Layer][RecvPaths][RecvNeurons]"}, {Name: "RecvSynIxs", Doc: "RecvSynIxs are the indexes into Synapses for each recv neuron, organized\ninto blocks according to PathRecvCon, for receiver-based access.\n[NSyns] = [Layer][RecvPaths][RecvNeurons][Syns]"}, {Name: "Ctx", Doc: "Ctx is the context state (one). Other copies of Context can be maintained\nand [SetContext] to update this one, but this instance is the canonical one."}, {Name: "Neurons", Doc: "Neurons are all the neuron state variables.\n[Neurons][Data][Vars]"}, {Name: "NeuronAvgs", Doc: "NeuronAvgs are variables with averages over the\nData parallel dimension for each neuron.\n[Neurons][Vars]"}, {Name: "Pools", Doc: "Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition,\nIncluding the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex].\n[Layer * Pools][Data][PoolVars+AvgMax]"}, {Name: "PoolsInt", Doc: "PoolsInt are the [PoolIntVars] int32 state values for layer and sub-pool\ninhibition, AvgMax atomic integration, and other vars: use [AvgMaxIntVarIndex]\n[Layer * Pools][Data][PoolIntVars+AvgMax]"}, {Name: "LayerStates", Doc: "LayerStates holds layer-level state values, with variables defined in\n[LayerVars], for each layer and Data parallel index.\n[Layer][Data][LayerVarsN]"}, {Name: "GlobalScalars", Doc: "GlobalScalars are the global scalar state variables.\n[GlobalScalarVarsN+2*NCaWeights][Data]"}, {Name: "GlobalVectors", Doc: "GlobalVectors are the global vector state variables.\n[GlobalVectorsN][MaxGlobalVecN][Data]"}, {Name: "Exts", Doc: "Exts are external input values for all Input / Target / Compare layers\nin the network. The ApplyExt methods write to this per layer,\nand it is then actually applied in one consistent method.\n[NExts][Data]; NExts = [In / Out Layers][Neurons]"}, {Name: "PathGBuf", Doc: "PathGBuf is the conductance buffer for accumulating spikes.\nSubslices are allocated to each pathway.\nUses int-encoded values for faster GPU atomic integration.\n[NPathNeur][Data][MaxDel+1]; NPathNeur = [Layer][RecvPaths][RecvNeurons]"}, {Name: "PathGSyns", Doc: "PathGSyns are synaptic conductance integrated over time per pathway\nper recv neurons. 
spikes come in via PathBuf.\nsubslices are allocated to each pathway.\n[NPathNeur][Data]"}, {Name: "Synapses", Doc: "\tSynapses are the synapse level variables (weights etc).\n\nThese do not depend on the data parallel index, unlike [SynapseTraces].\n[NSyns][Vars]; NSyns = [Layer][SendPaths][SendNeurons][Syns]"}, {Name: "SynapseTraces", Doc: "SynapseTraces are synaptic variables that depend on the data\nparallel index, for accumulating learning traces and weight changes per data.\nThis is the largest data size, so multiple instances are used\nto handle larger networks.\n[NSyns][Data][Vars]; NSyns = [Layer][SendPaths][SendNeurons][Syns]"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DAModTypes", IDName: "da-mod-types", Doc: "DAModTypes are types of dopamine modulation of neural activity."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ValenceTypes", IDName: "valence-types", Doc: "ValenceTypes are types of valence coding: positive or negative."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuroModParams", IDName: "neuro-mod-params", Doc: "NeuroModParams specifies the effects of neuromodulators on neural\nactivity and learning rate. These can apply to any neuron type,\nand are applied in the core cycle update equations.", Fields: []types.Field{{Name: "DAMod", Doc: "dopamine receptor-based effects of dopamine modulation\non excitatory and inhibitory conductances: D1 is excitatory,\nD2 is inhibitory as a function of increasing dopamine."}, {Name: "Valence", Doc: "valence coding of this layer, which may affect specific layer\ntypes but does not directly affect neuromodulators currently."}, {Name: "DAModGain", Doc: "dopamine modulation of excitatory and inhibitory conductances\n(i.e., \"performance dopamine\" effect: this does NOT affect\nlearning dopamine modulation in terms of RLrate): g *= 1 + (DAModGain * DA)."}, {Name: "DALRateSign", Doc: "modulate the sign of the learning rate factor according to\nthe DA sign, taking into account the DAMod sign reversal for D2Mod,\nalso using BurstGain and DipGain to modulate DA value.\nOtherwise, only the magnitude of the learning rate is modulated\nas a function of raw DA magnitude according to DALRateMod\n(without additional gain factors)."}, {Name: "DALRateMod", Doc: "if not using DALRateSign, this is the proportion of maximum learning\nrate that Abs(DA) magnitude can modulate.\ne.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%."}, {Name: "AChLRateMod", Doc: "proportion of maximum learning rate that ACh can modulate.\ne.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%."}, {Name: "AChDisInhib", Doc: "amount of extra Gi inhibition added in proportion to 1 - ACh level.\nmakes ACh disinhibitory"}, {Name: "BurstGain", Doc: "multiplicative gain factor applied to positive dopamine signals.\nThis operates on the raw dopamine signal prior to any effect\nof D2 receptors in reversing its sign!"}, {Name: "DipGain", Doc: "multiplicative gain factor applied to negative dopamine signals.\nThis operates on the raw dopamine signal prior to any effect\nof D2 receptors in reversing its sign!\nshould be small for acq, but roughly equal to burst for ext."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronFlags", IDName: "neuron-flags", Doc: "NeuronFlags are bit-flags encoding relevant binary state for neurons"})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronVars", IDName: "neuron-vars", Doc: "NeuronVars are the neuron variables representing current active state,\nspecific to each input data state.\nSee NeuronAvgVars for vars shared across data."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronAvgVars", IDName: "neuron-avg-vars", Doc: "NeuronAvgVars are mostly neuron variables involved in longer-term average activity\nwhich is aggregated over time and not specific to each input data state,\nalong with any other state that is not input data specific."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronIndexVars", IDName: "neuron-index-vars", Doc: "NeuronIndexVars are neuron-level indexes used to access layers and pools\nfrom the individual neuron level."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NuclearParams", IDName: "nuclear-params", Doc: "NuclearParams has parameters that apply to all cerebellum Nuclear model neurons.\nNot just cerebellar nuclei neurons: also applies to PC.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "ActionEnv", Doc: "ActionEnv is the total time envelope for actions to be tracked,\nin ms (cycles). Must be consistent across microzone elements."}, {Name: "SendTimeOff", Doc: "SendTimeOff is the time offset for sending activations used in learning,\nrelative to the IO-driven LearnNow time. Should be 0 for CNiUp.\nMust be an even multiple of [CaBinCycles]."}, {Name: "SendTimeWindow", Doc: "SendTimeWindow is the time window to integrate sending activations\nused in learning. Must be an even multiple of [CaBinCycles]."}, {Name: "ActTarget", Doc: "ActTarget is the target activity level, as measured by CaD.\nGeBase is adapted, along with excitatory MF inputs in proportion to activity,\nwhich is the source of very slow synaptic decay in these pathways."}, {Name: "Decay", Doc: "Decay is the rate of decay (prior to the learning rate multiplier)\nfor baseline non-learning trials."}, {Name: "GeBaseLRate", Doc: "GeBaseLRate is the learning rate for neuron-level [GeBase] baseline\nexcitatory conductance, to maintain target activity levels."}, {Name: "IOLayIndex", Doc: "IOLayIndex of IO (inferior olive) layer for sending error signals\nto this layer. Set via SetBuildConfig(IOLayName) setting."}, {Name: "SendTimeBins", Doc: "SendTimeBins = SendTimeWindow / [CaBinCycles]."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.IOParams", IDName: "io-params", Doc: "IOParams has parameters for the IO inferior olive neurons,\nwhich compute a temporal offset error signal between CNiIO inhibitory\npredictions and excitatory sensory input, contingent on initial\nabove-threshold efferent copy motor trigger input (modulatory).\nNeuron [CaBins] are used to store TimeOff past inhibitory inputs.", Fields: []types.Field{{Name: "TimeOff", Doc: "TimeOff is the time offset for earlier predictive inhibitory inputs to\ncompare against current excitatory inputs to trigger an error,\nin ms (cycles). Must be an even multiple of [CaBinCycles]."}, {Name: "ErrThr", Doc: "ErrThr is the threshold on the GeSyn - GiSyn_(t-TimeOff) difference\nto trigger an error."}, {Name: "EfferentThr", Doc: "EfferentThr is the threshold for modulatory [GModSyn] from efferent copy\ninputs to trigger an activated IO window where error comparison occurs.\nEfferent inputs can continue post-threshold, but this is the point at which\nthe envelope opens."}, {Name: "EfferentOff", Doc: "EfferentOff is the offset from the time of the efferent signal before\nmeaningful sensory comparison can occur. The inhibitory prediction values\nare assumed to be strongly activated at this time.\nin ms (cycles). Must be an even multiple of [CaBinCycles]."}, {Name: "GTau", Doc: "GTau is the time constant in ms for integrating [GeSyn] and [GiSyn]\nexcitatory and inhibitory conductances for comparison.\nIntegration goes into GaM (only for IO neurons)."}, {Name: "GDt", Doc: "Dt = 1 / Tau"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSheets", IDName: "layer-sheets", Doc: "LayerSheets contains Layer parameter Sheets."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSheet", IDName: "layer-sheet", Doc: "LayerSheet is one Layer parameter Sheet."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSel", IDName: "layer-sel", Doc: "LayerSel is one Layer parameter Selector."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSearches", IDName: "layer-searches", Doc: "LayerSearches is a list of parameter Search elements."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSheets", IDName: "path-sheets", Doc: "PathSheets contains Path parameter Sheets."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSheet", IDName: "path-sheet", Doc: "PathSheet is one Path parameter Sheet."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSel", IDName: "path-sel", Doc: "PathSel is one Path parameter Selector."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSearches", IDName: "path-searches", Doc: "PathSearches is a list of parameter Search elements."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Params", IDName: "params", Doc: "Params contains the [LayerParams] and [PathParams] parameter setting functions\nprovided by the [emergent] [params] package.", Fields: []types.Field{{Name: "Layer", Doc: "Layer has the parameters to apply to the [LayerParams] for layers."}, {Name: "Path", Doc: "Path has the parameters to apply to the [PathParams] for paths."}, {Name: "ExtraSheets", Doc: "ExtraSheets has optional additional sheets of parameters to apply\nafter the default Base sheet. Use \"Script\" for default Script sheet.\nMultiple names separated by spaces can be used (don't put spaces in Sheet names!)"}, {Name: "Tag", Doc: "Tag is an optional additional tag to add to log file names to identify\na specific run of the model (typically set by a config file or args)."}, {Name: "Script", Doc: "Script is a parameter setting script, which adds to the Layer and Path sheets\ntypically using the \"Script\" set name."}, {Name: "Interp", Doc: "Interp is the yaegi interpreter for running the script."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Path", IDName: "path", Doc: "Path implements axon spiking communication and learning.", Embeds: []types.Field{{Name: "PathBase"}}, Fields: []types.Field{{Name: "Params", Doc: "path parameters."}, {Name: "Send", Doc: "sending layer for this pathway."}, {Name: "Recv", Doc: "receiving layer for this pathway."}, {Name: "Type", Doc: "type of pathway."}, {Name: "DefaultParams", Doc: "DefaultParams are functions to apply parameters prior to user-set\nparameters. These are useful for specific functionality in specialized\nbrain areas (e.g., Rubicon, BG etc) not associated with a path type,\nwhich otherwise is used to hard-code initial default parameters."}, {Name: "RecvConNAvgMax", Doc: "average and maximum number of recv connections in the receiving layer"}, {Name: "SendConNAvgMax", Doc: "average and maximum number of sending connections in the sending layer"}, {Name: "SynStIndex", Doc: "start index into global Synapse array:"}, {Name: "NSyns", Doc: "number of synapses in this pathway"}, {Name: "RecvCon", Doc: "starting offset and N cons for each recv neuron, for indexing into the RecvSynIndex array of indexes into the Syns synapses, which are organized sender-based. This is locally managed during build process, but also copied to network global PathRecvCons slice for GPU usage."}, {Name: "RecvSynIndex", Doc: "index into Syns synaptic state for each sending unit and connection within that, for the sending pathway which does not own the synapses, and instead indexes into recv-ordered list"}, {Name: "RecvConIndex", Doc: "for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIndex where needed, instead of this slice, because then the memory access will be close by other values on the synapse."}, {Name: "SendCon", Doc: "starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally managed during build process, but also copied to network global PathSendCons slice for GPU usage."}, {Name: "SendConIndex", Doc: "index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIndex where needed, instead of this slice, because then the memory access will be close by other values on the synapse."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.StartN", IDName: "start-n", Doc: "StartN holds a starting offset index and a number of items\narranged from Start to Start+N (exclusive).\nThis is not 16 byte padded and only for use on CPU side.", Fields: []types.Field{{Name: "Start", Doc: "starting offset"}, {Name: "N", Doc: "number of items --"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathIndexes", IDName: "path-indexes", Doc: "PathIndexes contains path-level index information into global memory arrays", Fields: []types.Field{{Name: "RecvLayer", Doc: "RecvLayer is the index of the receiving layer in global list of layers."}, {Name: "RecvNeurSt", Doc: "RecvNeurSt is the starting index of neurons in recv layer,\nso we don't need layer to get to neurons."}, {Name: "RecvNeurN", Doc: "RecvNeurN is the number of neurons in recv layer."}, {Name: "SendLayer", Doc: "SendLayer is the index of the sending layer in global list of layers."}, {Name: "SendNeurSt", Doc: "SendNeurSt is the starting index of neurons in sending layer,\nso we don't need layer to get to neurons."}, {Name: "SendNeurN", Doc: "SendNeurN is the number of neurons in send layer"}, {Name: "SynapseSt", Doc: "SynapseSt is the start index into global Synapse array.\n[Layer][SendPaths][Synapses]."}, {Name: "SendConSt", Doc: "SendConSt is the start index into global PathSendCon array.\n[Layer][SendPaths][SendNeurons]"}, {Name: "RecvConSt", Doc: "RecvConSt is the start index into global PathRecvCon array.\n[Layer][RecvPaths][RecvNeurons]"}, {Name: "RecvSynSt", Doc: "RecvSynSt is the start index into global sender-based Synapse index array.\n[Layer][SendPaths][Synapses]"}, {Name: "NPathNeurSt", Doc: "NPathNeurSt is the start NPathNeur index into PathGBuf, PathGSyns global arrays.\n[Layer][RecvPaths][RecvNeurons]"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GScaleValues", IDName: "g-scale-values", Doc: "GScaleValues holds the conductance scaling values.\nThese are computed once at start and remain constant thereafter,\nand therefore belong on Params and not on PathValues.", Fields: []types.Field{{Name: "Scale", Doc: "scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Path.PathScale adapt params"}, {Name: "Rel", Doc: "normalized relative proportion of total receiving conductance for this pathway: PathScale.Rel / sum(PathScale.Rel across relevant paths)"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathParams", IDName: "path-params", Doc: "PathParams contains all of the path parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a read-only storage buffer.", Fields: []types.Field{{Name: "Type", Doc: "Type is the functional type of path, which determines the code path\nfor specialized types, and is synchronized with [Path.Type]."}, {Name: "Index", Doc: "Index is the index of the pathway in global path list: [Layer][SendPaths]"}, {Name: "pad"}, {Name: "pad1"}, {Name: "Indexes", Doc: "recv and send neuron-level pathway index array access info"}, {Name: "Com", Doc: "synaptic communication parameters: delay, probability of failure"}, {Name: "PathScale", Doc: "pathway scaling parameters for computing GScale:\nmodulates overall strength of pathway, using both\nabsolute and relative factors, with adaptation option to maintain target max conductances"}, {Name: "SWts", Doc: "slowly adapting, structural weight value parameters,\nwhich control initial weight values and slower outer-loop adjustments"}, {Name: "Learn", Doc: "synaptic-level learning parameters for learning in the fast LWt values."}, {Name: "GScale", Doc: "conductance scaling values"}, {Name: "RLPred", Doc: "Params for RWPath and TDPredPath for doing dopamine-modulated learning\nfor reward prediction: Da * Send activity.\nUse in RWPredLayer or TDPredLayer typically to generate reward predictions.\nIf the Da sign is positive, the first recv unit learns fully; for negative,\nsecond one learns fully.\nLower lrate applies for opposite cases. Weights are positive-only."}, {Name: "VSMatrix", Doc: "VSMatrix has parameters for trace-based learning in the VSMatrixPath.\nA trace of synaptic co-activity is formed, and then modulated by\ndopamine whenever it occurs.\nThis bridges the temporal gap between gating activity and subsequent activity,\nand is based biologically on synaptic tags.\nDSPatch provides modulation of trace activity based on local critic signal."}, {Name: "DSMatrix", Doc: "DSMatrix has parameters for trace-based learning in the DSMatrixPath.\nA trace of synaptic co-activity is formed, and then modulated by\ndopamine whenever it occurs.\nThis bridges the temporal gap between gating activity and subsequent activity,\nand is based biologically on synaptic tags.\nDSPatch provides modulation of trace activity based on local critic signal."}, {Name: "BLA", Doc: "Basolateral Amygdala pathway parameters."}, {Name: "Hip", Doc: "Hip bench parameters."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathTypes", IDName: "path-types", Doc: "PathTypes enumerates all the different types of axon pathways,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DSMatrixParams", IDName: "ds-matrix-params", Doc: "DSMatrixParams has parameters for DSMatrixLayer.\nDA, ACh learning rate modulation is pre-computed on the recv neuron\nRLRate variable via NeuroMod.\nMust set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig(\"DAMod\").", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "PatchD1Range", Doc: "PatchD1Range is the range of PatchD1 values to normalize into effective value."}, {Name: "PatchD2Range", Doc: "PatchD2Range is the range of PatchD2 values to normalize into effective value."}, {Name: "PatchDAModGain", Doc: "PatchDAModGain is a separate NeuroMod.DAModGain factor applying\nto DA performance gain effects from the Patch-based DA values.\nThe standard NeuroMod parameters apply only to the final outcome-based\ndopamine values."}, {Name: "PatchBurstGain", Doc: "PatchBurstGain is a separate NeuroMod.BurstGain-like factor applying\nto DA performance gain effects from the Patch-based DA values.\nThe standard NeuroMod parameters apply only to the final outcome-based\ndopamine values, which do not drive performance DA effects in dorsal striatum.\nNeuroMod.DAModGain does control overall performance gain from patch."}, {Name: "PatchD1Index", Doc: "Index of PatchD1 layer to get striosome modulation state from.\nSet during Build from BuildConfig PatchD1Name."}, {Name: "PatchD2Index", Doc: "Index of PatchD2 layer to get striosome modulation state from.\nSet during Build from BuildConfig PatchD2Name."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.StriatumParams", IDName: "striatum-params", Doc: "StriatumParams has params and indexes for BG Striatum layers including\nDSMatrixLayer, VSMatrixLayer, and DSPatchLayer.", Fields: []types.Field{{Name: "GateThr", Doc: "GateThr is the threshold on layer Avg CaPMax for Matrix Go and BG Thal\nlayers to count as having gated."}, {Name: "OtherIndex", Doc: "Index of other layer (D2 if we are D1 and vice-versa).\nSet during Build from BuildConfig OtherName."}, {Name: "PFIndex", Doc: "Index of PF parafasciculus layer to get gating output state from.\nSet during Build from BuildConfig PFName."}, {Name: "ThalLay1Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay2Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay3Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay4Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay5Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay6Index", Doc: "Index of thalamus layer that we gate. needed to get gating information.\nSet during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GPLayerTypes", IDName: "gp-layer-types", Doc: "GPLayerTypes is a GPLayer axon-specific layer type enum."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GPParams", IDName: "gp-params", Doc: "GPLayer represents a globus pallidus layer, including:\nGPePr, GPeAk (arkypallidal), and GPi (see GPType for type).\nTypically just a single unit per Pool representing a given stripe.", Fields: []types.Field{{Name: "GPType", Doc: "type of GP Layer -- must set during config using SetBuildConfig of GPType."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DSMatrixPathParams", IDName: "ds-matrix-path-params", Doc: "DSMatrixPathParams for trace-based learning in the MatrixPath.\nA trace of synaptic co-activity is formed, and then modulated by dopamine\nwhenever it occurs. This bridges the temporal gap between gating activity\nand subsequent activity, and is based biologically on synaptic tags.\nTrace is applied to DWt and reset at the time of reward.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "PatchDA", Doc: "PatchDA is proportion of Credit trace factor for learning\nto modulate by PatchDA versus just standard s*r activity factor."}, {Name: "Credit", Doc: "Credit is proportion of trace activity driven by the credit assignment factor\nbased on the PF modulatory inputs, synaptic activity (send * recv),\nand Patch DA, which indicates extent to which gating at this time is net\nassociated with subsequent reward or not."}, {Name: "Delta", Doc: "Delta is weight for trace activity that is a function of the minus-plus delta\nactivity signal on the receiving SPN neuron, independent of PF modulation.\nThis should always be 1 except for testing disabling: adjust NonDelta\nrelative to it, and the overall learning rate."}, {Name: "D2Scale", Doc: "D2Scale is a scaling factor for the DAD2 learning factor relative to\nthe DAD1 contribution (which is 1 - DAD1)."}, {Name: "OffTrace", Doc: "OffTrace is a multiplier on trace contribution when action output\ncommunicated by PF is not above threshold."}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.VSMatrixPathParams", IDName: "vs-matrix-path-params", Doc: "VSMatrixPathParams for trace-based learning in the VSMatrixPath,\nfor ventral striatum paths.\nA trace of synaptic co-activity is formed, and then modulated by dopamine\nwhenever it occurs. This bridges the temporal gap between gating activity\nand subsequent activity, and is based biologically on synaptic tags.\nTrace is applied to DWt and reset at the time of reward.", Fields: []types.Field{{Name: "RewActLearn", Doc: "RewActLearn makes learning based on activity at time of reward,\nin inverse proportion to the GoalMaint activity: i.e., if there was no\ngoal maintenance, learn at reward to encourage goal engagement next time,\nbut otherwise, do not further reinforce at time of reward, because the\nactual goal gating learning trace is a better learning signal.\nOtherwise, only uses accumulated trace but doesn't include rew-time activity,\ne.g., for testing cases that do not have GoalMaint."}, {Name: "Delta", Doc: "Delta is weight for trace activity that is a function of the minus-plus delta\nactivity signal on the receiving SPN neuron, independent of PF modulation.\nThis should always be 1 except for testing disabling: adjust NonDelta\nrelative to it, and the overall learning rate."}, {Name: "Credit", Doc: "Credit is proportion of trace activity driven by the credit assignment factor\nbased on the PF modulatory inputs, synaptic activity (send * recv),\nand Patch DA, which indicates extent to which gating at this time is net\nassociated with subsequent reward or not."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PoolIndexVars", IDName: "pool-index-vars"})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PoolIntVars", IDName: "pool-int-vars", Doc: "PoolIntVars are int32 pool variables, for computing fsfffb inhibition etc.\nNote that we use int32 instead of uint32 so that overflow errors can be detected.\nSee [PoolVars] for float32 variables."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.AvgMax", IDName: "avg-max", Doc: "AvgMax are Avg and Max"})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.AvgMaxPhases", IDName: "avg-max-phases", Doc: "AvgMaxPhases are the different Phases over which AvgMax values are tracked."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.AvgMaxVars", IDName: "avg-max-vars", Doc: "AvgMaxVars are the different Neuron variables for which [AvgMaxPhases]\nis computed."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RandFunIndex", IDName: "rand-fun-index", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RWPredParams", IDName: "rw-pred-params", Doc: "RWPredParams parameterizes reward prediction for a simple Rescorla-Wagner\nlearning dynamic (i.e., PV learning in the Rubicon framework).", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "PredRange", Doc: "default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RWDaParams", IDName: "rw-da-params", Doc: "RWDaParams computes a dopamine (DA) signal using simple Rescorla-Wagner\nlearning dynamic (i.e., PV learning in the Rubicon framework).", Fields: []types.Field{{Name: "TonicGe", Doc: "tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"}, {Name: "RWPredLayIndex", Doc: "idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TDIntegParams", IDName: "td-integ-params", Doc: "TDIntegParams are params for reward integrator layer", Fields: []types.Field{{Name: "Discount", Doc: "discount factor -- how much to discount the future prediction from TDPred"}, {Name: "PredGain", Doc: "gain factor on TD rew pred activations"}, {Name: "TDPredLayIndex", Doc: "idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TDDaParams", IDName: "td-da-params", Doc: "TDDaParams are params for dopamine (DA) signal as the temporal difference (TD)\nbetween the TDIntegLayer activations in the minus and plus phase.", Fields: []types.Field{{Name: "TonicGe", Doc: "tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"}, {Name: "TDIntegLayIndex", Doc: "idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RLPredPathParams", IDName: "rl-pred-path-params", Doc: "RLPredPathParams does dopamine-modulated learning for reward prediction: Da * Send.Act\nUsed by RWPath and TDPredPath within corresponding RWPredLayer or TDPredLayer\nto generate reward predictions based on its incoming weights, using linear activation\nfunction. Has no weight bounds or limits on sign etc.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "OppSignLRate", Doc: "how much to learn on opposite DA sign coding neuron (0..1)"}, {Name: "DaTol", Doc: "tolerance on DA -- if below this abs value, then DA goes to zero and there is no learning -- prevents prediction from exactly learning to cancel out reward value, retaining a residual valence of signal"}, {Name: "pad"}, {Name: "pad1"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LDTParams", IDName: "ldt-params", Doc: "LDTParams compute reward salience as ACh global neuromodulatory signal\nas a function of the MAX activation of its inputs from salience detecting\nlayers (e.g., the superior colliculus: SC), and whenever there is an external\nUS outcome input (signalled by the global GvHasRew flag).\nACh from salience inputs is discounted by GoalMaint activity,\nreducing distraction when pursuing a goal, but US ACh activity is not so reduced.\nACh modulates excitability of goal-gating layers.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "SrcThr", Doc: "SrcThr is the threshold per input source, on absolute value (magnitude),\nto count as a significant reward event, which then drives maximal ACh.\nSet to 0 to disable this nonlinear behavior."}, {Name: "Rew", Doc: "Rew uses the global Context.NeuroMod.HasRew flag to drive ACh:\nif there is some kind of external reward being given, then\nACh goes to 1, else 0 for this component."}, {Name: "MaintInhib", Doc: "MaintInhib is the extent to which active goal maintenance (via Global GoalMaint)\ninhibits ACh signals: when goal engaged, distractability is lower."}, {Name: "SrcLay1Index", Doc: "index of Layer to get max activity from; set during Build from BuildConfig\nSrcLay1Name if present -- -1 if not used."}, {Name: "SrcLay2Index", Doc: "index of Layer to get max activity from; set during Build from BuildConfig\nSrcLay2Name if present -- -1 if not used."}, {Name: "SrcLay3Index", Doc: "index of Layer to get max activity from; set during Build from BuildConfig\nSrcLay3Name if present -- -1 if not used."}, {Name: "SrcLay4Index", Doc: "index of Layer to get max activity from; set during Build from BuildConfig\nSrcLay4Name if present -- -1 if not used."}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.VTAParams", IDName: "vta-params", Doc: "VTAParams are for computing overall VTA DA based on LHb PVDA\n(primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value)\nand Amygdala (CeM) CS / learned value (LV) activations, which update\nevery cycle.", Fields: []types.Field{{Name: "CeMGain", Doc: "gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values"}, {Name: "LHbGain", Doc: "gain on computed LHb DA (Burst - Dip) -- for controlling DA levels"}, {Name: "AChThr", Doc: "threshold on ACh level required to generate LV CS-driven dopamine burst"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.BLAPathParams", IDName: "bla-path-params", Doc: "BLAPathParams has parameters for basolateral amygdala learning.\nLearning is driven by the Tr trace as function of ACh * Send Act\nrecorded prior to US, and at US, recv unit delta: CaP - CaDPrev\ntimes normalized GeIntNorm for recv unit credit assignment.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "NegDeltaLRate", Doc: "use 0.01 for acquisition (don't unlearn) and 1 for extinction.\nnegative delta learning rate multiplier"}, {Name: "AChThr", Doc: "threshold on this layer's ACh level for trace learning updates"}, {Name: "USTrace", Doc: "proportion of US time stimulus activity to use for the trace component of"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DriveParams", IDName: "drive-params", Doc: "DriveParams manages the drive parameters for computing and updating drive state.\nMost of the params are for optional case where drives are automatically\nupdated based on US consumption (which satisfies drives) and time passing\n(which increases drives).", Fields: []types.Field{{Name: "DriveMin", Doc: "minimum effective drive value, which is an automatic baseline ensuring\nthat a positive US results in at least some minimal level of reward.\nUnlike Base values, this is not reflected in the activity of the drive\nvalues, and applies at the time of reward calculation as a minimum baseline."}, {Name: "Base", Doc: "baseline levels for each drive, which is what they naturally trend toward\nin the absence of any input. Set inactive drives to 0 baseline,\nactive ones typically elevated baseline (0-1 range)."}, {Name: "Tau", Doc: "time constants in ThetaCycle (trial) units for natural update toward\nBase values. 0 values means no natural update (can be updated externally)."}, {Name: "Satisfaction", Doc: "decrement in drive value when US is consumed, thus partially satisfying\nthe drive. Positive values are subtracted from current Drive value."}, {Name: "Dt", Doc: "1/Tau"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.UrgencyParams", IDName: "urgency-params", Doc: "UrgencyParams has urgency (increasing pressure to do something)\nand parameters for updating it.\nRaw urgency integrates effort when _not_ goal engaged\nwhile effort (negative US 0) integrates when a goal _is_ engaged.", Fields: []types.Field{{Name: "U50", Doc: "value of raw urgency where the urgency activation level is 50%"}, {Name: "Power", Doc: "exponent on the urge factor -- valid numbers are 1,2,4,6"}, {Name: "Thr", Doc: "threshold for urge -- cuts off small baseline values"}, {Name: "DAtonic", Doc: "gain factor for driving tonic DA levels as a function of urgency"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.USParams", IDName: "us-params", Doc: "USParams control how positive and negative USs and Costs are\nweighted and integrated to compute an overall PV primary value.", Fields: []types.Field{{Name: "PVposGain", Doc: "gain factor applied to sum of weighted, drive-scaled positive USs\nto compute PVpos primary summary value.\nThis is multiplied prior to 1/(1+x) normalization.\nUse this to adjust the overall scaling of PVpos reward within 0-1\nnormalized range (see also PVnegGain).\nEach USpos is assumed to be in 0-1 range, with a default of 1."}, {Name: "PVnegGain", Doc: "gain factor applied to sum of weighted negative USs and Costs\nto compute PVneg primary summary value.\nThis is multiplied prior to 1/(1+x) normalization.\nUse this to adjust overall scaling of PVneg within 0-1\nnormalized range (see also PVposGain)."}, {Name: "USnegGains", Doc: "Negative US gain factor for encoding each individual negative US,\nwithin their own separate input pools, multiplied prior to 1/(1+x)\nnormalization of each term for activating the USneg pools.\nThese gains are _not_ applied in computing summary PVneg value\n(see PVnegWts), and generally must be larger than the weights to leverage\nthe dynamic range within each US pool."}, {Name: "CostGains", Doc: "Cost gain factor for encoding the individual Time, Effort etc costs\nwithin their own separate input pools, multiplied prior to 1/(1+x)\nnormalization of each term for activating the Cost pools.\nThese gains are _not_ applied in computing summary PVneg value\n(see CostWts), and generally must be larger than the weights to use\nthe full dynamic range within each US pool."}, {Name: "PVposWts", Doc: "weight factor applied to each separate positive US on the way to computing\nthe overall PVpos summary value, to control the weighting of each US\nrelative to the others. Each pos US is also multiplied by its dynamic\nDrive factor as well.\nUse PVposGain to control the overall scaling of the PVpos value."}, {Name: "PVnegWts", Doc: "weight factor applied to each separate negative US on the way to computing\nthe overall PVneg summary value, to control the weighting of each US\nrelative to the others, and to the Costs. These default to 1."}, {Name: "PVcostWts", Doc: "weight factor applied to each separate Cost (Time, Effort, etc) on the\nway to computing the overall PVneg summary value, to control the weighting\nof each Cost relative to the others, and relative to the negative USs.\nThe first pool is Time, second is Effort, and these are typically weighted\nlower (.02) than salient simulation-specific USs (1)."}, {Name: "USposEst", Doc: "computed estimated US values, based on OFCposPT and VSMatrix gating, in PVposEst"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LHbParams", IDName: "l-hb-params", Doc: "LHbParams has values for computing LHb & RMTg which drives dips / pauses in DA firing.\nLHb handles all US-related (PV = primary value) processing.\nPositive net LHb activity drives dips / pauses in VTA DA activity,\ne.g., when predicted pos > actual or actual neg > predicted.\nNegative net LHb activity drives bursts in VTA DA activity,\ne.g., when actual pos > predicted (redundant with LV / Amygdala)\nor \"relief\" burst when actual neg < predicted.", Fields: []types.Field{{Name: "VSPatchNonRewThr", Doc: "threshold on VSPatch prediction during a non-reward trial"}, {Name: "VSPatchGain", Doc: "gain on the VSPatchD1 - D2 difference to drive the net VSPatch DA\nprediction signal, which goes in VSPatchPos and RewPred global variables"}, {Name: "VSPatchVarTau", Doc: "decay time constant for computing the temporal variance in VSPatch\nvalues over time"}, {Name: "NegThr", Doc: "threshold factor that multiplies integrated pvNeg value\nto establish a threshold for whether the integrated pvPos value\nis good enough to drive overall net positive reward.\nIf pvPos wins, it is then multiplicatively discounted by pvNeg;\notherwise, pvNeg is discounted by pvPos."}, {Name: "BurstGain", Doc: "gain multiplier on PVpos for purposes of generating bursts\n(not for discounting negative dips)."}, {Name: "DipGain", Doc: "gain multiplier on PVneg for purposes of generating dips\n(not for discounting positive bursts)."}, {Name: "VSPatchVarDt", Doc: "1/tau"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GiveUpParams", IDName: "give-up-params", Doc: "GiveUpParams are parameters for computing when to give up,\nbased on Utility, Timing and Progress factors.", Fields: []types.Field{{Name: "ProbThr", Doc: "threshold on GiveUp probability, below which no give up is triggered"}, {Name: "MinGiveUpSum", Doc: "minimum GiveUpSum value, which is the denominator in the sigmoidal function.\nThis minimum prevents division by zero and any other degenerate values."}, {Name: "Utility", Doc: "the factor multiplying utility values: cost and expected positive outcome"}, {Name: "Timing", Doc: "the factor multiplying timing values from VSPatch"}, {Name: "Progress", Doc: "the factor multiplying progress values based on time-integrated progress\ntoward the goal"}, {Name: "MinUtility", Doc: "minimum utility cost and reward estimate values -- when they are below\nthese levels (at the start) then utility is effectively neutral,\nso the other factors take precedence."}, {Name: "VSPatchSumMax", Doc: "maximum VSPatchPosSum for normalizing the value for give-up weighing"}, {Name: "VSPatchVarMax", Doc: "maximum VSPatchPosVar for normalizing the value for give-up weighing"}, {Name: "ProgressRateTau", Doc: "time constant for integrating the ProgressRate\nvalues over time"}, {Name: "ProgressRateDt", Doc: "1/tau"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Rubicon", IDName: "rubicon", Doc: "Rubicon implements core elements of the Rubicon goal-directed motivational\nmodel, representing the core brainstem-level (hypothalamus) bodily drives\nand resulting dopamine from US (unconditioned stimulus) inputs,\nsubsuming the earlier Rubicon model of primary value (PV)\nand learned value (LV), describing the functions of the Amygala,\nVentral Striatum, VTA and associated midbrain nuclei (LDT, LHb, RMTg).\nCore LHb (lateral habenula) and VTA (ventral tegmental area) dopamine\nare computed in equations using inputs from specialized network layers\n(LDTLayer driven by BLA, CeM layers, VSPatchLayer).\nThe Drives, Effort, US and resulting LHb PV dopamine computation all happens at the\nat the start of each trial (NewState, Step). The LV / CS dopamine is computed\ncycle-by-cycle by the VTA layer using parameters set by the VTA layer.\nRenders USLayer, PVLayer, DrivesLayer representations based on state updated here.", Fields: []types.Field{{Name: "NPosUSs", Doc: "number of possible positive US states and corresponding drives.\nThe first is always reserved for novelty / curiosity.\nMust be set programmatically via SetNUSs method,\nwhich allocates corresponding parameters."}, {Name: "NNegUSs", Doc: "number of possible phasic negative US states (e.g., shock, impact etc).\nMust be set programmatically via SetNUSs method, which allocates corresponding\nparameters."}, {Name: "NCosts", Doc: "number of possible costs, typically including accumulated time and effort costs.\nMust be set programmatically via SetNUSs method, which allocates corresponding\nparameters."}, {Name: "Drive", Doc: "parameters and state for built-in drives that form the core motivations\nof the agent, controlled by lateral hypothalamus and associated\nbody state monitoring such as glucose levels and thirst."}, {Name: "Urgency", Doc: "urgency (increasing pressure to do something) and parameters for\n\n\tupdating it. Raw urgency is incremented by same units as effort,\n\nbut is only reset with a positive US."}, {Name: "USs", Doc: "controls how positive and negative USs are weighted and integrated to\ncompute an overall PV primary value."}, {Name: "LHb", Doc: "lateral habenula (LHb) parameters and state, which drives\ndipping / pausing in dopamine when the predicted positive\noutcome > actual, or actual negative outcome > predicted.\nCan also drive bursting for the converse, and via matrix phasic firing."}, {Name: "GiveUp", Doc: "parameters for giving up based on PV pos - neg difference"}, {Name: "ValDecode", Doc: "population code decoding parameters for estimates from layers"}, {Name: "decodeActs"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.FieldValue", IDName: "field-value", Doc: "FieldValue holds the value of a field in a struct.", Fields: []types.Field{{Name: "Path"}, {Name: "Field"}, {Name: "Value"}, {Name: "Parent"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SynapseVars", IDName: "synapse-vars", Doc: "SynapseVars are the synapse variables representing synaptic weights, etc.\nThese do not depend on the data parallel index (di).\nSee [SynapseTraceVars] for variables that do depend on di."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SynapseTraceVars", IDName: "synapse-trace-vars", Doc: "SynapseTraceVars are synaptic variables that depend on the data\nparallel index, for accumulating learning traces and weight changes per data."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SynapseIndexVars", IDName: "synapse-index-vars", Doc: "SynapseIndexVars are synapse-level indexes used to access neurons and paths\nfrom the individual synapse level of processing."})
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import "cogentcore.org/core/math32"
//////// Simplified AK
//gosl:start
// AKsParams provides a highly simplified stateless A-type K+ channel
// that only has the voltage-gated activation (M) dynamic with a cutoff
// that ends up capturing a close approximation to the much more complex AK function.
// This is voltage gated with maximal activation around -37 mV.
// It is particularly important for counteracting the excitatory effects of
// voltage gated calcium channels which can otherwise drive runaway excitatory currents.
type AKsParams struct {
// strength of AK conductance as contribution to g_k(t) factor
// (which is then multiplied by gbar_k that provides pA unit scaling).
Gk float32 `default:"0.1,0.01,2"`
// Hf is a constant multiplier on the overall M factor result.
// It rescales M to a level consistent with the H gate
// being present at full strength.
Hf float32 `default:"0.076"`
// Mf is the multiplier factor for M, determines slope of function.
Mf float32 `default:"0.075"`
// Voff is the voltage offset for M function.
Voff float32 `default:"2"`
// Vmax is the voltage level of maximum channel opening: stays flat above that.
Vmax float32 `default:"-37"`
pad, pad1, pad2 int32
}
// Defaults sets default parameter values.
func (ap *AKsParams) Defaults() {
ap.Gk = 0.1
ap.Hf = 0.076
ap.Mf = 0.075
ap.Voff = 2
ap.Vmax = -37
}
func (ap *AKsParams) Update() {
}
func (ap *AKsParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return ap.Gk > 0
}
}
// MFromV returns the M gate function from v
func (ap *AKsParams) MFromV(v float32) float32 {
av := v
if v > ap.Vmax {
av = ap.Vmax
}
return ap.Hf / (1.0 + math32.FastExp(-ap.Mf*(av+ap.Voff)))
}
// Gak returns the AK conductance as a function of Vm:
// Gk * MFromV(v)
func (ap *AKsParams) Gak(v float32) float32 {
return ap.Gk * ap.MFromV(v)
}
//gosl:end
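// Editorial usage sketch (not part of the generated source): how the simplified
// AKs channel is evaluated in practice. Gak(v) returns Gk * M(v), where M
// saturates for voltages above Vmax (-37 mV). The function name aksExampleGV
// is hypothetical and included only for illustration.
func aksExampleGV() []float32 {
var ap AKsParams
ap.Defaults() // Gk=0.1, Hf=0.076, Mf=0.075, Voff=2, Vmax=-37
gs := make([]float32, 0, 151)
for v := float32(-100); v <= 50; v++ {
gs = append(gs, ap.Gak(v)) // conductance contribution at membrane potential v (mV)
}
return gs
}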
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
//go:generate core generate -add-types -gosl
import (
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type AKPlot struct {
// AKs simplified function
AKs chans.AKsParams
// AK function
AK AKParams
// starting voltage
Vstart float32 `default:"-100"`
// ending voltage
Vend float32 `default:"50"`
// voltage increment
Vstep float32 `default:"1"`
// number of time steps
TimeSteps int
// do spiking instead of voltage ramp
TimeSpike bool
// spiking frequency
SpikeFreq float32
// time-run starting membrane potential
TimeVstart float32
// time-run ending membrane potential
TimeVend float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures the plot
func (pl *AKPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("AK")
pl.Tabs = tabs
pl.AK.Defaults()
pl.AK.Gk = 1
pl.AKs.Defaults()
pl.AKs.Gk = 1
pl.Vstart = -100
pl.Vend = 100
pl.Vstep = 1
pl.TimeSteps = 200
pl.TimeSpike = true
pl.SpikeFreq = 50
pl.TimeVstart = -50
pl.TimeVend = -20
pl.Update()
}
func (pl *AKPlot) Update() {
pl.AK.Update()
}
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *AKPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
ap := &pl.AK
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
for vi := range nv {
v := pl.Vstart + float32(vi)*pl.Vstep
k := ap.KFromV(v)
a := ap.AlphaFromVK(v, k)
b := ap.BetaFromVK(v, k)
mt := ap.MTauFromAlphaBeta(a, b)
ht := ap.HTauFromV(v)
m := ap.MFromAlpha(a)
h := ap.HFromV(v)
g := ap.Gak(m, h)
ms := pl.AKs.MFromV(v)
gs := pl.AKs.Gak(v)
dir.Float64("V", nv).SetFloat1D(float64(v), vi)
dir.Float64("Gaks", nv).SetFloat1D(float64(gs), vi)
dir.Float64("Gak", nv).SetFloat1D(float64(g), vi)
dir.Float64("M", nv).SetFloat1D(float64(m), vi)
dir.Float64("H", nv).SetFloat1D(float64(h), vi)
dir.Float64("MTau", nv).SetFloat1D(float64(mt), vi)
dir.Float64("HTau", nv).SetFloat1D(float64(ht), vi)
dir.Float64("K", nv).SetFloat1D(float64(k), vi)
dir.Float64("Alpha", nv).SetFloat1D(float64(a), vi)
dir.Float64("Beta", nv).SetFloat1D(float64(b), vi)
dir.Float64("Ms", nv).SetFloat1D(float64(ms), vi)
}
metadata.SetDoc(dir.Float64("Gaks"), "Gaks is the simplified AK conductance, actually used in models")
metadata.SetDoc(dir.Float64("Ms"), "Ms is the simplified AK M gate, actually used in models")
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gak", "Gaks"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "AK G(V)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equations over time.
func (pl *AKPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
ap := &pl.AK
m := float32(0)
h := float32(1)
msdt := float32(0.001)
v := pl.TimeVstart
vinc := float32(2) * (pl.TimeVend - pl.TimeVstart) / float32(pl.TimeSteps)
isi := int(1000 / pl.SpikeFreq)
var g float32
for ti := range nv {
t := float32(ti) * msdt
k := ap.KFromV(v)
a := ap.AlphaFromVK(v, k)
b := ap.BetaFromVK(v, k)
mt := ap.MTauFromAlphaBeta(a, b)
ht := ap.HTauFromV(v)
g = ap.Gak(m, h)
dm, dh := pl.AK.DMHFromV(v, m, h)
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("Gak", nv).SetFloat1D(float64(g), ti)
dir.Float64("M", nv).SetFloat1D(float64(m), ti)
dir.Float64("H", nv).SetFloat1D(float64(h), ti)
dir.Float64("dM", nv).SetFloat1D(float64(dm), ti)
dir.Float64("dH", nv).SetFloat1D(float64(dh), ti)
dir.Float64("MTau", nv).SetFloat1D(float64(mt), ti)
dir.Float64("HTau", nv).SetFloat1D(float64(ht), ti)
dir.Float64("K", nv).SetFloat1D(float64(k), ti)
dir.Float64("Alpha", nv).SetFloat1D(float64(a), ti)
dir.Float64("Beta", nv).SetFloat1D(float64(b), ti)
m += dm // already in msec time constants
h += dh
if pl.TimeSpike {
if ti%isi < 3 {
v = pl.TimeVend
} else {
v = pl.TimeVstart
}
} else {
v += vinc
if v > pl.TimeVend {
v = pl.TimeVend
}
}
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gak", "M", "H"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "AK G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *AKPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
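// Editorial usage sketch (not part of the original source): the plot types in
// this package follow the same Config-then-Run pattern. runAKPlotExample is a
// hypothetical helper; passing a nil lab.Tabber is assumed to be acceptable
// because all tab plotting calls are guarded by a Tabs != nil check, so the
// results are simply written into the tensorfs directory.
func runAKPlotExample(root *tensorfs.Node) {
var pl AKPlot
pl.Config(root, nil)
pl.GVRun()   // conductance and gating variables as a function of voltage
pl.TimeRun() // conductance over time, with spiking or a voltage ramp
}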
// AKParams control an A-type K+ channel, which is voltage gated with maximal
// activation around -37 mV. It has two state variables, M (v-gated opening)
// and H (v-gated closing), which integrate with fast and slow time constants,
// respectively. H relatively quickly hits an asymptotic level of inactivation
// for sustained activity patterns.
// It is particularly important for counteracting the excitatory effects of
// voltage gated calcium channels which can otherwise drive runaway excitatory currents.
// See AKsParams for a much simpler version that works fine when full AP-like spikes are
// not simulated, as in our standard axon models.
type AKParams struct {
// Gk is the strength of the AK conductance contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0.1,0.01,1"`
// Beta multiplier for the beta term; 0.01446 for distal, 0.02039
// for proximal dendrites.
Beta float32 `default:"0.01446,0.02039"`
// Dm factor: 0.5 for distal, 0.25 for proximal
Dm float32 `default:"0.5,0.25"`
// Koff is the offset for the K factor: 1.8 for distal, 1.5 for proximal.
Koff float32 `default:"1.8,1.5"`
// Voff is the voltage offset for alpha and beta functions: 1 for distal,
// 11 for proximal.
Voff float32 `default:"1,11"`
// Hf is the h multiplier factor, 0.1133 for distal, 0.1112 for proximal.
Hf float32 `default:"0.1133,0.1112"`
pad, pad1 float32
}
// Defaults sets the parameters for distal dendrites
func (ap *AKParams) Defaults() {
ap.Gk = 0.01
ap.Distal()
}
func (ap *AKParams) Update() {
}
func (ap *AKParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return ap.Gk > 0
}
}
// Distal sets the parameters for distal dendrites
func (ap *AKParams) Distal() {
ap.Beta = 0.01446
ap.Dm = 0.5
ap.Koff = 1.8
ap.Voff = 1
ap.Hf = 0.1133
}
// Proximal sets parameters for proximal dendrites
func (ap *AKParams) Proximal() {
ap.Beta = 0.02039
ap.Dm = 0.25
ap.Koff = 1.5
ap.Voff = 11
ap.Hf = 0.1112
}
// AlphaFromVK returns the Alpha function from v (not normalized, must not exceed 0)
func (ap *AKParams) AlphaFromVK(v, k float32) float32 {
return math32.FastExp(0.03707 * k * (v - ap.Voff))
}
// BetaFromVK returns the Beta function from v (not normalized, must not exceed 0)
func (ap *AKParams) BetaFromVK(v, k float32) float32 {
return math32.FastExp(ap.Beta * k * (v - ap.Voff))
}
// KFromV returns the K value from v (not normalized, must not exceed 0)
func (ap *AKParams) KFromV(v float32) float32 {
return -ap.Koff - 1.0/(1.0+math32.FastExp((v+40)/5))
}
// HFromV returns the H gate value from v (not normalized, must not exceed 0)
func (ap *AKParams) HFromV(v float32) float32 {
return 1.0 / (1.0 + math32.FastExp(ap.Hf*(v+56)))
}
// HTauFromV returns the HTau time constant in msec from v (clipped above 0)
func (ap *AKParams) HTauFromV(v float32) float32 {
ve := min(v, 0)
tau := 0.26 * (ve + 50)
if tau < 2 {
tau = 2
}
return tau
}
// MFromAlpha returns the M gate factor from alpha
func (ap *AKParams) MFromAlpha(alpha float32) float32 {
return 1.0 / (1.0 + alpha)
}
// MTauFromAlphaBeta returns the MTau time constant in msec from alpha, beta
func (ap *AKParams) MTauFromAlphaBeta(alpha, beta float32) float32 {
return 1 + beta/(ap.Dm*(1+alpha)) // minimum of 1 msec
}
// DMHFromV returns the change at msec update scale in M, H factors
// as a function of V.
func (ap *AKParams) DMHFromV(v, m, h float32) (float32, float32) {
k := ap.KFromV(v)
a := ap.AlphaFromVK(v, k)
b := ap.BetaFromVK(v, k)
mt := ap.MTauFromAlphaBeta(a, b)
ht := ap.HTauFromV(v)
dm := (ap.MFromAlpha(a) - m) / mt
dh := (ap.HFromV(v) - h) / ht
return dm, dh
}
// Gak returns the AK net conductance from m, h gates.
func (ap *AKParams) Gak(m, h float32) float32 {
return ap.Gk * m * h
}
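// Editorial sketch (not part of the original source): the full AKParams channel
// is integrated with simple msec-scale Euler updates of the M and H gates via
// DMHFromV, with the conductance given by Gak(m, h). akExampleStep is a
// hypothetical helper illustrating one such update step.
func akExampleStep(ap *AKParams, v float32, m, h *float32) float32 {
dm, dh := ap.DMHFromV(v, *m, *h) // deltas already reflect the msec time constants
*m += dm
*h += dh
return ap.Gak(*m, *h)
}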
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"math"
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type GABABPlot struct {
// standard chans version of GABAB
GABAB chans.GABABParams `display:"add-fields"`
// multiplier on GABA-B as function of voltage
Vgain float64 `default:"0.1"`
// voltage offset for GABA-B exponential function
Voff float64 `default:"10"`
// GABAb reversal / driving potential
Erev float64 `default:"-90"`
// starting voltage
Vstart float64 `default:"-90"`
// ending voltage
Vend float64 `default:"10"`
// voltage increment
Vstep float64 `default:"1"`
// max number of spikes
Smax int `default:"30"`
// total number of time steps to take
TimeSteps int
// time increment per step
TimeInc float64
// time in msec for inputs to remain on in TimeRun
TimeIn int
// frequency of spiking inputs at start of TimeRun
TimeHz float64
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *GABABPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("GabaB")
pl.Tabs = tabs
pl.GABAB.Defaults()
pl.GABAB.GiSpike = 1
pl.Vgain = 0.1
pl.Voff = 10
pl.Erev = -90
pl.Vstart = -90
pl.Vend = 10
pl.Vstep = 1
pl.Smax = 30
pl.TimeSteps = 500
pl.TimeInc = .001
pl.TimeIn = 100
pl.TimeHz = 50
pl.Update()
}
// Update updates computed values
func (pl *GABABPlot) Update() {
pl.GABAB.Update()
}
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *GABABPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
for vi := range nv {
v := pl.Vstart + float64(vi)*pl.Vstep
g := float64(pl.GABAB.Gk) / (1 + math.Exp(pl.Vgain*((v-pl.Erev)+pl.Voff)))
i := (v - pl.Erev) * g
dir.Float64("V", nv).SetFloat1D(v, vi)
dir.Float64("Ggaba_b", nv).SetFloat1D(g, vi)
dir.Float64("Igaba_b", nv).SetFloat1D(i, vi)
}
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Ggaba_b", "Igaba_b"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "GABA-B G(V)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// GSRun plots conductance as function of spiking rate.
func (pl *GABABPlot) GSRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Spike")
nv := int(float64(pl.Smax) / pl.Vstep)
for si := range nv {
s := float64(si) * pl.Vstep
g := 1.0 / (1.0 + math.Exp(-(s-7.1)/1.4))
dir.Float64("S", nv).SetFloat1D(s, si)
dir.Float64("GgabaB_max", nv).SetFloat1D(g, si)
}
plot.SetFirstStyler(dir.Float64("S"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"GgabaB_max"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "GABAB G(spike)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equations over time.
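// As implemented below (a descriptive reading of the update equations):
// X jumps with the spike-driven input (a sigmoid of the input rate) and decays
// with the Decay time constant, while M relaxes toward TauFact*X with the Rise
// time constant, yielding the characteristically slow double-exponential
// GABA-B conductance time course.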
func (pl *GABABPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
time := 0.0
m := 0.0
x := 0.0
for ti := range nv {
sin := 0.0
if ti >= 10 && ti < (10+pl.TimeIn) {
sin = float64(pl.TimeHz)
}
// record starting state first, then update
dir.Float64("Time", nv).SetFloat1D(time, ti)
dir.Float64("GababM", nv).SetFloat1D(m, ti)
dir.Float64("GababX", nv).SetFloat1D(x, ti)
gis := 1.0 / (1.0 + math.Exp(-(sin-7.1)/1.4))
dM := (float64(pl.GABAB.TauFact)*x - m) / float64(pl.GABAB.Rise)
dX := -x / float64(pl.GABAB.Decay)
m += dM
x += gis + dX
dir.Float64("dM", nv).SetFloat1D(dM, ti)
dir.Float64("dX", nv).SetFloat1D(dX, ti)
dir.Float64("Xmax", nv).SetFloat1D(gis, ti)
time += pl.TimeInc
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"GababM", "GababX"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "GABAB G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *GABABPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GSRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type KirPlot struct {
// Kir function
Kir chans.KirParams `display:"add-fields"`
// Vstart is starting voltage
Vstart float32 `default:"-100"`
// Vend is ending voltage
Vend float32 `default:"100"`
// Vstep is voltage increment
Vstep float32 `default:"1"`
// TimeSteps is number of time steps
TimeSteps int
// do spiking instead of voltage ramp
TimeSpike bool
// spiking frequency
SpikeFreq float32
// time-run starting membrane potential
TimeVstart float32
// time-run ending membrane potential
TimeVend float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *KirPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("kIR")
pl.Tabs = tabs
pl.Kir.Defaults()
pl.Kir.Gk = 1
pl.Vstart = -100
pl.Vend = 0
pl.Vstep = 1
pl.TimeSteps = 300
pl.TimeSpike = true
pl.SpikeFreq = 50
pl.TimeVstart = -70
pl.TimeVend = -50
pl.Update()
}
// Update updates computed values
func (pl *KirPlot) Update() {
}
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *KirPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
mp := &pl.Kir
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
m := mp.MinfRest()
for vi := 0; vi < nv; vi++ {
v := pl.Vstart + float32(vi)*pl.Vstep
g := mp.Gkir(v, m)
dm := mp.DM(v, m)
m += dm
minf := mp.Minf(v)
mtau := mp.MTau(v)
dir.Float64("V", nv).SetFloat1D(float64(v), vi)
dir.Float64("Gkir", nv).SetFloat1D(float64(g), vi)
dir.Float64("M", nv).SetFloat1D(float64(m), vi)
dir.Float64("Minf", nv).SetFloat1D(float64(minf), vi)
dir.Float64("Mtau", nv).SetFloat1D(float64(mtau), vi)
}
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gkir", "M"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "kIR G(V)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
func (pl *KirPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
mp := &pl.Kir
m := mp.MinfRest()
msdt := float32(0.001)
v := pl.TimeVstart
vinc := float32(2) * (pl.TimeVend - pl.TimeVstart) / float32(pl.TimeSteps)
isi := int(1000 / pl.SpikeFreq)
for ti := range nv {
t := float32(ti+1) * msdt
g := mp.Gkir(v, m)
dm := mp.DM(v, m)
m += dm
minf := mp.Minf(v)
mtau := mp.MTau(v)
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("V", nv).SetFloat1D(float64(v), ti)
dir.Float64("Gkir", nv).SetFloat1D(float64(g), ti)
dir.Float64("M", nv).SetFloat1D(float64(m), ti)
dir.Float64("Minf", nv).SetFloat1D(float64(minf), ti)
dir.Float64("Mtau", nv).SetFloat1D(float64(mtau), ti)
if pl.TimeSpike {
si := ti % isi
if si == 0 {
v = pl.TimeVend
} else {
v = pl.TimeVstart + (float32(si)/float32(isi))*(pl.TimeVend-pl.TimeVstart)
}
} else {
v += vinc
if v > pl.TimeVend {
v = pl.TimeVend
}
}
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.On = true
s.Plot.Title = "Gkir G(t)"
s.RightY = true
})
ons := []string{"Gkir", "M"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *KirPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type MahpPlot struct {
// mAHP function
Mahp chans.MahpParams `display:"add-fields"`
// Vstart is starting voltage
Vstart float32 `default:"-100"`
// Vend is ending voltage
Vend float32 `default:"100"`
// Vstep is voltage increment
Vstep float32 `default:"1"`
// TimeSteps is number of time steps
TimeSteps int
// do spiking instead of voltage ramp
TimeSpike bool
// spiking frequency
SpikeFreq float32
// time-run starting membrane potential
TimeVstart float32
// time-run ending membrane potential
TimeVend float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *MahpPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("Mahp")
pl.Tabs = tabs
pl.Mahp.Defaults()
pl.Mahp.Gk = 0.1
pl.Vstart = -100
pl.Vend = 100
pl.Vstep = 1
pl.TimeSteps = 300
pl.TimeSpike = true
pl.SpikeFreq = 50
pl.TimeVstart = -70
pl.TimeVend = -50
pl.Update()
}
// Update updates computed values
func (pl *MahpPlot) Update() {
}
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *MahpPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
mp := &pl.Mahp
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
for vi := range nv {
vbio := pl.Vstart + float32(vi)*pl.Vstep
var ninf, tau float32
mp.NinfTauFromV(vbio, &ninf, &tau)
dir.Float64("V", nv).SetFloat1D(float64(vbio), vi)
dir.Float64("Ninf", nv).SetFloat1D(float64(ninf), vi)
dir.Float64("Tau", nv).SetFloat1D(float64(tau), vi)
}
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Ninf", "Tau"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "Mahp G(V)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
func (pl *MahpPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
mp := &pl.Mahp
var n, tau float32
mp.NinfTauFromV(pl.TimeVstart, &n, &tau)
kna := float32(0)
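// Editorial note: kna tracks a simple KNa-style adaptation trace for comparison
// in the plot; it increments with each spike and decays between spikes below,
// but does not feed into the mAHP conductance computed here.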
msdt := float32(0.001)
v := pl.TimeVstart
vinc := float32(2) * (pl.TimeVend - pl.TimeVstart) / float32(pl.TimeSteps)
isi := int(1000 / pl.SpikeFreq)
for ti := range nv {
t := float32(ti+1) * msdt
var ninf, tau float32
mp.NinfTauFromV(v, &ninf, &tau)
g := mp.GmAHP(v, &n)
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("V", nv).SetFloat1D(float64(v), ti)
dir.Float64("Gmahp", nv).SetFloat1D(float64(g), ti)
dir.Float64("N", nv).SetFloat1D(float64(n), ti)
dir.Float64("Ninf", nv).SetFloat1D(float64(ninf), ti)
dir.Float64("Tau", nv).SetFloat1D(float64(tau), ti)
dir.Float64("Kna", nv).SetFloat1D(float64(kna), ti)
if pl.TimeSpike {
si := ti % isi
if si == 0 {
v = pl.TimeVend
kna += 0.05 * (1 - kna)
} else {
v = pl.TimeVstart + (float32(si)/float32(isi))*(pl.TimeVend-pl.TimeVstart)
kna -= kna / 50
}
} else {
v += vinc
if v > pl.TimeVend {
v = pl.TimeVend
}
}
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gmahp", "N"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "Mahp G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *MahpPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"math"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type NMDAPlot struct {
// standard NMDA implementation in chans
NMDA chans.NMDAParams `display:"add-fields"`
// multiplier on NMDA as function of voltage
Vgain float64 `default:"0.062"`
// denominator of NMDA function
Norm float64 `default:"3.57"`
// reversal / driving potential
Erev float64 `default:"0"`
// starting voltage
Vstart float64 `default:"-90"`
// ending voltage
Vend float64 `default:"10"`
// voltage increment
Vstep float64 `default:"1"`
// number of 1msec time steps for time run
TimeSteps int
// clamped voltage for TimeRun
TimeV float64
// time in msec for inputs to remain on in TimeRun
TimeIn int
// frequency of spiking inputs at start of TimeRun
TimeHz float64
// proportion activation of NMDA channels per spike
TimeGin float64
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *NMDAPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("NMDA")
pl.Tabs = tabs
pl.NMDA.Defaults()
pl.NMDA.Ge = 1
pl.NMDA.Voff = 0
pl.Vgain = 0.062
pl.Norm = 3.57
pl.Erev = 0
pl.Vstart = -90 // use -1 to 1 to test values around 0
pl.Vend = 10
pl.Vstep = 1
pl.TimeSteps = 500
pl.TimeV = -50
pl.TimeIn = 100
pl.TimeHz = 50
pl.TimeGin = .5
pl.Update()
}
// Update updates computed values
func (pl *NMDAPlot) Update() {
}
// Equation here:
// https://brian2.readthedocs.io/en/stable/examples/frompapers.Brunel_Wang_2001.html
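// As used in GVRun below: g(V) = Ge / (1 + (MgC/3.57) * exp(-0.062*V)),
// i.e., the Mg++ block term from Brunel & Wang (2001), with Vgain = 0.062
// and Norm = 3.57 as configured in Config above.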
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *NMDAPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
mgf := float64(pl.NMDA.MgC) / pl.Norm
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
for vi := range nv {
v := pl.Vstart + float64(vi)*pl.Vstep
g := float64(pl.NMDA.Ge) / (1 + mgf*math.Exp(-pl.Vgain*v))
i := (pl.Erev - v) * g
if v >= pl.Erev {
i = 0
}
ca := pl.NMDA.CaFromV(float32(v))
dir.Float64("V", nv).SetFloat1D(v, vi)
dir.Float64("Gnmda", nv).SetFloat1D(g, vi)
dir.Float64("Inmda", nv).SetFloat1D(i, vi)
dir.Float64("Ca", nv).SetFloat1D(float64(ca), vi)
}
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gnmda", "Inmda"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "NMDA G(V)"
})
}
metadata.SetDoc(dir.Float64("Gnmda_std"), "standard compute function used in axon sims")
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
func (pl *NMDAPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
v := pl.TimeV
g := 0.0
nmda := 0.0
spikeInt := int(1000 / pl.TimeHz)
for ti := range nv {
t := float64(ti) * .001
gin := 0.0
if ti >= 10 && ti < (10+pl.TimeIn) && (ti-10)%spikeInt == 0 {
gin = pl.TimeGin
}
nmda += gin*(1-nmda) - (nmda / float64(pl.NMDA.Tau))
g = nmda / (1 + math.Exp(-pl.Vgain*v)/pl.Norm)
dir.Float64("Time", nv).SetFloat1D(t, ti)
dir.Float64("Gnmda", nv).SetFloat1D(g, ti)
dir.Float64("NMDA", nv).SetFloat1D(nmda, ti)
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Gnmda", "NMDA"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "NMDA G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *NMDAPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type SahpPlot struct {
// sAHP function
Sahp chans.SahpParams `display:"add-fields"`
// starting calcium
CaStart float32 `default:"0"`
// ending calcium
CaEnd float32 `default:"1.5"`
// calcium increment
CaStep float32 `default:"0.01"`
// number of time steps
TimeSteps int
// time-run starting calcium
TimeCaStart float32
// time-run CaD value at end of each theta cycle
TimeCaD float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *SahpPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("sAHP")
pl.Tabs = tabs
pl.Sahp.Defaults()
pl.Sahp.Gk = 1
pl.CaStart = 0
pl.CaEnd = 1.5
pl.CaStep = 0.01
pl.TimeSteps = 30
pl.TimeCaStart = 0
pl.TimeCaD = 1
pl.Update()
}
// Update updates computed values
func (pl *SahpPlot) Update() {
}
// GCaRun plots the conductance G (and other variables) as a function of Ca.
func (pl *SahpPlot) GCaRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Ca")
mp := &pl.Sahp
nv := int((pl.CaEnd - pl.CaStart) / pl.CaStep)
for vi := range nv {
ca := pl.CaStart + float32(vi)*pl.CaStep
var ninf, tau float32
mp.NinfTauFromCa(ca, &ninf, &tau)
dir.Float64("Ca", nv).SetFloat1D(float64(ca), vi)
dir.Float64("Ninf", nv).SetFloat1D(float64(ninf), vi)
dir.Float64("Tau", nv).SetFloat1D(float64(tau), vi)
}
plot.SetFirstStyler(dir.Float64("Ca"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Ninf"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "sAHP G(Ca)"
})
}
plot.SetFirstStyler(dir.Float64("Tau"), func(s *plot.Style) {
s.On = true
s.RightY = true
})
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
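// Per the update loop below (a descriptive reading, not authoritative): each
// step, N is incremented by DNFromV toward its Ca-dependent steady state, the
// sAHP conductance is GsAHP(N), and Ca is integrated via CaInt using the
// per-theta TimeCaD value, so the plot shows the slow buildup of the sAHP
// current across theta cycles.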
func (pl *SahpPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
mp := &pl.Sahp
var n, tau float32
mp.NinfTauFromCa(pl.TimeCaStart, &n, &tau)
ca := pl.TimeCaStart
for ti := range nv {
t := float32(ti + 1)
var ninf, tau float32
mp.NinfTauFromCa(ca, &ninf, &tau)
dn := mp.DNFromV(ca, n)
g := mp.GsAHP(n)
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("Ca", nv).SetFloat1D(float64(ca), ti)
dir.Float64("Gsahp", nv).SetFloat1D(float64(g), ti)
dir.Float64("N", nv).SetFloat1D(float64(n), ti)
dir.Float64("dN", nv).SetFloat1D(float64(dn), ti)
dir.Float64("Ninf", nv).SetFloat1D(float64(ninf), ti)
dir.Float64("Tau", nv).SetFloat1D(float64(tau), ti)
ca = mp.CaInt(ca, pl.TimeCaD)
n += dn
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Ca", "Gsahp", "N"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "sAHP G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *SahpPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GCaRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
"github.com/emer/axon/v2/kinase"
)
type SKCaPlot struct {
// SKCa params
SKCa chans.SKCaParams `display:"add-fields"`
// time constants for integrating Ca from spiking across M, P and D cascading levels
CaParams kinase.CaSpikeParams
// threshold of SK M gating factor above which the neuron cannot spike
NoSpikeThr float32 `default:"0.5"`
// Ca conc increment for M gating func plot
CaStep float32 `default:"0.05"`
// number of time steps
TimeSteps int
// do spiking instead of Ca conc ramp
TimeSpike bool
// spiking frequency
SpikeFreq float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *SKCaPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("SKCa")
pl.Tabs = tabs
pl.SKCa.Defaults()
pl.SKCa.Gk = 1
pl.CaParams.Defaults()
pl.CaStep = .05
pl.TimeSteps = 200 * 3
pl.TimeSpike = true
pl.NoSpikeThr = 0.5
pl.SpikeFreq = 100
pl.Update()
}
// Update updates computed values
func (pl *SKCaPlot) Update() {
}
// GCaRun plots the conductance G (and other variables) as a function of Ca.
func (pl *SKCaPlot) GCaRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
nv := int(1.0 / pl.CaStep)
for vi := range nv {
cai := float32(vi) * pl.CaStep
mh := pl.SKCa.MAsympHill(cai)
mg := pl.SKCa.MAsympGW06(cai)
dir.Float64("Ca", nv).SetFloat1D(float64(cai), vi)
dir.Float64("Mhill", nv).SetFloat1D(float64(mh), vi)
dir.Float64("Mgw06", nv).SetFloat1D(float64(mg), vi)
}
plot.SetFirstStyler(dir.Float64("Ca"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Mhill", "Mgw06"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "sK Ca G(Ca)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
func (pl *SKCaPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
caIn := float32(1)
caR := float32(0)
m := float32(0)
spike := float32(0)
msdt := float32(0.001)
// caM := float32(0)
// caP := float32(0)
caD := float32(0)
isi := int(1000 / pl.SpikeFreq)
trial := 0
for ti := range nv {
trial = ti / 200
t := float32(ti) * msdt
m = pl.SKCa.MFromCa(caR, m)
pl.SKCa.CaInRFromSpike(spike, caD, &caIn, &caR)
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("Spike", nv).SetFloat1D(float64(spike), ti)
// dir.Float64("CaM", nv).SetFloat1D(float64(caM), ti)
// dir.Float64("CaP", nv).SetFloat1D(float64(caP), ti)
// dir.Float64("CaD", nv).SetFloat1D(float64(caD), ti)
dir.Float64("CaIn", nv).SetFloat1D(float64(caIn), ti)
dir.Float64("CaR", nv).SetFloat1D(float64(caR), ti)
dir.Float64("M", nv).SetFloat1D(float64(m), ti)
if m < pl.NoSpikeThr && trial%2 == 0 && ti%isi == 0 { // spike on even trials
spike = 1
} else {
spike = 0
}
// todo: update
// ss.CaParams.FromSpike(spike, &caM, &caP, &caD)
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"Spike", "CaIn", "CaR", "M"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "sK Ca G(t)"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *SKCaPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GCaRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"math"
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/kinase"
)
type SynCaPlot struct {
// CaSpike has the spike-driven Ca integration parameters.
CaSpike kinase.CaSpikeParams `display:"inline"`
// CaDt has the cascaded Ca integration time constants.
CaDt kinase.CaDtParams `display:"inline"`
// Minit is the initial CaM value.
Minit float64
// Pinit is the initial CaP value.
Pinit float64
// Dinit is the initial CaD value.
Dinit float64
// adjustment to dt to account for discrete time updating
MdtAdj float64 `default:"0,0.11"`
// adjustment to dt to account for discrete time updating
PdtAdj float64 `default:"0,0.03"`
// adjustment to dt to account for discrete time updating
DdtAdj float64 `default:"0,0.03"`
// number of time steps
TimeSteps int
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *SynCaPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("SynCa")
pl.Tabs = tabs
pl.CaSpike.Defaults()
pl.CaDt.Defaults()
pl.Minit = 0.7
pl.Pinit = 0.5
pl.Dinit = 0.3
pl.MdtAdj = 0
pl.PdtAdj = 0
pl.DdtAdj = 0
pl.TimeSteps = 1000
pl.Update()
}
// Update updates computed values
func (pl *SynCaPlot) Update() {
}
// CaAtT computes the 3 Ca values at (currentTime + ti), assuming 0
// new Ca incoming (no spiking). It uses closed-form exponential functions.
func (pl *SynCaPlot) CaAtT(ti int32, caM, caP, caD *float32) {
kp := &pl.CaDt
t := float32(ti)
mdt := kp.MDt
pdt := kp.PDt
ddt := kp.DDt
// if kp.ExpAdj.IsTrue() { // adjust for discrete
// mdt *= 1.11
// pdt *= 1.03
// ddt *= 1.03
// }
mi := *caM
pi := *caP
di := *caD
*caM = mi * math32.FastExp(-t*mdt)
em := math32.FastExp(t * mdt)
ep := math32.FastExp(t * pdt)
*caP = pi*math32.FastExp(-t*pdt) - (pdt*mi*math32.FastExp(-t*(mdt+pdt))*(em-ep))/(pdt-mdt)
epd := math32.FastExp(t * (pdt + ddt))
emd := math32.FastExp(t * (mdt + ddt))
emp := math32.FastExp(t * (mdt + pdt))
*caD = pdt*ddt*mi*math32.FastExp(-t*(mdt+pdt+ddt))*(ddt*(emd-epd)+(pdt*(epd-emp))+mdt*(emp-emd))/((mdt-pdt)*(mdt-ddt)*(pdt-ddt)) - ddt*pi*math32.FastExp(-t*(pdt+ddt))*(ep-math32.FastExp(t*ddt))/(ddt-pdt) + di*math32.FastExp(-t*ddt)
}
// CurCa returns the current Ca* values, dealing with updating for
// optimized spike-time update versions.
// ctime is current time in msec, and utime is last update time (-1 if never)
// to avoid running out of float32 precision, ctime should be reset periodically
// along with the Ca values -- in axon this happens during SlowAdapt.
func (pl *SynCaPlot) CurCa(ctime, utime float32, caM, caP, caD *float32) {
kp := &pl.CaSpike
isi := int32(ctime - utime)
if isi <= 0 {
return
}
for j := int32(0); j < isi; j++ {
kp.Dt.FromCa(0, caM, caP, caD) // just decay to 0
}
return
}
// TimeRun runs the equation.
func (pl *SynCaPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("Ca(t)")
nv := 200
mi := pl.Minit
pi := pl.Pinit
di := pl.Dinit
mdt := float64(pl.CaDt.MDt) * (1.0 + pl.MdtAdj)
pdt := float64(pl.CaDt.PDt) * (1.0 + pl.PdtAdj)
ddt := float64(pl.CaDt.DDt) * (1.0 + pl.DdtAdj)
for ti := range nv {
t := float64(ti)
m := pl.Minit * math.Exp(-t*mdt)
em := math.Exp(t * mdt)
ep := math.Exp(t * pdt)
p := pl.Pinit*math.Exp(-t*pdt) - (pdt*pl.Minit*math.Exp(-t*(mdt+pdt))*(em-ep))/(pdt-mdt)
epd := math.Exp(t * (pdt + ddt))
emd := math.Exp(t * (mdt + ddt))
emp := math.Exp(t * (mdt + pdt))
d := pdt*ddt*pl.Minit*math.Exp(-t*(mdt+pdt+ddt))*(ddt*(emd-epd)+(pdt*(epd-emp))+mdt*(emp-emd))/((mdt-pdt)*(mdt-ddt)*(pdt-ddt)) - ddt*pl.Pinit*math.Exp(-t*(pdt+ddt))*(ep-math.Exp(t*ddt))/(ddt-pdt) + pl.Dinit*math.Exp(-t*ddt)
// test eqs:
caM := float32(pl.Minit)
caP := float32(pl.Pinit)
caD := float32(pl.Dinit)
pl.CaAtT(int32(ti), &caM, &caP, &caD)
m = float64(caM)
p = float64(caP)
d = float64(caD)
caM = float32(pl.Minit)
caP = float32(pl.Pinit)
caD = float32(pl.Dinit)
pl.CurCa(float32(ti), 0, &caM, &caP, &caD)
mi4 := float64(caM)
pi4 := float64(caP)
di4 := float64(caD)
dir.Float64("t", nv).SetFloat1D(t, ti)
dir.Float64("mi", nv).SetFloat1D(mi, ti)
dir.Float64("pi", nv).SetFloat1D(pi, ti)
dir.Float64("di", nv).SetFloat1D(di, ti)
dir.Float64("mi4", nv).SetFloat1D(mi4, ti)
dir.Float64("pi4", nv).SetFloat1D(pi4, ti)
dir.Float64("di4", nv).SetFloat1D(di4, ti)
dir.Float64("m", nv).SetFloat1D(m, ti)
dir.Float64("p", nv).SetFloat1D(p, ti)
dir.Float64("d", nv).SetFloat1D(d, ti)
mi += float64(pl.CaDt.MDt) * (0 - mi)
pi += float64(pl.CaDt.PDt) * (mi - pi)
di += float64(pl.CaDt.DDt) * (pi - di)
}
plot.SetFirstStyler(dir.Float64("t"), func(s *plot.Style) {
s.Role = plot.X
})
ons := []string{"m", "p", "d"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
s.Plot.Title = "SynCa"
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *SynCaPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chanplots
import (
"cogentcore.org/core/core"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/lab"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans"
)
type VGCCPlot struct {
// VGCC function
VGCC chans.VGCCParams `display:"add-fields"`
// starting voltage
Vstart float32 `default:"-90"`
// ending voltage
Vend float32 `default:"10"`
// voltage increment
Vstep float32 `default:"1"`
// number of time steps
TimeSteps int
// frequency of spiking inputs during TimeRun
TimeHz float32
// time-run starting membrane potential
TimeVstart float32
// time-run ending membrane potential
TimeVend float32
Dir *tensorfs.Node `display:"-"`
Tabs lab.Tabber `display:"-"`
}
// Config configures all the elements using the standard functions
func (pl *VGCCPlot) Config(parent *tensorfs.Node, tabs lab.Tabber) {
pl.Dir = parent.Dir("VGCC")
pl.Tabs = tabs
pl.VGCC.Defaults()
pl.VGCC.Ge = 1
pl.Vstart = -90
pl.Vend = 10
pl.Vstep = 1
pl.TimeSteps = 200
pl.TimeHz = 50
pl.TimeVstart = -70
pl.TimeVend = -20
pl.Update()
}
// Update updates computed values
func (pl *VGCCPlot) Update() {
}
// GVRun plots the conductance G (and other variables) as a function of V.
func (pl *VGCCPlot) GVRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_V")
nv := int((pl.Vend - pl.Vstart) / pl.Vstep)
for vi := range nv {
v := pl.Vstart + float32(vi)*pl.Vstep
g := pl.VGCC.GFromV(v)
m := pl.VGCC.MFromV(v)
h := pl.VGCC.HFromV(v)
dm := pl.VGCC.DeltaMFromV(v, m)
dh := pl.VGCC.DeltaHFromV(v, h)
dir.Float64("V", nv).SetFloat1D(float64(v), vi)
dir.Float64("Gvgcc", nv).SetFloat1D(float64(g), vi)
dir.Float64("M", nv).SetFloat1D(float64(m), vi)
dir.Float64("H", nv).SetFloat1D(float64(h), vi)
dir.Float64("dM", nv).SetFloat1D(float64(dm), vi)
dir.Float64("dH", nv).SetFloat1D(float64(dh), vi)
}
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.Role = plot.X
})
plot.SetFirstStyler(dir.Float64("Gvgcc"), func(s *plot.Style) {
s.On = true
s.Plot.Title = "VGCC G(t)"
s.RightY = true
})
ons := []string{"M", "H"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
// TimeRun runs the equation over time.
func (pl *VGCCPlot) TimeRun() { //types:add
pl.Update()
dir := pl.Dir.Dir("G_Time")
nv := pl.TimeSteps
m := float32(0)
h := float32(1)
msdt := float32(0.001)
v := pl.TimeVstart
isi := int(1000 / pl.TimeHz)
var g float32
for ti := range nv {
t := float32(ti) * msdt
g = pl.VGCC.Gvgcc(v, m, h)
dm := pl.VGCC.DeltaMFromV(v, m)
dh := pl.VGCC.DeltaHFromV(v, h)
m += dm
h += dh
dir.Float64("Time", nv).SetFloat1D(float64(t), ti)
dir.Float64("V", nv).SetFloat1D(float64(v), ti)
dir.Float64("Gvgcc", nv).SetFloat1D(float64(g), ti)
dir.Float64("M", nv).SetFloat1D(float64(m), ti)
dir.Float64("H", nv).SetFloat1D(float64(h), ti)
dir.Float64("dM", nv).SetFloat1D(float64(dm), ti)
dir.Float64("dH", nv).SetFloat1D(float64(dh), ti)
if ti%isi < 3 {
v = pl.TimeVend
} else {
v = pl.TimeVstart
}
}
plot.SetFirstStyler(dir.Float64("Time"), func(s *plot.Style) {
s.Role = plot.X
})
plot.SetFirstStyler(dir.Float64("Gvgcc"), func(s *plot.Style) {
s.On = true
s.Plot.Title = "VGCC G(t)"
s.RightY = true
})
plot.SetFirstStyler(dir.Float64("V"), func(s *plot.Style) {
s.On = true
s.RightY = true
})
ons := []string{"M", "H"}
for _, on := range ons {
plot.SetFirstStyler(dir.Float64(on), func(s *plot.Style) {
s.On = true
})
}
if pl.Tabs != nil {
pl.Tabs.AsLab().PlotTensorFS(dir)
}
}
func (pl *VGCCPlot) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.GVRun).SetIcon(icons.PlayArrow)
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(pl.TimeRun).SetIcon(icons.PlayArrow)
})
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package chans provides standard neural conductance channels for computing
a point-neuron approximation based on the standard equivalent RC circuit
model of a neuron (i.e., basic Ohms law equations).
Includes excitatory, leak, inhibition, and dynamic potassium channels.
*/
package chans
//go:generate core generate -add-types -gosl
//gosl:start
// Chans are ion channels used in computing point-neuron activation function.
type Chans struct {
// excitatory sodium (Na) AMPA channels activated by synaptic glutamate.
E float32
// constant leak (potassium, K+) channels, which determine the resting potential
// (typically above the K+ reversal potential).
L float32
// inhibitory chloride (Cl-) channels activated by synaptic GABA.
I float32
// gated / active potassium channels. Typically hyperpolarizing
// relative to leak / rest.
K float32
}
// SetAll sets all the values.
func (ch *Chans) SetAll(e, l, i, k float32) {
ch.E = e
ch.L = l
ch.I = i
ch.K = k
}
//gosl:end
// SetFromOtherMinus sets all the values from other Chans minus given value
func (ch *Chans) SetFromOtherMinus(oth Chans, minus float32) {
ch.E, ch.L, ch.I, ch.K = oth.E-minus, oth.L-minus, oth.I-minus, oth.K-minus
}
// SetFromMinusOther sets all the values from given value minus other Chans
func (ch *Chans) SetFromMinusOther(minus float32, oth Chans) {
ch.E, ch.L, ch.I, ch.K = minus-oth.E, minus-oth.L, minus-oth.I, minus-oth.K
}
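// The following is an illustrative sketch (not part of the library) of using
// Chans to hold per-channel driving potentials: SetFromOtherMinus subtracts the
// membrane potential from a set of reversal potentials, and the resulting
// driving potentials multiply the corresponding conductances per the point-neuron
// Ohm's-law current equation mentioned in the package doc. All numeric values
// below are hypothetical.
func exampleChansCurrent(vm float32) float32 {
	var erev, drive, g Chans
	erev.SetAll(1.0, 0.3, 0.25, 0.1)  // hypothetical normalized reversal potentials
	drive.SetFromOtherMinus(erev, vm) // driving potentials: E_rev - Vm per channel
	g.SetAll(0.5, 0.1, 0.2, 0.05)     // hypothetical conductances
	// net current = sum of g * (E_rev - Vm) across channels
	return g.E*drive.E + g.L*drive.L + g.I*drive.I + g.K*drive.K
}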
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import (
"cogentcore.org/core/math32"
)
//gosl:start
// GABABParams control the GABA-B inhibitory channel, activated by the GABA
// inhibitory neurotransmitter and coupled to the GIRK G-protein coupled inwardly
// rectifying potassium (K) channel. It is ubiquitous in the brain, and critical
// for stability of spiking patterns over time in axon. The inward rectification
// is caused by a Mg+ ion block *from the inside* of the neuron,
// which means that these channels are most open when the neuron is hyperpolarized
// (inactive), and thus it serves to keep inactive neurons inactive.
// Based on Thomson & Destexhe (1999).
type GABABParams struct {
// Gk is the strength of GABA-B conductance as contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
// The 0.015 default is a high value that works well in smaller networks.
// Larger networks may benefit from lower levels (e.g., 0.012).
// GababM activation factor can become large, so that overall GgabaB = ~50 nS.
Gk float32 `default:"0.015,0.012,0"`
// Rise is the rise time for bi-exponential time dynamics of GABA-B, in ms.
Rise float32 `default:"45"`
// Decay is the decay time for bi-exponential time dynamics of GABA-B, in ms.
Decay float32 `default:"50"`
// Gbase is the baseline level of GABA-B channels open independent of
// inhibitory input (is added to spiking-produced conductance).
Gbase float32 `default:"0.2"`
// GiSpike is the multiplier for converting Gi to equivalent GABA spikes.
GiSpike float32 `default:"10"`
// MaxTime is the time offset when peak conductance occurs, in msec, computed
// from Rise and Decay.
MaxTime float32 `edit:"-"`
// TauFact is the time constant factor used in integration:
// (Decay / Rise) ^ (Rise / (Decay - Rise))
TauFact float32 `display:"-"`
// RiseDt = 1 / Rise
RiseDt float32 `display:"-" edit:"-"`
// DecayDt = 1 / Decay
DecayDt float32 `display:"-" edit:"-"`
pad, pad1, pad2 float32
}
func (gp *GABABParams) Defaults() {
gp.Gk = 0.015
gp.Rise = 45
gp.Decay = 50
gp.Gbase = 0.2
gp.GiSpike = 10
gp.Update()
}
func (gp *GABABParams) Update() {
gp.TauFact = math32.Pow(gp.Decay/gp.Rise, gp.Rise/(gp.Decay-gp.Rise))
gp.MaxTime = ((gp.Rise * gp.Decay) / (gp.Decay - gp.Rise)) * math32.Log(gp.Decay/gp.Rise)
gp.RiseDt = 1.0 / gp.Rise
gp.DecayDt = 1.0 / gp.Decay
}
func (gp *GABABParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return gp.Gk > 0
}
}
// GFromV returns the GABA-B conductance as a function of v potential.
func (gp *GABABParams) GFromV(v float32) float32 {
ve := max(v, -90.0)
return (ve + 90.0) / (1.0 + math32.FastExp(0.1*((ve+90.0)+10.0)))
}
// GFromS returns the GABA-B conductance as a function of GABA spiking rate,
// based on normalized spiking factor (i.e., Gi from FFFB etc)
func (gp *GABABParams) GFromS(s float32) float32 {
ss := s * gp.GiSpike
if ss > 20 {
return 1
}
return 1.0 / (1.0 + math32.FastExp(-(ss-7.1)/1.4))
}
// DeltaM computes the change in activation M based on the current
// activation m and the spike integration factor x.
func (gp *GABABParams) DeltaM(m, x float32) float32 {
return (gp.TauFact*x - m) * gp.RiseDt
}
// MX updates the GABA-B / GIRK activation M and underlying X integration value
// based on current values and gi inhibitory conductance (proxy for GABA spikes)
func (gp *GABABParams) MX(gi float32, m, x *float32) {
dM := gp.DeltaM(*m, *x)
*x += gp.GFromS(gi) - (*x)*gp.DecayDt
*m += dM
return
}
// GgabaB returns the overall net GABAB / GIRK conductance including
// Gk, Gbase, and voltage-gating, as a function of activation value M.
func (gp *GABABParams) GgabaB(m, v float32) float32 {
return gp.Gk * gp.GFromV(v) * (m + gp.Gbase)
}
//gosl:end
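// exampleGABABUpdate is an illustrative sketch (not part of the library)
// showing a per-msec update of the GABA-B / GIRK state from an inhibitory
// conductance gi (a proxy for GABA spiking), returning the resulting net
// conductance. Parameters are the defaults; gi and vm are hypothetical inputs.
func exampleGABABUpdate(gi, vm float32, m, x *float32) float32 {
	var gp GABABParams
	gp.Defaults()
	gp.MX(gi, m, x)          // update M activation and X spike-integration value
	return gp.GgabaB(*m, vm) // conductance including voltage-dependent rectification
}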
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import (
"cogentcore.org/core/math32"
)
//gosl:start
// KirParams control the kIR K+ inwardly rectifying current,
// based on the equations from Lindroos et al (2018).
// The conductance is highest at low membrane potentials.
type KirParams struct {
// Gk is the strength of Kir conductance as contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0.012,0.015,0"`
// MinfOff is the asymptotic gating factor M, offset.
MinfOff float32 `default:"-102"`
// MinfTau is the asymptotic gating factor M, time constant.
MinfTau float32 `default:"13"`
// RiseOff is the rise time constant as a function of voltage, offset.
RiseOff float32 `default:"-60"`
// RiseTau is the rise time constant as a function of voltage, time constant factor.
RiseTau float32 `default:"14"`
// DecayOff is the decay time constant as a function of voltage, offset.
DecayOff float32 `default:"-31"`
// DecayTau is the decay time constant as a function of voltage, time constant factor.
DecayTau float32 `default:"23"`
// Mrest is Minf at resting membrane potential of -70, computed from other params.
Mrest float32 `edit:"-"`
}
func (kp *KirParams) Defaults() {
kp.Gk = 0.0
kp.MinfOff = -102
kp.MinfTau = 13
kp.RiseOff = -60
kp.RiseTau = 14
kp.DecayOff = -31
kp.DecayTau = 23
kp.Update()
}
func (kp *KirParams) Update() {
kp.Mrest = kp.MinfRest()
}
func (kp *KirParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return kp.Gk > 0
}
}
// Minf returns Minf as a function of voltage potential.
func (kp *KirParams) Minf(v float32) float32 {
return 1.0 / (1.0 + math32.FastExp((v-kp.MinfOff)/kp.MinfTau))
}
// MinfRest returns Minf at nominal resting membrane potential of -70mV
// which serves as the initial value.
func (kp *KirParams) MinfRest() float32 {
return kp.Minf(-70.0)
}
// MTau returns mtau as a function of voltage.
func (kp *KirParams) MTau(v float32) float32 {
alpha := 0.1 * math32.FastExp(-(v-kp.RiseOff)/kp.RiseTau)
beta := 0.27 / (1.0 + math32.FastExp(-(v-kp.DecayOff)/kp.DecayTau))
return 1.0 / (alpha + beta)
}
// DM computes the change in M gating parameter.
func (kp *KirParams) DM(v, m float32) float32 {
minf := kp.Minf(v)
mtau := kp.MTau(v)
dm := (minf - m) / (mtau * 3) // 3 = Q10
return dm
}
// Gkir returns the overall net Kir conductance.
func (kp *KirParams) Gkir(v float32, m float32) float32 {
return kp.Gk * m
}
//gosl:end
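// exampleKirUpdate is an illustrative sketch (not part of the library)
// showing a per-msec update of the Kir M gate toward its voltage-dependent
// asymptote, returning the resulting conductance. Note that Defaults sets
// Gk = 0, so Gk is enabled explicitly here (the value is hypothetical).
func exampleKirUpdate(v float32, m *float32) float32 {
	var kp KirParams
	kp.Defaults()
	kp.Gk = 0.012         // hypothetical: enable the channel
	*m += kp.DM(v, *m)    // integrate gate toward Minf(v) with MTau(v) and Q10 factor
	return kp.Gkir(v, *m) // conductance = Gk * m
}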
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import "cogentcore.org/lab/gosl/slbool"
//gosl:start
// KNaParams implements sodium (Na) gated potassium (K) currents
// that drive adaptation (accommodation) in neural firing.
// As neurons spike, driving an influx of Na, this activates
// the K channels, which, like leak channels, pull the membrane
// potential back down toward rest (or even below).
type KNaParams struct {
// On enables this component of KNa adaptation.
On slbool.Bool
// Gk is the maximal conductance contribution to Gk(t)
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0.1"`
// Rise is the time constant in ms for increase in conductance based on Na
// concentration due to spiking.
Rise float32
// Decay is the time constant in ms for decay of conductance.
Decay float32
// DtRise = 1 / Rise rate constant.
DtRise float32 `display:"-"`
// DtDecay = 1 / Decay rate constant.
DtDecay float32 `display:"-"`
pad, pad1 int32
}
func (ka *KNaParams) Defaults() {
ka.On.SetBool(true)
ka.Rise = 50
ka.Decay = 100
ka.Gk = 0.1
ka.Update()
}
func (ka *KNaParams) Update() {
ka.DtRise = 1 / ka.Rise
ka.DtDecay = 1 / ka.Decay
}
func (ka *KNaParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return ka.On.IsTrue()
}
}
// GcFromSpike updates the KNa conductance based on spike or not.
func (ka *KNaParams) GcFromSpike(gKNa *float32, spike bool) {
if ka.On.IsTrue() {
if spike {
*gKNa += ka.DtRise * (ka.Gk - *gKNa)
} else {
*gKNa -= ka.DtDecay * *gKNa
}
} else {
*gKNa = 0
}
}
// KNaMedSlow describes the sodium-gated potassium channel adaptation mechanism.
// Evidence supports two different time constants:
// Slick (medium) and Slack (slow).
type KNaMedSlow struct {
// On means apply K-Na adaptation.
On slbool.Bool
// TrialSlow engages an optional version of Slow that discretely turns on at
// the start of new trial (NewState): nrn.GknaSlow += Slow.Gk * nrn.CaDPrev.
// This achieves a strong form of adaptation.
TrialSlow slbool.Bool
pad, pad1 int32
// Med is medium time-scale adaptation.
Med KNaParams `display:"inline"`
// Slow is slow time-scale adaptation.
Slow KNaParams `display:"inline"`
}
func (ka *KNaMedSlow) Defaults() {
ka.Med.Defaults()
ka.Slow.Defaults()
ka.Med.Rise = 50
ka.Med.Decay = 200
ka.Med.Gk = 0.1
ka.Slow.Rise = 1000
ka.Slow.Decay = 1000
ka.Slow.Gk = 0.1
ka.Update()
}
func (ka *KNaMedSlow) Update() {
ka.Med.Update()
ka.Slow.Update()
}
func (ka *KNaMedSlow) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return ka.On.IsTrue()
}
}
// GcFromSpike updates med, slow time scales of KNa adaptation from spiking.
func (ka *KNaMedSlow) GcFromSpike(gKNaM, gKNaS *float32, spike bool) {
ka.Med.GcFromSpike(gKNaM, spike)
if ka.TrialSlow.IsFalse() {
ka.Slow.GcFromSpike(gKNaS, spike)
}
}
//gosl:end
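// exampleKNaUpdate is an illustrative sketch (not part of the library)
// showing how the medium and slow KNa adaptation conductances are updated each
// msec based on whether the neuron spiked on that cycle: Med rises quickly and
// decays over ~200 msec, Slow over ~1000 msec (default parameters).
func exampleKNaUpdate(spike bool, gKNaMed, gKNaSlow *float32) {
	var ka KNaMedSlow
	ka.Defaults()
	ka.GcFromSpike(gKNaMed, gKNaSlow, spike)
}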
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import "cogentcore.org/core/math32"
//gosl:start
// MahpParams implements an M-type medium afterhyperpolarizing (mAHP) K+ channel,
// where m also stands for muscarinic due to the ACh inactivation of this channel.
// It has a slow activation and deactivation time constant, and opens at a lowish
// membrane potential.
// There is one gating variable N updated over time with a tau that is also
// voltage dependent.
// The infinite-time value of N is voltage dependent according to a logistic function
// of the membrane potential, centered at Voff with slope Vslope.
type MahpParams struct {
// Gk is the strength of mAHP conductance as contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0.05"`
// Off is the voltage offset (threshold) in biological units for infinite time
// N gating function: where the gate is at 50% strength.
Off float32 `default:"-30"`
// Slope is the slope of the target (infinite time) gating function.
Slope float32 `default:"9"`
// TauMax is the maximum time constant in msec for activation / deactivation.
// The effective Tau is much faster: roughly 1/20th of TauMax at the original
// temperature, and 1/60th at the standard 37 C temperature.
TauMax float32 `default:"1000"`
// Tadj is a temperature adjustment factor: assume temp = 37 C,
// whereas original units were at 23 C.
Tadj float32 `display:"-" edit:"-"`
// DtMax = 1 / TauMax
DtMax float32 `display:"-" edit:"-"`
pad, pad2 int32
}
// Defaults sets the parameters
func (mp *MahpParams) Defaults() {
mp.Gk = 0.05
mp.Off = -30
mp.Slope = 9
mp.TauMax = 1000
mp.Tadj = math32.Pow(2.3, (37.0-23.0)/10.0) // 3.2 basically
mp.Update()
}
func (mp *MahpParams) Update() {
mp.DtMax = 1.0 / mp.TauMax
}
func (mp *MahpParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return mp.Gk > 0
}
}
// EFun handles singularities in an elegant way -- from Mainen impl
func (mp *MahpParams) EFun(z float32) float32 {
if math32.Abs(z) < 1.0e-4 {
return 1.0 - 0.5*z
}
return z / (math32.FastExp(z) - 1.0)
}
// NinfTauFromV returns the target infinite-time N gate value and
// voltage-dependent time constant tau, from v
func (mp *MahpParams) NinfTauFromV(v float32, ninf, tau *float32) {
vo := v - mp.Off
// direct forms of the rate functions, but these have a singularity at Off (vo = 0):
// a := mp.DtMax * vo / (1.0 - math32.FastExp(-vo/mp.Slope))
// b := -mp.DtMax * vo / (1.0 - math32.FastExp(vo/mp.Slope))
a := mp.DtMax * mp.Slope * mp.EFun(-vo/mp.Slope)
b := mp.DtMax * mp.Slope * mp.EFun(vo/mp.Slope)
*tau = 1.0 / (a + b)
*ninf = a * *tau // a / (a+b)
*tau /= mp.Tadj // correct right away..
return
}
// DNFromV returns the change in gating factor N based on voltage potential.
func (mp *MahpParams) DNFromV(v, n float32) float32 {
var ninf, tau float32
mp.NinfTauFromV(v, &ninf, &tau)
// dt := 1.0 - math32.FastExp(-mp.Tadj/tau) // Mainen comments out this form; Poirazi uses
// dt := mp.Tadj / tau // simple linear fix
dn := (ninf - n) / tau
return dn
}
// GmAHP returns the conductance as a function of n.
func (mp *MahpParams) GmAHP(v float32, n *float32) float32 {
dn := mp.DNFromV(v, *n)
*n += dn
g := mp.Tadj * mp.Gk * *n
return g
}
//gosl:end
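// exampleMahpUpdate is an illustrative sketch (not part of the library)
// showing the per-msec mAHP update: GmAHP integrates the N gate toward its
// voltage-dependent asymptote and returns the temperature-adjusted conductance.
func exampleMahpUpdate(v float32, n *float32) float32 {
	var mp MahpParams
	mp.Defaults()
	return mp.GmAHP(v, n) // updates n in place; returns Tadj * Gk * n
}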
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import "cogentcore.org/core/math32"
//gosl:start
// NMDAParams control the NMDA dynamics, based on Jahr & Stevens (1990) equations
// which are widely used in models, from Brunel & Wang (2001) to Sanders et al. (2013).
// The overall conductance is a function of a voltage-dependent postsynaptic factor based
// on Mg ion blockage, and a presynaptic Glu-based opening factor, which in a simple model
// just increments with each spike-driven Glu release and decays exponentially with Tau.
type NMDAParams struct {
// Ge is the multiplier for the NMDA contribution to Ge(t) excitatory conductance.
// Multiplies GnmdaSyn to get net conductance including presynaptic.
// GnmdaSyn can be relatively large, such that overall Gnmda conductance = ~50 nS max.
// Ge(t) is later multiplied by Gbar.E for pA unit scaling.
Ge float32 `default:"0.006,0.007,0"`
// Tau is the decay time constant for NMDA channel activation.
// Rise time is 2 msec and is not worth the extra cost of a biexponential.
// 30 fits the Urakubo et al (2008) model with ITau = 100, but 100
// works better in practice.
Tau float32 `default:"30,50,100,200,300"`
// ITau is the decay time constant for NMDA channel inhibition, which captures the
// Urakubo et al (2008) allosteric dynamics (100 fits their model well).
// Set to 1 to eliminate that mechanism.
ITau float32 `default:"1,100"`
// MgC is the magnesium ion concentration: Brunel & Wang (2001) and
// Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990).
// Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far
// in large models, 1.2, Voff = 0 best in smaller nets.
MgC float32 `default:"1:1.5"`
// Voff is the offset in membrane potential in biological units for
// voltage-dependent functions. 5 corresponds to the -65 mV rest,
// -45 threshold of the Urakubo et al (2008) model.
// 0 is used in Brunel & Wang, 2001.
Voff float32 `default:"0"`
// Dt = 1 / tau
Dt float32 `display:"-" json:"-" xml:"-"`
// IDt = 1 / tau
IDt float32 `display:"-" json:"-" xml:"-"`
// MgFact = MgC / 3.57
MgFact float32 `display:"-" json:"-" xml:"-"`
}
func (np *NMDAParams) Defaults() {
np.Ge = 0.006
np.Tau = 100
np.ITau = 1 // off by default, as it doesn't work in actual axon models..
np.MgC = 1.4
np.Voff = 0
np.Update()
}
func (np *NMDAParams) Update() {
np.Dt = 1 / np.Tau
np.IDt = 1 / np.ITau
np.MgFact = np.MgC / 3.57
}
func (np *NMDAParams) ShouldDisplay(field string) bool {
switch field {
case "Ge":
return true
default:
return np.Ge > 0
}
}
// MgGFromV returns the NMDA conductance as a function of biological membrane potential
// based on Mg ion blocking.
// Using parameters from Brunel & Wang (2001). see also Urakubo et al (2008)
func (np *NMDAParams) MgGFromV(v float32) float32 {
av := v + np.Voff
if av >= 0 {
return 0
}
return -av / (1.0 + np.MgFact*math32.FastExp(-0.062*av))
}
// CaFromV returns the calcium current factor as a function of biological membrane
// potential -- this factor is needed for computing the calcium current * MgGFromV.
// This is the same function used in VGCC for their conductance factor.
// based on implementation in Urakubo et al (2008).
// http://kurodalab.bs.s.u-tokyo.ac.jp/info/STDP/
func (np *NMDAParams) CaFromV(v float32) float32 {
av := v + np.Voff
if av > -0.5 && av < 0.5 { // this eliminates div 0 at 0, and numerical "fuzz" around 0
return 1.0 / (0.0756 * (1 + 0.0378*av))
}
return -av / (1.0 - math32.FastExp(0.0756*av))
}
// NMDASyn returns the updated synaptic NMDA Glu binding
// based on new raw spike-driven Glu binding.
func (np *NMDAParams) NMDASyn(nmda, raw float32) float32 {
return nmda + raw - np.Dt*nmda
}
// Gnmda returns the NMDA net conductance from nmda Glu binding and Vm
// including the Ge factor
func (np *NMDAParams) Gnmda(nmda, vm float32) float32 {
return np.Ge * np.MgGFromV(vm) * nmda
}
// SnmdaFromSpike updates sender-based NMDA channel opening based on neural spiking
// using the inhibition and decay factors. These dynamics closely match the
// Urakubo et al (2008) allosteric NMDA receptor behavior, with ITau = 100, Tau = 30.
func (np *NMDAParams) SnmdaFromSpike(spike float32, snmdaO, snmdaI *float32) {
if spike > 0 {
inh := (1 - *snmdaI)
*snmdaO += inh * (1 - *snmdaO)
*snmdaI += inh
} else {
*snmdaO -= np.Dt * *snmdaO
*snmdaI -= np.IDt * *snmdaI
}
}
//gosl:end
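// exampleNMDAUpdate is an illustrative sketch (not part of the library)
// showing the per-msec receiver-side NMDA update: raw spike-driven Glu binding
// increments the integrated nmda value, which decays with Tau, and the net
// conductance is gated by the voltage-dependent Mg block.
func exampleNMDAUpdate(raw, vm float32, nmda *float32) float32 {
	var np NMDAParams
	np.Defaults()
	*nmda = np.NMDASyn(*nmda, raw) // integrate Glu binding with exponential decay
	return np.Gnmda(*nmda, vm)     // Ge * MgGFromV(vm) * nmda
}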
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import "cogentcore.org/core/math32"
//gosl:start
// SahpParams implements a slow afterhyperpolarizing (sAHP) K+ channel.
// It has a slowly accumulating calcium value, aggregated at the
// theta cycle level, that then drives the logistic gating function,
// so that it only activates after a significant accumulation,
// after which point it decays.
// For the theta-cycle updating, the normal m-type tau is all within
// the scope of a single theta cycle, so we just omit the time integration
// of the n gating value, but tau is computed in any case.
type SahpParams struct {
// Gk is the strength of sAHP conductance as contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0.05,0.1"`
// CaTau is the time constant for integrating Ca across theta cycles.
CaTau float32 `default:"5,10"`
// Off is the integrated Ca offset (threshold) for infinite time N
// gating function, where the gate is at 50% strength.
Off float32 `default:"0.8"`
// Slope of the infinite time logistic gating function.
Slope float32 `default:"0.02"`
// TauMax is the maximum slow rate time constant in msec for activation
// / deactivation. The effective Tau is much slower: 1/20th in original temp,
// and 1/60th in standard 37 C temp.
TauMax float32 `default:"1"`
// CaDt = 1 / CaTau
CaDt float32 `display:"-" edit:"-"`
// DtMax = 1 / TauMax
DtMax float32 `display:"-" edit:"-"`
pad int32
}
// Defaults sets the parameters
func (mp *SahpParams) Defaults() {
mp.Gk = 0.05
mp.CaTau = 5
mp.Off = 0.8
mp.Slope = 0.02
mp.TauMax = 1
mp.Update()
}
func (mp *SahpParams) Update() {
mp.DtMax = 1.0 / mp.TauMax
mp.CaDt = 1.0 / mp.CaTau
}
func (mp *SahpParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return mp.Gk > 0
}
}
// EFun handles singularities in an elegant way -- from Mainen impl
func (mp *SahpParams) EFun(z float32) float32 {
if math32.Abs(z) < 1.0e-4 {
return 1.0 - 0.5*z
}
return z / (math32.FastExp(z) - 1.0)
}
// NinfTauFromCa returns the target infinite-time N gate value and
// time constant tau, from integrated Ca value
func (mp *SahpParams) NinfTauFromCa(ca float32, ninf, tau *float32) {
co := ca - mp.Off
// direct forms of the rate functions, but these have a singularity at Off (co = 0):
// a := mp.DtMax * co / (1.0 - math32.FastExp(-co/mp.Slope))
// b := -mp.DtMax * co / (1.0 - math32.FastExp(co/mp.Slope))
a := mp.DtMax * mp.Slope * mp.EFun(-co/mp.Slope)
b := mp.DtMax * mp.Slope * mp.EFun(co/mp.Slope)
*tau = 1.0 / (a + b)
*ninf = a * *tau // a / (a+b)
return
}
// CaInt returns the updated time-integrated Ca value from current value and current Ca
func (mp *SahpParams) CaInt(caInt, ca float32) float32 {
return caInt + mp.CaDt*(ca-caInt)
}
// DNFromV returns the change in gating factor N based on integrated Ca.
// Omit this and just use ninf directly for theta-cycle updating.
func (mp *SahpParams) DNFromV(ca, n float32) float32 {
var ninf, tau float32
mp.NinfTauFromCa(ca, &ninf, &tau)
dn := (ninf - n) / tau
return dn
}
// GsAHP returns the conductance as a function of n
func (mp *SahpParams) GsAHP(n float32) float32 {
return mp.Gk * n
}
//gosl:end
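// exampleSahpUpdate is an illustrative sketch (not part of the library)
// showing the theta-cycle sAHP update: Ca is integrated slowly across cycles,
// and (per the type comment above) the N gate is set directly to its asymptotic
// value rather than being time-integrated within a theta cycle.
func exampleSahpUpdate(ca float32, caInt, n *float32) float32 {
	var mp SahpParams
	mp.Defaults()
	*caInt = mp.CaInt(*caInt, ca) // slow integration of Ca across theta cycles
	var ninf, tau float32
	mp.NinfTauFromCa(*caInt, &ninf, &tau)
	*n = ninf // theta-cycle updating: use the asymptotic value directly
	return mp.GsAHP(*n)
}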
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import (
"cogentcore.org/core/math32"
)
//gosl:start
// SKCaParams describes the small-conductance calcium-activated potassium channel,
// activated by intracellular stores in a way that drives pauses in firing,
// and can require inactivity to recharge the Ca available for release.
// These intracellular stores can release quickly, have a slow decay once released,
// and the stores can take a while to rebuild, leading to rapidly triggered,
// long-lasting pauses that don't recur until stores have rebuilt, which is the
// observed pattern of firing of STNp pausing neurons.
// CaIn = intracellular stores available for release; CaR = released amount from stores
// CaM = K channel conductance gating factor driven by CaR binding,
// computed using the Hill equations described in Fujita et al (2012), Gunay et al (2008)
// (also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded)
type SKCaParams struct {
// Gk is the strength of the SKCa conductance contribution to Gk(t) factor
// (which is then multiplied by Gbar.K that provides pA unit scaling).
Gk float32 `default:"0,2,3"`
// C50 is the 50% Ca concentration baseline value in Hill equation.
// Set this to level that activates at reasonable levels of SKCaR.
C50 float32 `default:"0.4,0.5"`
// Rise is the K channel gating factor activation time constant,
// roughly 5-15 msec in literature.
Rise float32 `default:"15"`
// Decay is the K channel gating factor deactivation time constant,
// roughly 30-50 ms in literature.
Decay float32 `default:"30"`
// KCaR is the proportion of CaIn intracellular stores that are released
// per spike, going into CaR.
KCaR float32 `default:"0.4,0.8"`
// CaRDecayTau is the SKCaR released calcium decay time constant.
CaRDecayTau float32 `default:"150,200"`
// CaInThr is the level of time-integrated spiking activity (CaD) below which CaIn
// intracellular stores are replenished. A low threshold can be used to
// require minimal activity to recharge. Set to a high value (e.g., 10)
// for constant recharge.
CaInThr float32 `default:"0.01"`
// CaInTau is the time constant in msec for storing CaIn when activity
// is below CaInThr.
CaInTau float32 `default:"50"`
// RiseDt = 1 / Rise
RiseDt float32 `display:"-" json:"-" xml:"-"`
// DecayDt = 1 / Decay
DecayDt float32 `display:"-" json:"-" xml:"-"`
// CaRDecayDt = 1 / CaRDecayTau
CaRDecayDt float32 `display:"-" json:"-" xml:"-"`
// CaInDt = 1 / CaInTau
CaInDt float32 `display:"-" json:"-" xml:"-"`
}
func (sp *SKCaParams) Defaults() {
sp.Gk = 0.0
sp.C50 = 0.5
sp.Rise = 15
sp.Decay = 30
sp.KCaR = 0.8
sp.CaRDecayTau = 150
sp.CaInThr = 0.01
sp.CaInTau = 50
sp.Update()
}
func (sp *SKCaParams) Update() {
sp.RiseDt = 1.0 / sp.Rise
sp.DecayDt = 1.0 / sp.Decay
sp.CaRDecayDt = 1.0 / sp.CaRDecayTau
sp.CaInDt = 1.0 / sp.CaInTau
}
func (sp *SKCaParams) ShouldDisplay(field string) bool {
switch field {
case "Gk":
return true
default:
return sp.Gk > 0
}
}
// MAsympHill gives the asymptotic (driving) gating factor M as a function of CAi
// for the Hill equation version used in Fujita et al (2012)
func (sp *SKCaParams) MAsympHill(cai float32) float32 {
caia := cai / sp.C50
capow := caia * caia * caia * caia
return capow / (1 + capow)
}
// MAsympGW06 gives the asymptotic (driving) gating factor M as a function of CAi
// for the GilliesWillshaw06 equation version -- not used by default.
// this is a log-saturating function
func (sp *SKCaParams) MAsympGW06(cai float32) float32 {
caia := max(cai, 0.001)
return 0.81 / (1.0 + math32.FastExp(-(math32.Log(caia)+0.3))/0.46)
}
// CaInRFromSpike updates CaIn, CaR from Spiking and CaD time-integrated spiking activity
func (sp *SKCaParams) CaInRFromSpike(spike, caD float32, caIn, caR *float32) {
*caR -= *caR * sp.CaRDecayDt
if spike > 0 {
x := *caIn * sp.KCaR
*caR += x
*caIn -= x
}
if caD < sp.CaInThr {
*caIn += sp.CaInDt * (1.0 - *caIn)
}
}
// MFromCa returns updated m gating value as a function of current CaR released Ca
// and the current m gating value, with activation and deactivation time constants.
func (sp *SKCaParams) MFromCa(caR, mcur float32) float32 {
mas := sp.MAsympHill(caR)
if mas > mcur {
return mcur + sp.RiseDt*(mas-mcur)
}
return mcur + sp.DecayDt*(mas-mcur)
}
//gosl:end
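// exampleSKCaUpdate is an illustrative sketch (not part of the library)
// showing the per-msec SKCa update from spiking and the CaD time-integrated
// activity level: released Ca (caR) drives the M gate via the Hill function,
// and intracellular stores (caIn) recharge only when activity is low.
// Defaults sets Gk = 0, so it is enabled explicitly here (hypothetical value);
// the final Gk * m contribution is an assumption, by analogy to the other K channels.
func exampleSKCaUpdate(spike, caD float32, caIn, caR, m *float32) float32 {
	var sp SKCaParams
	sp.Defaults()
	sp.Gk = 2                                // hypothetical: enable the channel
	sp.CaInRFromSpike(spike, caD, caIn, caR) // release from stores on spike; recharge if caD < CaInThr
	*m = sp.MFromCa(*caR, *m)                // gate toward MAsympHill(caR) with Rise / Decay taus
	return sp.Gk * *m                        // assumed net contribution: Gk * m
}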
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chans
import (
"cogentcore.org/core/math32"
)
//gosl:start
// VGCCParams control the standard L-type Ca channel
// All functions based on Urakubo et al (2008).
// Source code available at http://kurodalab.bs.s.u-tokyo.ac.jp/info/STDP/Urakubo2008.tar.gz.
// In particular look at the file MODEL/Poirazi_cell/CaL.g.
type VGCCParams struct {
// Ge is the strength of the VGCC contribution to Ge(t) excitatory
// conductance. Ge(t) is later multiplied by Gbar.E for pA unit scaling.
// The 0.12 value is from Urakubo et al (2008) model, which best fits actual model
// behavior using axon equations (1.5 nominal in that model).
// 0.02 works better in practice for not getting stuck in high plateau firing.
Ge float32 `default:"0.02,0.12"`
// calcium from conductance factor. Important for learning contribution of VGCC.
Ca float32 `default:"0.25"`
pad, pad1 int32
}
func (np *VGCCParams) Defaults() {
np.Ge = 0.02
np.Ca = 0.25
}
func (np *VGCCParams) Update() {
}
func (np *VGCCParams) ShouldDisplay(field string) bool {
switch field {
case "Ge":
return true
default:
return np.Ge > 0
}
}
// GFromV returns the VGCC conductance as a function of normalized membrane potential
// Based on Urakubo's calculation of `max` in CaL.g in the section commented 'i gate'.
func (np *VGCCParams) GFromV(v float32) float32 {
if v > -0.5 && v < 0.5 { // this avoids divide by 0, and numerical instability around 0
return 1.0 / (0.0756 * (1 + 0.0378*v))
}
return -v / (1.0 - math32.FastExp(0.0756*v))
}
// MFromV returns the M gate function from potential V.
// Based on Urakubo's calculation of `max` in CaL.g in the section commented 'm gate'.
func (np *VGCCParams) MFromV(v float32) float32 {
// approximate values at the asymptotes for performance
if v < -60 {
return 0
}
if v > -10 {
return 1
}
return 1.0 / (1.0 + math32.FastExp(-(v + 37)))
}
// HFromV returns the H gate function from potential V.
// Based on Urakubo's calculation of `max` in CaL.g in the section commented 'h gate'.
func (np *VGCCParams) HFromV(v float32) float32 {
// approximate values at the asymptotes for performance
if v < -50 {
return 1
}
if v > -10 {
return 0
}
return 1.0 / (1.0 + math32.FastExp((v+41)*2))
}
// DeltaMFromV returns the change at msec update scale in M factor
// as a function of V
func (np *VGCCParams) DeltaMFromV(v, m float32) float32 {
vb := min(v, 0.0)
return (np.MFromV(vb) - m) / 3.6
}
// DeltaHFromV returns the change at msec update scale in H factor
// as a function of V
func (np *VGCCParams) DeltaHFromV(v, h float32) float32 {
vb := min(v, 0.0)
return (np.HFromV(vb) - h) / 29.0
}
// Gvgcc returns the VGCC net conductance from m, h activation and v.
func (np *VGCCParams) Gvgcc(v, m, h float32) float32 {
return np.Ge * np.GFromV(v) * m * m * m * h
}
// CaFromG returns the Ca from Gvgcc conductance, current Ca level, and v.
func (np *VGCCParams) CaFromG(v, g, ca float32) float32 {
return -v * np.Ca * g
}
//gosl:end
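// exampleVGCCUpdate is an illustrative sketch (not part of the library)
// showing the per-msec VGCC update: the M and H gates relax toward their
// voltage-dependent targets, the conductance is Ge * G(v) * m^3 * h,
// and the associated Ca influx factor is derived from that conductance.
func exampleVGCCUpdate(v float32, m, h, ca *float32) float32 {
	var np VGCCParams
	np.Defaults()
	*m += np.DeltaMFromV(v, *m)
	*h += np.DeltaHFromV(v, *h)
	g := np.Gvgcc(v, *m, *h)
	*ca = np.CaFromG(v, g, *ca) // Ca factor for learning: -v * Ca * g
	return g
}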
// Code generated by "core generate -add-types -gosl"; DO NOT EDIT.
package fsfffb
import (
"cogentcore.org/core/enums"
)
var _InhibVarsValues = []InhibVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}
// InhibVarsN is the highest valid value for type InhibVars, plus one.
//
//gosl:start
const InhibVarsN InhibVars = 19
//gosl:end
var _InhibVarsValueMap = map[string]InhibVars{`FFsRaw`: 0, `FBsRaw`: 1, `GeExtRaw`: 2, `FFs`: 3, `FBs`: 4, `GeExts`: 5, `FSi`: 6, `SSi`: 7, `SSf`: 8, `FSGi`: 9, `SSGi`: 10, `TotalGi`: 11, `GiOrig`: 12, `LayGi`: 13, `FFAvg`: 14, `FFAvgPrv`: 15, `ModAct`: 16, `DAD1`: 17, `DAD2`: 18}
var _InhibVarsDescMap = map[InhibVars]string{0: `FFsRaw is the raw aggregation of all feedforward incoming spikes into neurons in this pool. It is integrated using FFsRawInt in InhibIntVars.`, 1: `FBsRaw is the raw aggregation of all feedback outgoing spikes generated from neurons in this pool. It is integrated using FBsRawInt in InhibIntVars.`, 2: `GeExtRaw is the raw aggregation of all extra GeExt conductances added to neurons. It is integrated using GeExtRawInt in InhibIntVars.`, 3: `FFs is all feedforward incoming spikes into neurons in this pool, normalized by pool size.`, 4: `FBs is all feedback outgoing spikes generated from neurons in this pool, normalized by pool size.`, 5: `GeExts is all extra GeExt conductances added to neurons, normalized by pool size.`, 6: `FSi is the fast spiking PV+ fast integration of FFs feedforward spikes.`, 7: `SSi is the slow spiking SST+ integration of FBs feedback spikes.`, 8: `SSf is the slow spiking facilitation factor, representing facilitating effects of recent activity.`, 9: `FSGi is the overall fast-spiking inhibitory conductance.`, 10: `SSGi is the overall slow-spiking inhibitory conductance.`, 11: `TotalGi is the overall inhibitory conductance = FSGi + SSGi.`, 12: `GiOrig is the original value of the inhibition (before pool or other effects).`, 13: `LayGi is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition, only for sub-pools.`, 14: `FFAvg is the longer time scale running average FF drive, used for FFAvgPrv.`, 15: `FFAvgPrv is the previous theta cycle FFAvg value, for the FFPrv factor. Updated in the Decay function that is called at start of new State / Trial.`, 16: `ModAct is a pool-specific modulation activity value (e.g., PF = parafasciculus in BG)`, 17: `DAD1 is a pool-specific dopamine D1 value`, 18: `DAD2 is a pool-specific dopamine D2 value`}
var _InhibVarsMap = map[InhibVars]string{0: `FFsRaw`, 1: `FBsRaw`, 2: `GeExtRaw`, 3: `FFs`, 4: `FBs`, 5: `GeExts`, 6: `FSi`, 7: `SSi`, 8: `SSf`, 9: `FSGi`, 10: `SSGi`, 11: `TotalGi`, 12: `GiOrig`, 13: `LayGi`, 14: `FFAvg`, 15: `FFAvgPrv`, 16: `ModAct`, 17: `DAD1`, 18: `DAD2`}
// String returns the string representation of this InhibVars value.
func (i InhibVars) String() string { return enums.String(i, _InhibVarsMap) }
// SetString sets the InhibVars value from its string representation,
// and returns an error if the string is invalid.
func (i *InhibVars) SetString(s string) error {
return enums.SetString(i, s, _InhibVarsValueMap, "InhibVars")
}
// Int64 returns the InhibVars value as an int64.
func (i InhibVars) Int64() int64 { return int64(i) }
// SetInt64 sets the InhibVars value from an int64.
func (i *InhibVars) SetInt64(in int64) { *i = InhibVars(in) }
// Desc returns the description of the InhibVars value.
func (i InhibVars) Desc() string { return enums.Desc(i, _InhibVarsDescMap) }
// InhibVarsValues returns all possible values for the type InhibVars.
func InhibVarsValues() []InhibVars { return _InhibVarsValues }
// Values returns all possible values for the type InhibVars.
func (i InhibVars) Values() []enums.Enum { return enums.Values(_InhibVarsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i InhibVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *InhibVars) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "InhibVars")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fsfffb provides Fast and Slow
// feedforward (FF) and feedback (FB) inhibition (FFFB)
// based on incoming spikes (FF) and outgoing spikes (FB).
//
// This produces a robust, graded k-Winners-Take-All dynamic of sparse
// distributed representations having approximately k out of N neurons
// active at any time, where k is typically 10-20 percent of N.
package fsfffb
//go:generate core generate -add-types -gosl
import "cogentcore.org/lab/gosl/slbool"
//gosl:start
// GiParams parameterizes feedforward (FF) and feedback (FB) inhibition (FFFB)
// based on incoming spikes (FF) and outgoing spikes (FB) for given excitatory pool
// across Fast (PV+ neurons) and Slow (SST+ neurons) timescales.
// FF+FB -> FS fast spikes (PV), FB*SSf -> SS (SST) slow spikes, which are slow
// to start due to the SSf facilitation factor that increases slowly.
type GiParams struct {
// On enables this level of inhibition.
On slbool.Bool
// Gi is overall inhibition gain, which is the main parameter to adjust
// to change overall activation levels, scaling both the FS and SS factors.
Gi float32 `min:"0" default:"1,1.1,0.75,0.9"`
// FB is a scaling factor for contribution of FB spikes to FSi value,
// where FF spikes always contribute with a factor of 1.
// For small networks, 0.5 or 1 works best; larger networks and
// more demanding inhibition requires higher levels.
FB float32 `min:"0" default:"0.5,1,4"`
// FSTau is fast spiking (PV+) integration time constant in cycles (msec).
// Tau is roughly 2/3 of the way to asymptotic value.
FSTau float32 `min:"0" default:"6"`
// SS is the multiplier on SS slow-spiking (SST+) in contributing to the
// overall Gi inhibition. FS contributes at a factor of 1.
SS float32 `min:"0" default:"30"`
// SSfTau is the slow-spiking (SST+) facilitation decay time constant
// in cycles (msec). Facilitation factor SSf determines impact of FB spikes
// as a function of spike input.
// Tau is roughly 2/3 of the way to asymptotic value.
SSfTau float32 `min:"0" default:"20"`
// SSiTau is the slow-spiking (SST+) integration time constant in cycles (msec)
// cascaded on top of FSTau.
// Tau is roughly 2/3 of the way to asymptotic value.
SSiTau float32 `min:"0" default:"50"`
// FS0 is the fast spiking zero point: below this level, no FS inhibition
// is computed, and this value is subtracted from the FSi.
FS0 float32 `default:"0.1"`
// FFAvgTau is the time constant for updating a running average of the
// feedforward inhibition over a longer time scale, for computing FFPrv.
FFAvgTau float32 `default:"50"`
// FFPrv is the proportion of previous average feed-forward inhibition (FFAvgPrv)
// to add, resulting in an accentuated temporal-derivative dynamic where neurons
// respond most strongly to increases in excitation that exceeds inhibition from last time.
FFPrv float32 `default:"0"`
// ClampExtMin is the minimum GeExt value required to drive external clamping dynamics
// (if clamp is set), where only GeExt drives inhibition. If GeExt is below this value,
// then the usual FS-FFFB drivers are used.
ClampExtMin float32 `default:"0.05"`
// rate = 1 / tau
FSDt float32 `edit:"-" display:"-" json:"-" xml:"-"`
// rate = 1 / tau
SSfDt float32 `edit:"-" display:"-" json:"-" xml:"-"`
// rate = 1 / tau
SSiDt float32 `edit:"-" display:"-" json:"-" xml:"-"`
// rate = 1 / tau
FFAvgDt float32 `edit:"-" display:"-" json:"-" xml:"-"`
pad float32
}
func (fb *GiParams) Update() {
fb.FSDt = 1 / fb.FSTau
fb.SSfDt = 1 / fb.SSfTau
fb.SSiDt = 1 / fb.SSiTau
fb.FFAvgDt = 1 / fb.FFAvgTau
}
func (fb *GiParams) Defaults() {
fb.Gi = 1.1
fb.FB = 1
fb.SS = 30
fb.FSTau = 6
fb.SSfTau = 20
fb.SSiTau = 50
fb.FS0 = 0.1
fb.FFAvgTau = 50
fb.FFPrv = 0
fb.ClampExtMin = 0.05
fb.Update()
}
func (fb *GiParams) ShouldDisplay(field string) bool {
switch field {
case "On":
return true
default:
return fb.On.IsTrue()
}
}
// FSiFromFFs updates fast-spiking inhibition FSi from FFs spikes
func (fb *GiParams) FSiFromFFs(fsi, ffs, fbs float32) float32 {
return fsi + (ffs + fb.FB*fbs) - fb.FSDt*fsi // immediate up, slow down
}
// FS0Thr applies FS0 threshold to given value
func (fb *GiParams) FS0Thr(val float32) float32 {
return max(val-fb.FS0, 0.0)
}
// FS returns the current effective FS value based on fsi.
// If clamped, then only use gext, without applying FS0
func (fb *GiParams) FS(fsi, gext float32, clamped bool) float32 {
if clamped && gext > fb.ClampExtMin {
return gext
}
return fb.FS0Thr(fsi) + gext
}
// SSFromFBs updates slow-spiking inhibition from FBs
func (fb *GiParams) SSFromFBs(ssf, ssi *float32, fbs float32) {
*ssi += fb.SSiDt * (*ssf*fbs - *ssi)
*ssf += fbs*(1-*ssf) - fb.SSfDt**ssf
}
//gosl:end
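// exampleFSFFFBUpdate is an illustrative sketch (not part of the library)
// showing a per-cycle update of the fast (PV+) and slow (SST+) inhibition
// variables from normalized feedforward (ffs) and feedback (fbs) spike inputs.
// How FS and SS are combined into the total inhibition is handled elsewhere in
// fsfffb; the combination returned here (Gi * (FS + SS * ssi)) is an assumption
// for illustration only.
func exampleFSFFFBUpdate(ffs, fbs, geExt float32, fsi, ssf, ssi *float32) float32 {
	var fb GiParams
	fb.Defaults()
	*fsi = fb.FSiFromFFs(*fsi, ffs, fbs) // fast-spiking: immediate rise, slow decay
	fs := fb.FS(*fsi, geExt, false)      // apply FS0 threshold (not clamped here)
	fb.SSFromFBs(ssf, ssi, fbs)          // slow-spiking: facilitation * feedback, slow integration
	return fb.Gi * (fs + fb.SS * *ssi)   // assumed combination; see note above
}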
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinase
import (
"fmt"
"image"
"log/slog"
"math/rand"
"slices"
"strings"
"cogentcore.org/core/base/iox/imagex"
"cogentcore.org/core/math32"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/plot/plots"
"cogentcore.org/lab/stats/glm"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"github.com/emer/axon/v2/kinase"
)
// Linear performs a linear regression to approximate the synaptic Ca
// integration between send and recv neurons.
type Linear struct {
// Kinase CaSpike params
CaSpike kinase.CaSpikeParams `display:"no-inline" new-window:"+"`
// SynCa20 uses 20 msec time bin integration.
SynCa20 bool
// total number of cycles (1 msec each) to run per learning trial
Cycles int `min:"10" default:"200"`
// number of plus cycles
PlusCycles int `default:"50"`
// NumBins is the number of bins to accumulate spikes over Cycles
NumBins int `default:"8"`
// CyclesPerBin = Cycles / NumBins
CyclesPerBin int `edit:"-"`
// MaxHz is the maximum firing rate to sample in minus, plus phases
MaxHz int `default:"120"`
// StepHz is the step size for sampling Hz
StepHz int `default:"10"`
// NTrials is number of trials per Hz case
NTrials int `default:"100"`
// TotalTrials is the total number of trials across all conditions
TotalTrials int `edit:"-"`
// Sending neuron
Send Neuron
// Receiving neuron
Recv Neuron
// Standard synapse values
StdSyn Synapse
// Linear synapse values
LinearSyn Synapse
// ErrDWt is the target error dwt: PlusHz - MinusHz
ErrDWt float32
// binned integration of send, recv spikes
CaBins []float32
// Data to fit the regression
Data table.Table
}
func (ls *Linear) Defaults() {
ls.CaSpike.Defaults()
ls.Cycles = 200
ls.PlusCycles = 50
ls.CyclesPerBin = 10
ls.MaxHz = 100
ls.StepHz = 10 // note: 5 gives same results
ls.NTrials = 10 // note: 20 gives same results
ls.NumBins = ls.Cycles / ls.CyclesPerBin
ls.Update()
}
func (ls *Linear) Update() {
ls.NumBins = ls.Cycles / ls.CyclesPerBin
// ls.CaSpike.Dt.PDTauForNCycles(ls.Cycles)
// ls.Synapse.Dt.PDTauForNCycles(ls.Cycles)
nhz := ls.MaxHz / ls.StepHz
ls.TotalTrials = nhz * nhz * nhz * nhz * ls.NTrials
ls.CaBins = make([]float32, ls.NumBins)
ls.Send.CaBins = make([]float32, ls.NumBins)
ls.Recv.CaBins = make([]float32, ls.NumBins)
}
func (ls *Linear) Init() {
ls.Data.Init()
ls.Send.Init()
ls.Recv.Init()
ls.StdSyn.Init()
ls.LinearSyn.Init()
ls.InitTable()
}
func (ls *Linear) InitTable() {
if ls.Data.NumColumns() > 0 {
return
}
nneur := ls.NumBins
ls.Data.AddIntColumn("Trial")
ls.Data.AddFloat64Column("Hz", 4)
ls.Data.AddFloat64Column("Bins", nneur)
ls.Data.AddFloat64Column("SynCa", 2)
ls.Data.AddFloat64Column("PredCa", 2)
ls.Data.AddFloat64Column("ErrCa", 2)
ls.Data.AddFloat64Column("SSE") // total SSE
ls.Data.SetNumRows(ls.TotalTrials)
}
func (ls *Linear) StartTrial() {
ls.Send.StartTrial()
ls.Recv.StartTrial()
}
// Neuron has Neuron state
type Neuron struct {
// Neuron spiking (0,1)
Spike float32
// Neuron probability of spiking
SpikeP float32
// CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning:
// exponential integration of SpikeG * Spike at SynTau time constant (typically 30).
// Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for
// the synaptic trace driving credit assignment in learning.
// Time constant reflects binding time of Glu to NMDA and Ca buffering postsynaptically,
// and determines time window where pre * post spiking must overlap to drive learning.
CaSyn float32
// neuron-level spike-driven Ca integration
CaM, CaP, CaD float32
TotalSpikes float32
// binned count of spikes, for regression learning
CaBins []float32
}
func (kn *Neuron) Init() {
kn.Spike = 0
kn.SpikeP = 1
kn.CaSyn = 0
kn.CaM = 0
kn.CaP = 0
kn.CaD = 0
kn.StartTrial()
}
func (kn *Neuron) StartTrial() {
kn.TotalSpikes = 0
for i := range kn.CaBins {
kn.CaBins[i] = 0
}
}
// Cycle does one cycle of neuron updating, with given exponential spike interval
// based on target spiking firing rate.
func (ls *Linear) Cycle(nr *Neuron, expInt float32, cyc int) {
nr.Spike = 0
bin := cyc / ls.CyclesPerBin
if expInt > 0 {
nr.SpikeP *= rand.Float32()
if nr.SpikeP <= expInt {
nr.Spike = 1
nr.SpikeP = 1
nr.TotalSpikes += 1
}
}
nr.CaSyn += ls.CaSpike.CaSynDt * (ls.CaSpike.SpikeCaSyn*nr.Spike - nr.CaSyn)
nr.CaBins[bin] += (nr.CaSyn / float32(ls.CyclesPerBin))
ls.CaSpike.CaMFromSpike(nr.Spike, &nr.CaM, &nr.CaP, &nr.CaD)
}
// Synapse has Synapse state
type Synapse struct {
CaSyn float32
// CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP
CaM float32
// CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule
CaP float32
// CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule
CaD float32
// DWt is the CaP - CaD
DWt float32
}
func (ks *Synapse) Init() {
ks.CaSyn = 0
ks.CaM = 0
ks.CaP = 0
ks.CaD = 0
ks.DWt = 0
}
// Run generates data
func (ls *Linear) Run() {
nhz := ls.MaxHz / ls.StepHz
hz := make([]float32, nhz)
i := 0
for h := float32(ls.StepHz); h <= float32(ls.MaxHz); h += float32(ls.StepHz) {
hz[i] = h
i++
}
row := 0
for smi := 0; smi < nhz; smi++ {
sendMinusHz := hz[smi]
for spi := 0; spi < nhz; spi++ {
sendPlusHz := hz[spi]
for rmi := 0; rmi < nhz; rmi++ {
recvMinusHz := hz[rmi]
for rpi := 0; rpi < nhz; rpi++ {
recvPlusHz := hz[rpi]
for ti := 0; ti < ls.NTrials; ti++ {
ls.Trial(sendMinusHz, sendPlusHz, recvMinusHz, recvPlusHz, ti, row)
row++
}
}
}
}
}
}
func (ls *Linear) SetSynState(sy *Synapse, row int) {
ls.Data.Column("SynCa").SetFloatRow(float64(sy.CaP), row, 0)
ls.Data.Column("SynCa").SetFloatRow(float64(sy.CaD), row, 1)
}
func (ls *Linear) SetBins(sn, rn *Neuron, off, row int) {
ls.CaBins[0] = rn.CaBins[0] * sn.CaBins[0]
for i := 2; i < ls.NumBins; i++ {
if ls.SynCa20 {
ls.CaBins[i] = 0.25 * (rn.CaBins[i] + rn.CaBins[i-1]) * (sn.CaBins[i] + sn.CaBins[i-1])
} else {
ls.CaBins[i] = rn.CaBins[i] * sn.CaBins[i]
}
ls.Data.Column("Bins").SetFloatRow(float64(ls.CaBins[i]), row, off+i)
}
}
// Trial runs one trial
func (ls *Linear) Trial(sendMinusHz, sendPlusHz, recvMinusHz, recvPlusHz float32, ti, row int) {
// ls.ErrDWt = (plusHz - minusHz) / 100
ls.Data.Column("Trial").SetFloatRow(float64(ti), row, 0)
ls.Data.Column("Hz").SetFloatRow(float64(sendMinusHz), row, 0)
ls.Data.Column("Hz").SetFloatRow(float64(sendPlusHz), row, 1)
ls.Data.Column("Hz").SetFloatRow(float64(recvMinusHz), row, 2)
ls.Data.Column("Hz").SetFloatRow(float64(recvPlusHz), row, 3)
minusCycles := ls.Cycles - ls.PlusCycles
ls.StartTrial()
cyc := 0
for phs := 0; phs < 2; phs++ {
var maxcyc int
var rhz, shz float32
switch phs {
case 0:
rhz = recvMinusHz
shz = sendMinusHz
maxcyc = minusCycles
case 1:
rhz = recvPlusHz
shz = sendPlusHz
maxcyc = ls.PlusCycles
}
Rint := math32.Exp(-1000.0 / float32(rhz))
Sint := math32.Exp(-1000.0 / float32(shz))
for t := 0; t < maxcyc; t++ {
ls.Cycle(&ls.Send, Sint, cyc)
ls.Cycle(&ls.Recv, Rint, cyc)
ls.StdSyn.CaSyn = 8 * ls.Send.CaSyn * ls.Recv.CaSyn // CaGain factor of 8 here; 12 is the standard value
ls.CaSpike.Dt.FromCa(ls.StdSyn.CaSyn, &ls.StdSyn.CaM, &ls.StdSyn.CaP, &ls.StdSyn.CaD)
cyc++
}
}
ls.StdSyn.DWt = ls.StdSyn.CaP - ls.StdSyn.CaD
ls.SetSynState(&ls.StdSyn, row)
ls.SetBins(&ls.Send, &ls.Recv, 0, row)
}
// Regress runs the linear regression on the data
func (ls *Linear) Regress() {
r := glm.NewGLM()
err := r.SetTable(&ls.Data, "Bins", "SynCa", "PredCa", "ErrCa")
if err != nil {
slog.Error(err.Error())
return
}
r.DepNames = []string{"CaP", "CaD"}
r.L1Cost = 0.1
r.L2Cost = 0.1
r.StopTolerance = 0.00001
r.ZeroOffset = true
// default coefficients are the current ones..
cp := make([]float32, ls.NumBins)
cd := make([]float32, ls.NumBins)
kinase.CaBinWts(ls.PlusCycles, cp, cd)
cp = append(cp, 0)
cd = append(cd, 0)
cp64 := make([]float64, ls.NumBins+1)
cd64 := make([]float64, ls.NumBins+1)
for i := range ls.NumBins + 1 {
cp64[i] = float64(cp[i])
cd64[i] = float64(cd[i])
}
r.Coeff.Values = append(cp64, cd64...)
// NBins = 8, 200+50 cycles for CaSyn
// r.Coeff.Values = []float64{
// 0.1, 0.4, 0.5, 0.6, 0.7, 0.8, 1.9, 3.0, 0, // big at the end; insensitive to start
// 0.35, 0.65, 0.95, 1.25, 1.25, 1.25, 1.125, 1.0, .0} // up and down
// NBins = 12, 300+50 cycles for CaSyn
// r.Coeff.Values = []float64{
// 0, 0, 0, 0, 0.1, 0.4, 0.5, 0.6, 0.7, 0.8, 1.9, 3.0, 0, // big at the end; insensitive to start
// 0, 0, 0, 0, 0.35, 0.65, 0.95, 1.25, 1.25, 1.25, 1.125, 1.0, .0} // up and down
prc := func() string {
s := "CaP:\t"
for i := range ls.NumBins {
s += fmt.Sprintf("%7.4f\t", r.Coeff.Values[i])
}
s += "\nCaD:\t"
for i := range ls.NumBins {
s += fmt.Sprintf("%7.4f\t", r.Coeff.Values[i+ls.NumBins+1])
}
return s
}
start := prc()
startCaP := slices.Clone(r.Coeff.Values[:ls.NumBins])
startCaD := slices.Clone(r.Coeff.Values[ls.NumBins+1 : 2*ls.NumBins+1])
r.Run()
fmt.Println(r.Variance())
fmt.Println("Starting Coeff:")
fmt.Println(start)
fmt.Println("Final Coeff:")
fmt.Println(prc())
endCaP := slices.Clone(r.Coeff.Values[:ls.NumBins])
endCaD := slices.Clone(r.Coeff.Values[ls.NumBins+1 : 2*ls.NumBins+1])
estr := "synca10"
if ls.SynCa20 {
estr = "synca20"
}
esfn := strings.ToLower(estr)
plt := plot.New()
plt.SetSize(image.Point{1280, 1024})
plots.NewLine(plt, tensor.NewFloat64FromValues(startCaP...)).Styler(func(s *plot.Style) {
s.Plot.Scale = 2
s.Plot.Title = "CaP Linear Regression Coefficients: " + estr
s.Plot.XAxis.Label = "Bins"
s.Label = "Starting"
})
plots.NewLine(plt, tensor.NewFloat64FromValues(endCaP...)).Styler(func(s *plot.Style) {
s.Label = "Final"
})
imagex.Save(plt.RenderImage(), "plot-coefficients-cap-"+esfn+".png")
plt = plot.New()
plt.SetSize(image.Point{1280, 1024})
plots.NewLine(plt, tensor.NewFloat64FromValues(startCaD...)).Styler(func(s *plot.Style) {
s.Plot.Scale = 2
s.Plot.Title = "CaD Linear Regression Coefficients: " + estr
s.Plot.XAxis.Label = "Bins"
s.Label = "Starting"
})
plots.NewLine(plt, tensor.NewFloat64FromValues(endCaD...)).Styler(func(s *plot.Style) {
s.Label = "Final"
})
imagex.Save(plt.RenderImage(), "plot-coefficients-cad-"+esfn+".png")
/*
for vi := 0; vi < 2; vi++ {
r := new(regression.Regression)
r.SetObserved("CaD")
for bi := 0; bi < ls.NumBins; bi++ {
r.SetVar(bi, fmt.Sprintf("Bin_%d", bi))
}
for row := 0; row < ls.Data.Rows; row++ {
st := ls.Data.Tensor("Bins", row).(*tensor.Float64)
cad := ls.Data.TensorFloat1D("SynCa", row, vi)
r.Train(regression.DataPoint(cad, st.Values))
}
r.Run()
fmt.Printf("Regression formula:\n%v\n", r.Formula)
fmt.Printf("Variance observed = %v\nVariance Predicted = %v", r.Varianceobserved, r.VariancePredicted)
fmt.Printf("\nR2 = %v\n", r.R2)
str := "{"
for ci := 0; ci <= ls.NumBins; ci++ {
str += fmt.Sprintf("%8.6g, ", r.Coeff(ci))
}
fmt.Println(str + "}")
}
*/
// ls.Data.SaveCSV("linear_data.tsv", tensor.Tab, table.Headers)
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinase
//go:generate core generate -add-types -gosl
//gosl:start
// CaDtParams has rate constants for integrating Ca (calcium)
// at different time scales, including the final CaP = CaMKII and CaD = DAPK1
// timescales for LTP potentiation vs. LTD depression factors.
type CaDtParams struct { //types:add
// CaM (calmodulin) time constant in cycles (msec),
// which is the first level integration.
// For CaLearn, 2 is best; for CaSpk, 5 is best.
// For synaptic-level integration this integrates on top of Ca
// signal from send->CaSyn * recv->CaSyn, each of which are
// typically integrated with a 30 msec Tau.
MTau float32 `default:"2,5" min:"1"`
// LTP spike-driven potentiation Ca factor (CaP) time constant
// in cycles (msec), simulating CaMKII in the Kinase framework,
// cascading on top of MTau.
// Computationally, CaP represents the plus phase learning signal that
// reflects the most recent past information.
// Value tracks linearly with number of cycles per learning trial:
// 200 = 40, 300 = 60, 400 = 80
PTau float32 `default:"40,60,80" min:"1"`
// LTD spike-driven depression Ca factor (CaD) time constant
// in cycles (msec), simulating DAPK1 in Kinase framework,
// cascading on top of PTau.
// Computationally, CaD represents the minus phase learning signal that
// reflects the expectation representation prior to experiencing the
// outcome (in addition to the outcome).
// Value tracks linearly with number of cycles per learning trial:
// 200 = 40, 300 = 60, 400 = 80
DTau float32 `default:"40,60,80" min:"1"`
// rate = 1 / tau
MDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
// rate = 1 / tau
PDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
// rate = 1 / tau
DDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
pad, pad1 int32
}
func (kp *CaDtParams) Defaults() {
kp.MTau = 5
kp.PTau = 40
kp.DTau = 40
kp.Update()
}
func (kp *CaDtParams) Update() {
kp.MDt = 1 / kp.MTau
kp.PDt = 1 / kp.PTau
kp.DDt = 1 / kp.DTau
}
// FromCa updates CaM, CaP, CaD from the given current calcium value,
// which is typically itself a faster time-integral of calcium.
func (kp *CaDtParams) FromCa(ca float32, caM, caP, caD *float32) {
*caM += kp.MDt * (ca - *caM)
*caP += kp.PDt * (*caM - *caP)
*caD += kp.DDt * (*caP - *caD)
}
// CaSpikeParams parameterizes the neuron-level spike-driven calcium
// signals, including CaM, CaP, CaD for basic activity stats and RLRate, and
// CaSyn which is integrated at the neuron level and drives synapse-level,
// pre * post Ca integration, providing the Tr credit assignment trace factor
// for kinase error-driven cortical learning.
type CaSpikeParams struct {
// SpikeCaM is the drive factor for updating the neuron-level CaM (calmodulin)
// based on a spike impulse, which is then cascaded into updating the
// CaP and CaD values. These values are used for stats and RLRate computation,
// but do not drive learning directly. Larger values (e.g., 12) may be useful
// in larger models.
SpikeCaM float32 `default:"8,12"`
// SpikeCaSyn is the drive factor for updating the neuron-level CaSyn
// synaptic calcium trace value based on a spike impulse. CaSyn is integrated
// into CaBins which are then used to compute synapse-level pre * post
// Ca values over the theta cycle, which then drive the Tr credit assignment
// trace factor for kinase error-driven cortical learning. Changes in this
// value will affect the net learning rate. Generally tracks SpikeCaM.
SpikeCaSyn float32 `default:"8,12"`
// CaSynTau is the time constant for integrating the spike-driven calcium
// trace CaSyn at sender and recv neurons. See SpikeCaSyn for more info.
// If this param is changed, then there will be a change in effective
// learning rate that can be compensated for by multiplying
// CaScale by sqrt(30 / sqrt(SynTau)).
CaSynTau float32 `default:"30" min:"1"`
// CaSynDt rate = 1 / tau
CaSynDt float32 `display:"-" json:"-" xml:"-" edit:"-"`
// Dt are time constants for integrating Spike-driven Ca across CaM, CaP and CaD
// cascading levels. Typically the same as in LearnCa parameters.
Dt CaDtParams `display:"inline"`
}
func (sp *CaSpikeParams) Defaults() {
sp.SpikeCaM = 8
sp.SpikeCaSyn = 8
sp.CaSynTau = 30
sp.Dt.Defaults()
sp.Update()
}
func (sp *CaSpikeParams) Update() {
sp.CaSynDt = 1 / sp.CaSynTau
sp.Dt.Update()
}
// CaMFromSpike updates CaM, CaP, CaD variables from spike input,
// which is either 0 or 1.
func (sp *CaSpikeParams) CaMFromSpike(spike float32, caM, caP, caD *float32) {
ca := sp.SpikeCaM * spike
sp.Dt.FromCa(ca, caM, caP, caD)
}
// CaSynFromSpike returns new CaSyn value based on spike input,
// which is either 0 or 1, and current CaSyn value.
func (sp *CaSpikeParams) CaSynFromSpike(spike float32, caSyn float32) float32 {
ca := sp.SpikeCaSyn * spike
return caSyn + sp.CaSynDt*(ca-caSyn)
}
//gosl:end
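// The following sketch is illustrative only (not part of the original package):
// it shows how the spike-driven cascade above is typically stepped each cycle,
// with CaMFromSpike updating the neuron-level CaM/CaP/CaD stats and
// CaSynFromSpike updating the CaSyn trace that feeds synaptic learning.
// The regular ~100 Hz spike train used here is an arbitrary choice.
func spikeCascadeSketch(nCycles int) (caM, caP, caD, caSyn float32) {
	var sp CaSpikeParams
	sp.Defaults()
	for cyc := 0; cyc < nCycles; cyc++ {
		spike := float32(0)
		if cyc%10 == 0 { // roughly 100 Hz regular spiking, for illustration
			spike = 1
		}
		sp.CaMFromSpike(spike, &caM, &caP, &caD) // neuron-level stats cascade
		caSyn = sp.CaSynFromSpike(spike, caSyn)  // trace driving synaptic Ca
	}
	return
}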
// PDTauForNCycles sets the PTau and DTau parameters in proportion to the
// total number of cycles per theta learning trial, e.g., 200 = 40, 280 = 56
func (kp *CaDtParams) PDTauForNCycles(ncycles int) {
tau := 40 * (float32(ncycles) / float32(200))
kp.PTau = tau
kp.DTau = tau
kp.Update()
}
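// For example (illustrative only), the linear scaling above gives:
//
//	var kp CaDtParams
//	kp.Defaults()
//	kp.PDTauForNCycles(200) // PTau = DTau = 40
//	kp.PDTauForNCycles(280) // PTau = DTau = 56
//	kp.PDTauForNCycles(400) // PTau = DTau = 80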
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinase
import (
"cogentcore.org/core/math32"
)
// CaBinWts generates the weighting factors for integrating [CaBins] neuron
// level SynCa that have been multiplied send * recv to generate a synapse-level
// synaptic calcium coincidence factor, used for the trace in the kinase learning rule.
// There are separate weights for two time scales of integration: CaP and CaD (cp, cd).
// PlusCycles is the number of cycles in the final plus phase, which determines shape.
// These values are precomputed for given fixed thetaCycles and plusCycles values.
// Fortunately, one set of regression weights works reasonably for the different
// envelope values.
func CaBinWts(plusCycles int, cp, cd []float32) {
nplus := int(math32.Round(float32(plusCycles) / 10))
caBinWts(nplus, cp, cd)
}
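// Illustrative usage sketch (not part of the original package): generating the
// bin weights for a standard trial. The bin count here assumes 10-cycle bins
// over 200 minus + 50 plus cycles (25 bins total), matching the rescale factor
// used in caBinWts below; actual callers size the slices from their own config.
func caBinWtsSketch() (cp, cd []float32) {
	const plusCycles = 50
	const nBins = 25 // (200 + 50) cycles / 10 cycles per bin
	cp = make([]float32, nBins)
	cd = make([]float32, nBins)
	CaBinWts(plusCycles, cp, cd)
	return
}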
// caBinWts generates the weighting factors for integrating [CaBins] neuron
// level SynCa that have been multiplied send * recv to generate a synapse-level
// synaptic calcium coincidence factor, used for the trace in the kinase learning rule.
// There are separate weights for two time scales of integration: CaP and CaD.
// nplus is the number of ca bins associated with the plus phase,
// which sets the natural timescale of the integration: total ca bins can
// be proportional to the plus phase (e.g., 4x for standard 200 / 50 total / plus),
// or longer if there is a longer minus phase window (which is downweighted).
func caBinWts(nplus int, cp, cd []float32) {
n := len(cp)
nminus := n - nplus
// CaP target: [0.1, 0.4, 0.5, 0.6, 0.7, 0.8, 1.7, 3.1]
end := float32(3.4)
start := float32(0.84)
inc := float32(end-start) / float32(nplus)
cur := float32(start) + inc
for i := nminus; i < n; i++ {
cp[i] = cur
cur += inc
}
// prior two nplus windows ("middle") go up from .5 to .8
inc = float32(.3) / float32(2*nplus-1)
mid := n - 3*nplus
cur = start
for i := nminus - 1; i >= mid; i-- {
cp[i] = cur
cur -= inc
}
// then drop off at .7 per plus phase window
inc = float32(.7) / float32(nplus)
for i := mid - 1; i >= 0; i-- {
cp[i] = cur
cur -= inc
if cur < 0 {
cur = 0
}
}
// CaD target: [0.35 0.65 0.95 1.25 1.25 1.25 1.125 1.0]
// CaD drops off in plus
base := float32(1.46)
inc = float32(.22) / float32(nplus)
cur = base - inc
for i := nminus; i < n; i++ {
cd[i] = cur
cur -= inc
}
// is steady at 1.25 in the previous plus chunk
pplus := nminus - nplus
for i := nminus - 1; i >= pplus; i-- {
cd[i] = base
}
// then drops off again to .3
inc = float32(1.2) / float32(nplus+1)
cur = base
for i := pplus - 1; i >= 0; i-- {
cd[i] = cur
cur -= inc
if cur < 0 {
cur = 0
}
}
// rescale for bin size: original bin targets are set for 25 cycles
scale := float32(10) / float32(25)
var cpsum, cdsum float32
for i := range n {
cp[i] *= scale
cd[i] *= scale
cpsum += cp[i]
cdsum += cd[i]
}
// fmt.Println(cpsum, cdsum, cdsum/cpsum)
}
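// Illustrative sketch (an assumption about usage, not the axon implementation):
// the cp / cd weights produced above act as a weighted sum over the per-bin
// send * recv CaBin products to yield synapse-level CaP and CaD, whose
// difference drives the kinase trace learning rule.
// All slices are assumed to have the same length (number of bins).
func applyBinWtsSketch(sendBins, recvBins, cp, cd []float32) (caP, caD float32) {
	for i := range cp {
		syn := sendBins[i] * recvBins[i] // synaptic coincidence for this bin
		caP += cp[i] * syn
		caD += cd[i] * syn
	}
	return
}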
// Theta200plus50 sets bin weights for a theta cycle learning trial of 200 cycles
// and a plus phase of 50
// func (kp *SynCaLinear) Theta200plus50() {
// // todo: compute these weights into GlobalScalars. Normalize?
// kp.CaP.Init(0.3, 0.4, 0.55, 0.65, 0.75, 0.85, 1.0, 1.0) // linear progression
// kp.CaD.Init(0.5, 0.65, 0.75, 0.9, 0.9, 0.9, 0.65, 0.55) // up and down
// }
//
// // Theta280plus70 sets bin weights for a theta cycle learning trial of 280 cycles
// // and a plus phase of 70, with PTau & DTau at 56 (PDTauForNCycles)
// func (kp *SynCaLinear) Theta280plus70() {
// kp.CaP.Init(0.0, 0.1, 0.23, 0.35, 0.45, 0.55, 0.75, 0.75)
// kp.CaD.Init(0.2, 0.3, 0.4, 0.5, 0.5, 0.5, 0.4, 0.3)
// }
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinasex
import "cogentcore.org/core/math32"
// ContSyn holds extra synaptic state for continuous learning
type ContSyn struct {
// transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place
TDWt float32
// maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak
CaDMax float32
}
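// Illustrative sketch (hypothetical helper, not part of the original package):
// one way the fields above could gate committing TDWt into the effective DWt,
// once CaD has dropped by decayProp from its tracked CaDMax peak. The
// decayProp and dwt names are illustrative assumptions.
func (sy *ContSyn) commitSketch(caD, decayProp float32, dwt *float32) {
	if caD > sy.CaDMax {
		sy.CaDMax = caD // still rising: track the new peak
		return
	}
	if sy.CaDMax > 0 && caD < (1-decayProp)*sy.CaDMax {
		*dwt += sy.TDWt // activity has subsided enough: commit the pending change
		sy.TDWt = 0
		sy.CaDMax = 0
	}
}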
// VarByName returns synapse variable by name
func (sy *ContSyn) VarByName(varNm string) float32 {
switch varNm {
case "TDWt":
return sy.TDWt
case "CaDMax":
return sy.CaDMax
}
return math32.NaN()
}
// VarByIndex returns synapse variable by index
func (sy *ContSyn) VarByIndex(varIndex int) float32 {
switch varIndex {
case 0:
return sy.TDWt
case 1:
return sy.CaDMax
}
return math32.NaN()
}
var ContSynVars = []string{"TDWt", "CaDMax"}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package nxx1 provides the Noisy-X-over-X-plus-1 activation function that well-characterizes
the neural response function empirically, as a saturating sigmoid-like nonlinear response
with an initial largely linear regime.
The basic x/(x+1) sigmoid function is convolved with a gaussian noise kernel to produce
a better approximation of the effects of noise on neural firing -- the main effect is
to create a continuous graded early level of firing even slightly below threshold, softening
the otherwise hard transition to firing at threshold.
A hand-optimized piece-wise function approximation is used to generate the NXX1 function
instead of requiring a lookup table of the gaussian convolution. This is much easier
// to use across a range of computational platforms including GPUs, and produces very similar
overall values.
*/
package nxx1
//go:generate core generate -add-types -gosl
import (
"cogentcore.org/core/math32"
)
// Params are the Noisy X/(X+1) rate-coded activation function parameters.
// This function well-characterizes the neural response function empirically,
// as a saturating sigmoid-like nonlinear response with an initial largely linear regime.
// The basic x/(x+1) sigmoid function is convolved with a gaussian noise kernel to produce
// a better approximation of the effects of noise on neural firing -- the main effect is
// to create a continuous graded early level of firing even slightly below threshold, softening
// the otherwise hard transition to firing at threshold.
// A hand-optimized piece-wise function approximation is used to generate the NXX1 function
// instead of requiring a lookup table of the gaussian convolution. This is much easier
// to use across a range of computational platforms including GPUs, and produces very similar
// overall values.
type Params struct {
// threshold value Theta (Q) for firing output activation (.5 is the more accurate value based on AdEx biological parameters and normalization)
Thr float32 `default:"0.5"`
// gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network
Gain float32 `default:"80,100,40,20" min:"0"`
// variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function
NVar float32 `default:"0.005,0.01" min:"0"`
// threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)
VmActThr float32 `default:"0.01"`
// multiplier on sigmoid used for computing values for net < thr
SigMult float32 `default:"0.33" display:"-" json:"-" xml:"-"`
// power for computing sig_mult_eff as function of gain * nvar
SigMultPow float32 `default:"0.8" display:"-" json:"-" xml:"-"`
// gain multiplier on (net - thr) for sigmoid used for computing values for net < thr
SigGain float32 `default:"3" display:"-" json:"-" xml:"-"`
// interpolation range above zero to use interpolation
InterpRange float32 `default:"0.01" display:"-" json:"-" xml:"-"`
// range in units of nvar over which to apply gain correction to compensate for convolution
GainCorRange float32 `default:"10" display:"-" json:"-" xml:"-"`
// gain correction multiplier -- how much to correct gains
GainCor float32 `default:"0.1" display:"-" json:"-" xml:"-"`
// sig_gain / nvar
SigGainNVar float32 `display:"-" json:"-" xml:"-"`
// overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)
SigMultEff float32 `display:"-" json:"-" xml:"-"`
// 0.5 * sig_mult_eff -- used for interpolation portion
SigValAt0 float32 `display:"-" json:"-" xml:"-"`
// function value at interp_range - sig_val_at_0 -- for interpolation
InterpVal float32 `display:"-" json:"-" xml:"-"`
}
func (xp *Params) Update() {
xp.SigGainNVar = xp.SigGain / xp.NVar
xp.SigMultEff = xp.SigMult * math32.Pow(xp.Gain*xp.NVar, xp.SigMultPow)
xp.SigValAt0 = 0.5 * xp.SigMultEff
xp.InterpVal = xp.XX1GainCor(xp.InterpRange) - xp.SigValAt0
}
func (xp *Params) Defaults() {
xp.Thr = 0.5
xp.Gain = 100
xp.NVar = 0.005
xp.VmActThr = 0.01
xp.SigMult = 0.33
xp.SigMultPow = 0.8
xp.SigGain = 3.0
xp.InterpRange = 0.01
xp.GainCorRange = 10.0
xp.GainCor = 0.1
xp.Update()
}
// XX1 computes the basic x/(x+1) function
func (xp *Params) XX1(x float32) float32 { return x / (x + 1) }
// XX1GainCor computes x/(x+1) with gain correction within GainCorRange
// to compensate for convolution effects
func (xp *Params) XX1GainCor(x float32) float32 {
gainCorFact := (xp.GainCorRange - (x / xp.NVar)) / xp.GainCorRange
if gainCorFact < 0 {
return xp.XX1(xp.Gain * x)
}
newGain := xp.Gain * (1 - xp.GainCor*gainCorFact)
return xp.XX1(newGain * x)
}
// NoisyXX1 computes the Noisy x/(x+1) function -- directly computes close approximation
// to x/(x+1) convolved with a gaussian noise function with variance nvar.
// No need for a lookup table -- very reasonable approximation for standard range of parameters
// (nvar = .01 or less -- higher values of nvar are less accurate with large gains,
// but ok for lower gains)
func (xp *Params) NoisyXX1(x float32) float32 {
if x < 0 { // sigmoidal for < 0
return xp.SigMultEff / (1 + math32.FastExp(-(x * xp.SigGainNVar)))
} else if x < xp.InterpRange {
interp := 1 - ((xp.InterpRange - x) / xp.InterpRange)
return xp.SigValAt0 + interp*xp.InterpVal
} else {
return xp.XX1GainCor(x)
}
}
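// Illustrative usage sketch (not part of the original package): evaluating the
// three regimes of NoisyXX1 with default parameters. The specific x values are
// arbitrary; the comments describe which branch of the piecewise function applies.
func noisyXX1Sketch() [3]float32 {
	var xp Params
	xp.Defaults() // Thr = 0.5, Gain = 100, NVar = 0.005, InterpRange = 0.01
	var out [3]float32
	out[0] = xp.NoisyXX1(-0.02) // x < 0: soft sigmoidal tail, small but nonzero
	out[1] = xp.NoisyXX1(0.005) // 0 <= x < InterpRange: linear interpolation region
	out[2] = xp.NoisyXX1(0.05)  // x >= InterpRange: gain-corrected x/(x+1) saturation
	return out
}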
// XX1GainCorGain computes x/(x+1) with gain correction within GainCorRange
// to compensate for convolution effects -- using external gain factor
func (xp *Params) XX1GainCorGain(x, gain float32) float32 {
gainCorFact := (xp.GainCorRange - (x / xp.NVar)) / xp.GainCorRange
if gainCorFact < 0 {
return xp.XX1(gain * x)
}
newGain := gain * (1 - xp.GainCor*gainCorFact)
return xp.XX1(newGain * x)
}
// NoisyXX1Gain computes the noisy x/(x+1) function -- directly computes close approximation
// to x/(x+1) convolved with a gaussian noise function with variance nvar.
// No need for a lookup table -- very reasonable approximation for standard range of parameters
// (nvar = .01 or less -- higher values of nvar are less accurate with large gains,
// but ok for lower gains). Using external gain factor.
func (xp *Params) NoisyXX1Gain(x, gain float32) float32 {
if x < xp.InterpRange {
sigMultEffArg := xp.SigMult * math32.Pow(gain*xp.NVar, xp.SigMultPow)
sigValAt0Arg := 0.5 * sigMultEffArg
if x < 0 { // sigmoidal for < 0
return sigMultEffArg / (1 + math32.FastExp(-(x * xp.SigGainNVar)))
} else { // else x < interp_range
interp := 1 - ((xp.InterpRange - x) / xp.InterpRange)
return sigValAt0Arg + interp*xp.InterpVal
}
} else {
return xp.XX1GainCorGain(x, gain)
}
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// bench runs a benchmark model with 5 layers (3 hidden, Input, Output) all of the same
// size, for benchmarking different size networks. These are not particularly realistic
// models for actual applications (e.g., large models tend to have much more topographic
// patterns of connectivity and larger layers with fewer connections), but they are
// easy to run..
package bench
import (
"fmt"
"math"
"math/rand"
"cogentcore.org/core/base/timer"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/table"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/paths"
)
// note: with 2 hidden layers, this simple test case converges to perfect performance:
// ./bench -epochs 100 -pats 10 -units 100 -threads=1
// so these params below are reasonable for actually learning (eventually)
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.08
ly.Inhib.Layer.Gi = 1.05
ly.Acts.Gbar.L = 20
}},
{Sel: "#Input", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Acts.Clamp.Ge = 1.5
}},
{Sel: "#Output", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.70
ly.Acts.Clamp.Ge = 0.8
}},
},
}
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.1 // 0.1 is default, 0.05 for TrSpk = .5
pt.SWts.Adapt.LRate = 0.1 // .1 >= .2,
pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
}},
},
}
func ConfigNet(net *axon.Network, ctx *axon.Context, threads, units int, verbose bool) {
squn := int(math.Sqrt(float64(units)))
shp := []int{squn, squn}
inLay := net.AddLayer("Input", axon.InputLayer, shp...)
hid1Lay := net.AddLayer("Hidden1", axon.SuperLayer, shp...)
hid2Lay := net.AddLayer("Hidden2", axon.SuperLayer, shp...)
hid3Lay := net.AddLayer("Hidden3", axon.SuperLayer, shp...)
outLay := net.AddLayer("Output", axon.TargetLayer, shp...)
full := paths.NewFull()
net.ConnectLayers(inLay, hid1Lay, full, axon.ForwardPath)
net.BidirConnectLayers(hid1Lay, hid2Lay, full)
net.BidirConnectLayers(hid2Lay, hid3Lay, full)
net.BidirConnectLayers(hid3Lay, outLay, full)
net.RecFunTimes = verbose
// builds with default threads
if err := net.Build(); err != nil {
panic(err)
}
net.Defaults()
axon.ApplyParamSheets(net, LayerParams["Base"], PathParams["Base"])
if threads == 0 {
if verbose {
fmt.Print("Threading: using default values\n")
}
} else {
net.SetNThreads(threads)
}
net.InitWeights()
}
func ConfigPats(dt *table.Table, pats, units int) {
squn := int(math.Sqrt(float64(units)))
shp := []int{squn, squn}
// fmt.Printf("shape: %v\n", shp)
dt.AddStringColumn("Name")
dt.AddFloat32Column("Input", shp...)
dt.AddFloat32Column("Output", shp...)
dt.SetNumRows(pats)
// note: actually can learn if activity is .15 instead of .25
nOn := units / 8
minDiff := nOn / 2
patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], nOn, 1, 0, minDiff)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[2], nOn, 1, 0, minDiff)
}
func ConfigEpcLog(dt *table.Table) {
dt.AddIntColumn("Epoch")
dt.AddFloat32Column("PhaseDiff")
dt.AddFloat32Column("AvgPhaseDiff")
dt.AddFloat32Column("SSE")
dt.AddFloat32Column("CountErr")
dt.AddFloat32Column("PctErr")
dt.AddFloat32Column("PctCor")
dt.AddFloat32Column("Hid1ActAvg")
dt.AddFloat32Column("Hid2ActAvg")
dt.AddFloat32Column("OutActAvg")
}
func TrainNet(net *axon.Network, ctx *axon.Context, pats, epcLog *table.Table, epcs int, verbose, gpu bool) {
if gpu {
// gpu.SetDebug(true)
axon.GPUInit()
axon.UseGPU = true
}
net.InitWeights()
np := pats.NumRows()
porder := rand.Perm(np) // randomly permuted order of ints
epcLog.SetNumRows(epcs)
inLay := net.LayerByName("Input")
hid1Lay := net.LayerByName("Hidden1")
hid2Lay := net.LayerByName("Hidden2")
outLay := net.LayerByName("Output")
inPats := pats.Column("Input")
outPats := pats.Column("Output")
cycPerQtr := 50
cycPerStep := 50
tmr := timer.Time{}
tmr.Start()
for epc := 0; epc < epcs; epc++ {
randx.PermuteInts(porder)
outPhaseDiff := float32(0)
cntErr := 0
sse := 0.0
for pi := 0; pi < np; pi++ {
net.ThetaCycleStart(etime.Train, false)
net.MinusPhaseStart()
ppi := porder[pi]
inp := inPats.SubSpace(ppi)
outp := outPats.SubSpace(ppi)
inLay.ApplyExt(0, inp)
outLay.ApplyExt(0, outp)
net.ApplyExts()
for qtr := 0; qtr < 4; qtr++ {
for cyc := 0; cyc < cycPerQtr; cyc++ {
for range cycPerStep {
net.Cycle(false)
}
cyc += cycPerStep - 1
}
if qtr == 2 {
net.MinusPhaseEnd()
net.PlusPhaseStart()
}
}
net.PlusPhaseEnd()
net.DWtToWt()
phasedif := axon.LayerStates.Value(int(outLay.Index), int(0), int(axon.LayerPhaseDiff))
outPhaseDiff += 1.0 - phasedif
pSSE := outLay.PctUnitErr(ctx)[0]
sse += pSSE
if pSSE != 0 {
cntErr++
}
}
outPhaseDiff /= float32(np)
sse /= float64(np)
pctErr := float64(cntErr) / float64(np)
pctCor := 1 - pctErr
t := tmr.Stop()
tmr.Start()
if verbose {
fmt.Printf("epc: %v \tPhaseDiff: %v \tTime:%v\n", epc, outPhaseDiff, t)
}
epcLog.Column("Epoch").SetFloat1D(float64(epc), epc)
epcLog.Column("PhaseDiff").SetFloat1D(float64(outPhaseDiff), epc)
epcLog.Column("SSE").SetFloat1D(sse, epc)
epcLog.Column("CountErr").SetFloat1D(float64(cntErr), epc)
epcLog.Column("PctErr").SetFloat1D(pctErr, epc)
epcLog.Column("PctCor").SetFloat1D(pctCor, epc)
epcLog.Column("Hid1ActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, hid1Lay.Params.PoolIndex(0), 0)), epc)
epcLog.Column("Hid2ActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, hid2Lay.Params.PoolIndex(0), 0)), epc)
epcLog.Column("OutActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, outLay.Params.PoolIndex(0), 0)), epc)
}
tmr.Stop()
if verbose {
fmt.Printf("Took %v for %v epochs, avg per epc: %6.4g\n", tmr.Total, epcs, float64(tmr.Total)/float64(epcs))
net.TimerReport()
} else {
fmt.Printf("Total Secs: %v\n", tmr.Total)
}
axon.GPURelease()
}
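// Illustrative usage sketch (not part of the original benchmark): wiring the
// functions above together the way a benchmark driver might. It assumes that
// table.New() constructs an empty table in cogentcore.org/lab/table and that
// net.Context() returns the network context, as used elsewhere in this repo.
func runBenchSketch(units, pats, epcs, threads int, verbose, useGPU bool) {
	net := axon.NewNetwork("Bench")
	ctx := net.Context()
	patTable := table.New()
	epcLog := table.New()
	ConfigNet(net, ctx, threads, units, verbose)
	ConfigPats(patTable, pats, units)
	ConfigEpcLog(epcLog)
	TrainNet(net, ctx, patTable, epcLog, epcs, verbose, useGPU)
}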
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// benchlvis runs a benchmark network modeled on the LVIS object recognition
// architecture, with V1 -> V2 -> V4 -> TE -> Output layers across multiple
// parallel pathways, for benchmarking larger networks with more realistic,
// topographic connectivity patterns at different sizes.
package benchlvis
import (
"fmt"
"math/rand"
"cogentcore.org/core/base/timer"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/table"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/paths"
)
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.08
ly.Inhib.Layer.Gi = 1.05
ly.Acts.Gbar.L = 20
}},
{Sel: "#Input", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Acts.Clamp.Ge = 1.5
}},
{Sel: "#Output", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.70
ly.Acts.Clamp.Ge = 0.8
}},
},
}
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.005 // 0.005 is lvis default
pt.Learn.DWt.SubMean = 0 // 1 is very slow on AMD64 -- good to keep testing
pt.SWts.Adapt.LRate = 0.1 // .1 >= .2,
pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
}},
},
}
func ConfigNet(ctx *axon.Context, net *axon.Network, inputNeurs, inputPools, pathways, hiddenNeurs, outputDim, threads, maxData int, verbose bool) {
net.SetMaxData(maxData)
/*
* v1m6 ---> v2m16 <--> v4f16 <--> output
* '----------------^
*/
// construct the layers
// in LVIS: 16 x 16 x 5 x 4
v2Pools := inputPools / 2
v4Pools := v2Pools / 2
teNeurs := hiddenNeurs * 2
full := paths.NewFull()
sparseRandom := paths.NewUniformRand()
sparseRandom.PCon = 0.1
Path4x4Skp2 := paths.NewPoolTile()
// skip & size are measured in pools, not individual neurons
Path4x4Skp2.Size.Set(4, 4)
Path4x4Skp2.Skip.Set(2, 2) // skip: how many pools to move over
Path4x4Skp2.Start.Set(-1, -1)
Path4x4Skp2.TopoRange.Min = 0.8
Path4x4Skp2Recip := paths.NewPoolTileRecip(Path4x4Skp2)
_ = Path4x4Skp2Recip
var v1, v2, v4, te []*axon.Layer
v1 = make([]*axon.Layer, pathways)
v2 = make([]*axon.Layer, pathways)
v4 = make([]*axon.Layer, pathways)
te = make([]*axon.Layer, pathways)
outLay := net.AddLayer2D("Output", axon.TargetLayer, outputDim, outputDim)
for pi := 0; pi < pathways; pi++ {
pnm := fmt.Sprintf("%d", pi)
v1[pi] = net.AddLayer4D("V1_"+pnm, axon.InputLayer, inputPools, inputPools, inputNeurs, inputNeurs)
v2[pi] = net.AddLayer4D("V2_"+pnm, axon.SuperLayer, v2Pools, v2Pools, hiddenNeurs, hiddenNeurs)
v4[pi] = net.AddLayer4D("V4_"+pnm, axon.SuperLayer, v4Pools, v4Pools, hiddenNeurs, hiddenNeurs)
te[pi] = net.AddLayer2D("TE_"+pnm, axon.SuperLayer, teNeurs, teNeurs)
v1[pi].AddClass("V1m")
v2[pi].AddClass("V2m V2")
v4[pi].AddClass("V4")
net.ConnectLayers(v1[pi], v2[pi], Path4x4Skp2, axon.ForwardPath)
net.BidirConnectLayers(v2[pi], v4[pi], Path4x4Skp2)
net.ConnectLayers(v1[pi], v4[pi], sparseRandom, axon.ForwardPath).AddClass("V1SC")
net.BidirConnectLayers(v4[pi], te[pi], full)
net.BidirConnectLayers(te[pi], outLay, full)
}
net.RecFunTimes = true // verbose -- always do
// builds with default threads
if err := net.Build(); err != nil {
panic(err)
}
net.Defaults()
axon.ApplyParamSheets(net, LayerParams["Base"], PathParams["Base"])
if threads == 0 {
if verbose {
fmt.Print("Threading: using default values\n")
}
} else {
net.SetNThreads(threads)
}
net.InitWeights()
}
func ConfigPats(pats *table.Table, numPats int, inputShape [2]int, outputShape [2]int) {
pats.AddStringColumn("Name")
pats.AddFloat32Column("Input", inputShape[:]...)
pats.AddFloat32Column("Output", outputShape[:]...)
pats.SetNumRows(numPats)
nOnIn := (inputShape[0] * inputShape[1]) / 16
nOnOut := 2
minDiff := nOnIn / 2
patterns.PermutedBinaryMinDiff(pats.Columns.Values[1], nOnIn, 1, 0, minDiff)
patterns.PermutedBinaryMinDiff(pats.Columns.Values[2], nOnOut, 1, 0, nOnOut)
}
func ConfigEpcLog(dt *table.Table) {
dt.AddIntColumn("Epoch")
dt.AddFloat32Column("PhaseDiff")
dt.AddFloat32Column("AvgPhaseDiff")
dt.AddFloat32Column("SSE")
dt.AddFloat32Column("CountErr")
dt.AddFloat32Column("PctErr")
dt.AddFloat32Column("PctCor")
dt.AddFloat32Column("V2ActAvg")
dt.AddFloat32Column("V4ActAvg")
dt.AddFloat32Column("TEActAvg")
dt.AddFloat32Column("OutActAvg")
}
func TrainNet(ctx *axon.Context, net *axon.Network, pats, epcLog *table.Table, pathways, epcs int, verbose, useGPU bool) {
if useGPU {
// gpu.SetDebug(true)
axon.GPUInit()
axon.UseGPU = true
}
net.InitWeights()
// if useGPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
np := pats.NumRows()
porder := rand.Perm(np) // randomly permuted order of ints
epcLog.SetNumRows(epcs)
var v1 []*axon.Layer
v1 = make([]*axon.Layer, pathways)
for pi := 0; pi < pathways; pi++ {
pnm := fmt.Sprintf("%d", pi)
v1[pi] = net.LayerByName("V1_" + pnm)
}
v2 := net.LayerByName("V2_0")
v4 := net.LayerByName("V4_0")
te := net.LayerByName("TE_0")
outLay := net.LayerByName("Output")
inPats := pats.Column("Input")
outPats := pats.Column("Output")
cycPerQtr := 50
cycPerStep := 50
tmr := timer.Time{}
tmr.Start()
for epc := 0; epc < epcs; epc++ {
randx.PermuteInts(porder)
outPhaseDiff := float32(0)
cntErr := 0
sse := 0.0
for pi := 0; pi < np; pi++ {
net.ThetaCycleStart(etime.Train, false)
net.MinusPhaseStart()
for di := uint32(0); di < ctx.NData; di++ {
epi := (pi + int(di)) % np
ppi := porder[epi]
inp := inPats.SubSpace(ppi)
outp := outPats.SubSpace(ppi)
for pwi := 0; pwi < pathways; pwi++ { // pwi: pathway index, to avoid shadowing the outer pattern index pi
v1[pwi].ApplyExt(di, inp)
}
outLay.ApplyExt(di, outp)
net.ApplyExts()
}
for qtr := 0; qtr < 4; qtr++ {
for cyc := 0; cyc < cycPerQtr; cyc++ {
for range cycPerStep {
net.Cycle(false)
}
cyc += cycPerStep - 1
}
if qtr == 2 {
net.MinusPhaseEnd()
net.PlusPhaseStart()
}
}
net.PlusPhaseEnd()
net.DWtToWt()
phasedif := axon.LayerStates.Value(int(outLay.Index), int(0), int(axon.LayerPhaseDiff))
outPhaseDiff += 1.0 - phasedif
pSSE := outLay.PctUnitErr(ctx)[0]
sse += pSSE
if pSSE != 0 {
cntErr++
}
}
outPhaseDiff /= float32(np)
sse /= float64(np)
pctErr := float64(cntErr) / float64(np)
pctCor := 1 - pctErr
t := tmr.Stop()
tmr.Start()
if verbose {
fmt.Printf("epc: %v \tPhaseDiff: %v \tTime:%v\n", epc, outPhaseDiff, t)
}
epcLog.Column("Epoch").SetFloat1D(float64(epc), epc)
epcLog.Column("PhaseDiff").SetFloat1D(float64(outPhaseDiff), epc)
epcLog.Column("SSE").SetFloat1D(sse, epc)
epcLog.Column("CountErr").SetFloat1D(float64(cntErr), epc)
epcLog.Column("PctErr").SetFloat1D(pctErr, epc)
epcLog.Column("PctCor").SetFloat1D(pctCor, epc)
epcLog.Column("V2ActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, v2.Params.PoolIndex(0), 0)), epc)
epcLog.Column("V4ActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, v4.Params.PoolIndex(0), 0)), epc)
epcLog.Column("TEActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, te.Params.PoolIndex(0), 0)), epc)
epcLog.Column("OutActAvg").SetFloat1D(float64(axon.PoolAvgMax(axon.AMAct, axon.AMMinus, axon.Avg, outLay.Params.PoolIndex(0), 0)), epc)
}
tmr.Stop()
if verbose {
fmt.Printf("Took %v for %v epochs, avg per epc: %6.4g\n", tmr.Total, epcs, float64(tmr.Total)/float64(epcs))
net.TimerReport()
} else {
fmt.Printf("Total Secs: %v\n", tmr.Total)
net.TimerReport()
}
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// bgdorsal simulates the dorsal Basal Ganglia, starting with the
// Dorsal Striatum, centered on the Pallidum Core (GPe) areas that
// drive Go vs. No selection of motor actions.
package bgdorsal
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"math"
"os"
"reflect"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/base/num"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/fsfffb"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Sequence
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func Embed(b tree.Node) { egui.Embed[Sim, Config](b) }
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
for di := 0; di < ss.Config.Run.NData; di++ {
var trn, tst *MotorSeqEnv
if newEnv {
trn = &MotorSeqEnv{}
tst = &MotorSeqEnv{}
} else {
trn = ss.Envs.ByModeDi(Train, di).(*MotorSeqEnv)
tst = ss.Envs.ByModeDi(Test, di).(*MotorSeqEnv)
}
// note: names must be standard here!
trn.Name = env.ModeDi(Train, di)
trn.Defaults()
trn.NActions = ss.Config.Env.NActions
trn.SeqLen = ss.Config.Env.SeqLen
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(Train, 73+int64(di)*73)
tst.Name = env.ModeDi(Test, di)
tst.Defaults()
tst.NActions = ss.Config.Env.NActions
tst.SeqLen = ss.Config.Env.SeqLen
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(Test, 181+int64(di)*181)
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
if di == 0 {
ss.ConfigRubicon(trn)
}
}
}
func (ss *Sim) ConfigRubicon(trn *MotorSeqEnv) {
rp := &ss.Net.Rubicon
rp.SetNUSs(2, 1)
rp.Urgency.U50 = 20 // 20 def
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).
SetSlowInterval(int32(ss.Config.Run.SlowInterval)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByModeDi(Train, 0).(*MotorSeqEnv)
np := 1
nu := ss.Config.Params.NUnits
nuPer := ev.NUnitsPer
nAct := ev.NActions
nActPool := ss.Config.Params.NActionPools
nSeq := ev.SeqLen
maxSeqAct := max(nAct, nSeq) // layer size
nuX := nu
nuY := nu
nuCtxY := ss.Config.Params.NCortexUnits
nuCtxX := ss.Config.Params.NCortexUnits
space := float32(2)
p1to1 := paths.NewPoolOneToOne()
_ = p1to1
one2one := paths.NewOneToOne()
_ = one2one
full := paths.NewFull()
_ = full
mtxRandPath := paths.NewUniformRand()
mtxRandPath.PCon = 0.5
_ = mtxRandPath
actPath := paths.NewPoolRect()
actPath.Size.Set(1, nActPool)
actPath.AutoScale = true
motorPFPath := paths.NewRect() // does wrap
motorPFPath.Size.Set(1, 1)
matrixGo, matrixNo, patchD1, patchD2, gpePr, gpeAk, stn, gpi, pf := net.AddDorsalBG("", ss.Config.Params.STNPools, nActPool, nAct, nuY, nuX, nuY, nuX, space)
_, _, _, _ = patchD1, patchD2, gpePr, gpeAk
snc := net.AddLayer2D("SNc", axon.InputLayer, 1, 1)
_ = snc
snc.Doc = "SNc (substantia nigra pars compacta) is the dopamine nucleus that projects to the dorsal BG. This is just for visualization of the dopamine signal: actual dopamine is managed internally via the DA parameter."
// this doesn't have much effect -- slightly worse
// pt := errors.Log1(gpePr.RecvPathBySendName("DGPePr")).(*axon.Path)
// pt.Pattern = p1to1
state := net.AddLayer4D("State", axon.InputLayer, 1, np, nuPer, maxSeqAct)
state.Doc = "State reflects the visual and other sensory state, which provides a timing input to the network, so the network just has to learn which action to perform for a given state, and does not have to generate its own internal timing signal."
s1 := net.AddLayer4D("S1", axon.InputLayer, 1, np, nuPer, nAct+1)
s1.Doc = "S1 is primary somatosenory cortex, which represents the previous motor action taken."
targ := net.AddLayer2D("Target", axon.InputLayer, nuPer, nAct) // Target: just for vis
targ.Doc = "Target is just for visualization, is not connected to anything, showing the correct motor action."
motor := net.AddLayer4D("MotorBS", axon.TargetLayer, 1, nAct, nuPer, 1)
motor.Doc = "Motor brainstem (BS) proxy layer, that performs the final action selection process based on all the higher-level inputs, using a softmax function. There are multiple redundant neurons per action (in the Y, column axis), and the different actions are in the X horizontal axis. The outputs are to the PF (parafasicular thalamus) layer, which feeds back into the striatum, and the VL thalamus as a plus-phase driver layer."
pf.Shape.SetShapeSizes(nActPool, nAct, nuPer, 1)
vl := net.AddPulvLayer4D("VL", 1, nAct, nuPer, 1) // VL predicts brainstem Action
vl.SetBuildConfig("DriveLayName", motor.Name)
vl.Doc = "VL (ventrolateral) thalamus is the main ascending motor thalamus pathway with inputs from all over the motor brainstem (i.e., MotorBS in this model), which provides predictive learning into M1 motor cortex: M1 predicts in the minus phase what MotorBS activates in the plus phase."
// bool before space is selfmaint or not: selfcons much better (false)
m1, m1CT, m1PT, m1PTp, m1VM := net.AddPFC2D("M1", "VM", nuCtxY, nuCtxX, false, false, space)
_ = m1PT
m1.Doc = "M1 is primary motor cortex, which receives from State and S1, and generates top-down predictions on the VL thalamus for which action will be finally selected by the MotorBS, based on inputs from M1 layers. M1 itself represents the superficial cortical portion of M1, with corticothalamic (CT, layer 6), and deep layer 5 (PT=pyramidal tract, PTp=predictive PT) providing stable and dynamic active maintenance, respectively."
m1.SetBuildConfig("GateLayName", m1VM.Name)
m1CT.SetBuildConfig("GateLayName", m1VM.Name)
m1PT.SetBuildConfig("GateLayName", m1VM.Name)
m1PTp.SetBuildConfig("GateLayName", m1VM.Name)
m1VM.SetBuildConfig("GateLayName", m1VM.Name)
motor.SetBuildConfig("GateLayName", m1VM.Name)
// todo: M1PTp should be VL interconnected, prior to PT, not after it.
// vl is a predictive thalamus but we don't have direct access to its source
net.ConnectToPFC(nil, vl, m1, m1CT, m1PT, m1PTp, full, "VLM1") // m1 predicts vl
// these pathways are *essential* -- must get current state here
net.ConnectLayers(m1, vl, full, axon.ForwardPath).AddClass("VLM1")
net.ConnectLayers(gpi, motor, actPath, axon.InhibPath).AddClass("FmGPI")
net.ConnectLayers(m1PT, motor, full, axon.ForwardPath).AddClass("M1ToMotorBS ToMotor")
// net.ConnectLayers(m1PTp, motor, full, axon.ForwardPath).AddClass("M1ToMotorBS")
net.ConnectLayers(m1, motor, full, axon.ForwardPath).AddClass("M1ToMotorBS ToMotor")
net.ConnectLayers(motor, pf, motorPFPath, axon.ForwardPath)
net.ConnectLayers(state, stn, full, axon.ForwardPath).AddClass("ToDSTN FmState")
net.ConnectLayers(state, m1, full, axon.ForwardPath).AddClass("ToM1 FmState")
net.ConnectLayers(s1, stn, full, axon.ForwardPath).AddClass("ToDSTN")
net.ConnectLayers(s1, m1, full, axon.ForwardPath).AddClass("ToM1")
net.ConnectLayers(gpi, m1VM, full, axon.InhibPath).AddClass("DBGInhib")
matrixGo.SetBuildConfig("ThalLay1Name", m1VM.Name)
matrixNo.SetBuildConfig("ThalLay1Name", m1VM.Name)
toMatrix := full
// toMatrix := mtxRandPath // works, but not as reliably
net.ConnectToDSMatrix(state, matrixGo, matrixNo, toMatrix, "StateToMatrix", "FmState")
net.ConnectToDSMatrix(s1, matrixGo, matrixNo, toMatrix, "S1ToMatrix")
net.ConnectToDSMatrix(m1, matrixGo, matrixNo, toMatrix, "M1ToMatrix")
// better without:
// net.ConnectToDSMatrix(m1PT, matrixGo, matrixNo, toMatrix, "M1PTToMatrix")
// net.ConnectToDSMatrix(m1PTp, matrixGo, matrixNo, toMatrix, "M1PTpToMatrix")
net.ConnectToDSPatch(state, patchD1, patchD2, toMatrix, "StateToPatch", "FmState")
net.ConnectToDSPatch(s1, patchD1, patchD2, toMatrix, "S1ToPatch")
net.ConnectToDSPatch(m1, patchD1, patchD2, toMatrix, "M1ToPatch")
// better with:
net.ConnectToDSPatch(m1PT, patchD1, patchD2, toMatrix, "M1PTToPatch")
net.ConnectToDSPatch(m1PTp, patchD1, patchD2, toMatrix, "M1PTpToPatch")
// note: just using direct pathways here -- theoretically through CL
// TODO: not working! -- need to make these modulatory in the right way.
// net.ConnectToDSMatrix(motor, matrixGo, p1to1).AddClass("CLToMatrix")
// net.ConnectToDSMatrix(motor, matrixNo, p1to1).AddClass("CLToMatrix")
pf.PlaceRightOf(gpi, space)
snc.PlaceBehind(stn, space)
m1VM.PlaceRightOf(pf, space)
vl.PlaceRightOf(m1VM, space)
motor.PlaceBehind(vl, space)
targ.PlaceBehind(motor, space)
gpeAk.PlaceBehind(gpePr, space)
stn.PlaceRightOf(gpePr, space)
matrixGo.PlaceAbove(gpi)
matrixNo.PlaceBehind(matrixGo, space)
patchD1.PlaceBehind(matrixNo, space)
state.PlaceAbove(matrixGo)
s1.PlaceRightOf(state, space)
m1.PlaceRightOf(s1, space)
m1PT.PlaceRightOf(m1, space)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
// compensate for expected activity levels based on max seq len
lnms := []string{"State", "S1", "MotorBS", "VL"}
ev := ss.Envs.ByModeDi(Train, 0).(*MotorSeqEnv)
for _, lnm := range lnms {
ly := ss.Net.LayerByName(lnm)
// fmt.Println(ly.Params.Inhib.ActAvg.Nominal)
if lnm == "State" {
ly.Params.Inhib.ActAvg.Nominal = 0.5 / float32(max(ev.SeqLen, ev.NActions))
} else {
ly.Params.Inhib.ActAvg.Nominal = 0.5 / float32(ev.NActions)
}
}
}
func (ss *Sim) TurnOffTheNoise() {
return // not doing this now -- not better
matrixGo := ss.Net.LayerByName("MatrixGo")
if matrixGo.Params.Acts.Noise.On.IsFalse() {
return
}
ss.Params.ApplySheet(ss.Net, "NoiseOff")
fmt.Println("Turned noise off")
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
ss.ConfigEnv() // always do -- otherwise env params not reset after run
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
ev := ss.Envs.ByModeDi(Train, 0).(*MotorSeqEnv)
seqs := int(math32.IntMultipleGE(float32(ss.Config.Run.Sequences), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
seqLen := ev.SeqLen + 1 // 1 reward at end
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Sequence, seqs, ss.Config.Run.NData).
AddLevel(Trial, seqLen).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Sequence, seqs, ss.Config.Run.NData).
AddLevel(Trial, seqLen).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
for mode, st := range ls.Stacks {
plusPhase := st.Loops[Cycle].EventByName("MinusPhase:End")
plusPhase.OnEvent.InsertBefore("PlusPhase:Start", "TakeAction", func() bool {
// note: critical to have this happen *after* MinusPhase:End and *before* PlusPhase:Start
// because minus phase end has gated info, and plus phase start applies action input
ss.TakeAction(ss.Net, mode.(Modes))
return false
})
// plusPhase := st.Loops[Cycle].EventByName("MinusPhase:End")
// st.Loops[Trial].OnEnd.Prepend("TakeAction", func() bool {
// ss.TakeAction(ss.Net, mode.(Modes))
// return false
// })
}
ls.Loop(Train, Epoch).IsDone.AddBool("StopCrit", func() bool {
epcDir := ss.Stats.Dir(Train.String()).Dir(Epoch.String())
rew := epcDir.Value("Rew").Float1D(-1)
stop := rew >= 0.98
return stop
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ss.Net.InitExt()
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
lays := []string{"State", "S1", "Target", "SNc"}
states := []string{"State", "PrevAction", "Target", "SNc"}
for di := 0; di < ss.Config.Run.NData; di++ {
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
inRew := ev.IsRewTrial()
ev.Step()
for li, lnm := range lays {
snm := states[li]
ly := net.LayerByName(lnm)
itsr := ev.State(snm)
ly.ApplyExt(uint32(di), itsr)
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
ss.ApplyRubicon(ev, mode, inRew, uint32(di))
}
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs
func (ss *Sim) ApplyRubicon(ev *MotorSeqEnv, mode Modes, inRew bool, di uint32) {
rp := &ss.Net.Rubicon
rp.EffortUrgencyUpdate(di, 1)
rp.Urgency.Reset(di)
if inRew {
axon.GlobalScalars.Set(1, int(axon.GvACh), int(di))
ss.SetRew(ev.RPE, di)
} else {
axon.GlobalSetRew(di, 0, false) // no rew
axon.GlobalScalars.Set(0, int(axon.GvACh), int(di))
}
}
func (ss *Sim) SetRew(rew float32, di uint32) {
rp := &ss.Net.Rubicon
axon.GlobalSetRew(di, rew, true)
axon.GlobalScalars.Set(rew, int(axon.GvDA), int(di)) // no reward prediction error
if rew > 0 {
rp.SetUS(di, axon.Positive, 0, 1)
} else if rew < 0 {
rp.SetUS(di, axon.Negative, 0, 1)
}
}
// TakeAction takes action for this step, using decoded cortical action.
// Called at end of minus phase.
func (ss *Sim) TakeAction(net *axon.Network, mode Modes) {
for di := 0; di < ss.Config.Run.NData; di++ {
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
if !ev.IsRewTrialPostStep() {
netAct := ss.DecodeAct(ev, mode, di)
ev.Action(fmt.Sprintf("%d", netAct), nil)
ss.ApplyAction(mode, di)
}
}
ss.Net.ApplyExts() // required!
}
// DecodeAct decodes the MotorBS ActM state to find closest action pattern
func (ss *Sim) DecodeAct(ev *MotorSeqEnv, mode Modes, di int) int {
tsr := axon.StatsLayerValues(ss.Net, ss.Current, mode, di, "MotorBS", "ActM")
return ss.SoftMaxChoose4D(tsr, mode)
// return ss.HardChoose4D(tsr, mode)
}
// SoftMaxChoose2D probabilistically selects the column with the most activity in the layer,
// using a softmax with Config.Env.ActSoftMaxGain gain factor
func (ss *Sim) SoftMaxChoose2D(vt *tensor.Float64, mode Modes) int {
dy := vt.DimSize(0)
nact := vt.DimSize(1)
var tot float32
probs := make([]float32, nact)
for i := range probs {
var sum float64
for j := 0; j < dy; j++ {
sum += vt.Value(j, i)
}
p := math32.FastExp(ss.Config.Env.ActSoftMaxGain * float32(sum))
probs[i] = p
tot += p
}
for i, p := range probs {
probs[i] = p / tot
}
chs := randx.PChoose32(probs)
return chs
}
// SoftMaxChoose4D probabilistically selects the column with the most activity in the layer,
// using a softmax with Config.Env.ActSoftMaxGain gain factor
func (ss *Sim) SoftMaxChoose4D(vt *tensor.Float64, mode Modes) int {
nact := vt.DimSize(1)
nuY := vt.DimSize(2)
nuX := vt.DimSize(3)
var tot float32
probs := make([]float32, nact)
for i := range probs {
var sum float64
for j := 0; j < nuY; j++ {
for k := 0; k < nuX; k++ {
sum += vt.Value(0, i, j, k)
}
}
p := math32.FastExp(ss.Config.Env.ActSoftMaxGain * float32(sum))
probs[i] = p
tot += p
}
for i, p := range probs {
probs[i] = p / tot
// fmt.Println(i, p, probs[i])
}
chs := randx.PChoose32(probs)
return chs
}
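// Illustrative sketch (not part of the original sim code): the core softmax
// selection used by SoftMaxChoose2D/4D above, operating on precomputed per-action
// activity sums, with gain playing the role of Config.Env.ActSoftMaxGain.
func softMaxChooseSketch(sums []float32, gain float32) int {
	probs := make([]float32, len(sums))
	var tot float32
	for i, s := range sums {
		p := math32.FastExp(gain * s) // higher summed activity -> exponentially higher odds
		probs[i] = p
		tot += p
	}
	for i := range probs {
		probs[i] /= tot // normalize to a probability distribution
	}
	return randx.PChoose32(probs) // sample an action index from the distribution
}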
// HardChoose2D deterministically selects the column with the most activity in the layer.
func (ss *Sim) HardChoose2D(vt *tensor.Float32, mode Modes) int {
dy := vt.DimSize(0)
nact := vt.DimSize(1)
var mx float32
var mxi int
for i := 0; i < nact; i++ {
var sum float32
for j := 0; j < dy; j++ {
sum += vt.Value(j, i)
}
if sum > mx {
mx = sum
mxi = i
}
}
return mxi
}
// HardChoose4D deterministically selects the column with the most activity in the layer.
func (ss *Sim) HardChoose4D(vt *tensor.Float32, mode Modes) int {
nact := vt.DimSize(1)
nuY := vt.DimSize(2)
nuX := vt.DimSize(3)
var mx float32
var mxi int
for i := 0; i < nact; i++ {
var sum float32
for j := 0; j < nuY; j++ {
for k := 0; k < nuX; k++ {
sum += vt.Value(0, i, j, k)
}
}
if sum > mx {
mx = sum
mxi = i
}
}
return mxi
}
func (ss *Sim) ApplyAction(mode Modes, di int) {
net := ss.Net
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
ap := ev.State("Action")
ly := net.LayerByName("MotorBS")
ly.ApplyExt(uint32(di), ap)
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
ss.Envs.ByModeDi(Test, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Sequence))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Sequence))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Epoch))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Sequence)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
vmly := ss.Net.LayerByName("M1VM")
trialStats := []string{"Di", "Action", "Target", "Correct", "RT"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Trial {
return
}
for si, name := range trialStats {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
ndata := int(ss.Net.Context().NData)
for di := range ndata {
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if si >= 3 && si <= 4 {
s.On = true
}
})
continue
}
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
var stat float32
switch name {
case "Di":
stat = float32(di)
case "Action":
stat = float32(ev.CurAction)
case "Target":
stat = float32(ev.Target)
case "Correct":
stat = num.FromBool[float32](ev.Correct)
case "RT":
stat = axon.LayerStates.Value(vmly.Index, di, int(axon.GatedRT))
if stat < 0 {
stat = math32.NaN()
}
}
curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
}
})
seqStats := []string{"NCorrect", "Rew", "RewPred", "RPE", "RT"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level <= Trial {
return
}
for _, name := range seqStats {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if name != "RT" {
s.On = true
}
})
continue
}
switch level {
case Sequence:
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
var stat float32
switch name {
case "NCorrect":
stat = float32(ev.PrevNCorrect)
case "Rew":
stat = ev.Rew
case "RewPred":
stat = ev.RewPred
case "RPE":
stat = ev.RPE
case "RT":
stat = float32(stats.StatMean.Call(subDir.Value(name)).Float1D(0))
}
curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level <= Epoch {
return
}
name := "EpochsToCrit"
modeDir := ss.Stats.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
s.On = true
})
return
}
var stat float64
switch level {
case Run:
stat = float64(ss.Loops.Loop(mode, (level - 1)).Counter.Cur)
tsr.AppendRowFloat(stat)
default: // expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level < Expt {
return
}
name := "NFail"
modeDir := ss.Stats.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String())
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
s.On = true
})
return
}
run := subDir.Value("EpochsToCrit")
nfail := 0
for i := range run.Len() {
epc := run.Float1D(i)
if int(epc) == ss.Config.Run.Epochs {
nfail++
}
}
tsr.AppendRowFloat(float64(nfail))
})
runAllFunc := axon.StatLevelAll(ss.Stats, Train, Run, func(s *plot.Style, cl tensor.Values) {
name := metadata.Name(cl)
switch name {
case "EpochsToCrit", "NCorrect":
s.On = true
s.Range.SetMin(0)
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runAllFunc(mode, level, phase == Start)
})
patchStats := []string{"PPD1Cor", "PPD1Err", "PPD2Cor", "PPD2Err", "PPDAD1Cor", "PPDAD1Err", "PPDAD2Cor", "PPDAD2Err", "PPDAD1Cur", "PPDAD2Cur"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
pd1 := ss.Net.LayerByName("DSPatchD1").Params
pd2 := ss.Net.LayerByName("DSPatchD2").Params
mtx := ss.Net.LayerByName("DMatrixGo").Params
nActs := ss.Config.Env.NActions
for _, name := range patchStats {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if name[:4] == "PPDA" && strings.Contains(name, "Cor") {
s.On = true
}
})
continue
}
switch level {
case Trial:
for di := range ndata {
diu := uint32(di)
ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
trg := uint32(ev.Target)
act := uint32(ev.CurAction)
if ev.Trial.Cur == 0 {
stat = math.NaN()
tsr.AppendRowFloat(stat)
continue
} else {
d1cor := float64(axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pd1.PoolIndex(1+trg), diu))
d2cor := float64(axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pd2.PoolIndex(1+trg), diu))
dad1cor := float64(axon.Pools.Float(int(mtx.PoolIndex(1+trg)), di, int(fsfffb.DAD1)))
dad2cor := float64(axon.Pools.Float(int(mtx.PoolIndex(1+trg)), di, int(fsfffb.DAD2)))
switch name {
case "PPD1Cor":
stat = d1cor
case "PPD2Cor":
stat = d2cor
case "PPD1Err":
lsum := axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pd1.PoolIndex(0), diu) * float32(nActs)
stat = (float64(lsum) - d1cor) / float64(nActs-1)
case "PPD2Err":
lsum := axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pd2.PoolIndex(0), diu) * float32(nActs)
stat = (float64(lsum) - d2cor) / float64(nActs-1)
case "PPDAD1Cor":
stat = dad1cor
case "PPDAD2Cor":
stat = dad2cor
case "PPDAD1Err":
lsum := 0.0
for ai := range uint32(nActs) {
lsum += float64(axon.Pools.Float(int(mtx.PoolIndex(1+ai)), di, int(fsfffb.DAD1)))
}
stat = (float64(lsum) - dad1cor) / float64(nActs-1)
case "PPDAD2Err":
lsum := 0.0
for ai := range uint32(nActs) {
lsum += float64(axon.Pools.Float(int(mtx.PoolIndex(1+ai)), di, int(fsfffb.DAD2)))
}
stat = (float64(lsum) - dad2cor) / float64(nActs-1)
case "PPDAD1Cur":
stat = float64(axon.Pools.Float(int(mtx.PoolIndex(1+act)), di, int(fsfffb.DAD1)))
case "PPDAD2Cur":
stat = float64(axon.Pools.Float(int(mtx.PoolIndex(1+act)), di, int(fsfffb.DAD2)))
}
}
curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(stat)
}
case Sequence:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
for range ndata {
tsr.AppendRowFloat(stat)
}
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.PTMaintLayer, axon.PTPredLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"Action", "Target", "Correct", "RT", "PPD1Cor", "PPD1Err", "PPDAD1Cor", "PPDAD1Err"} // "PPD2Cor", "PPD2Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Value(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.03
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.0, 2.5)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, -0.1, 0.02), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
gpu.DebugAdapter = true
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
if ss.Config.Params.SearchN {
n := PSearch.NumParams()
fmt.Println(n)
axon.GPURelease()
return
}
if ss.Config.Params.SearchAt > 0 {
err := ss.ParamSearch(ss.Config.Params.SearchAt - 1)
if err != nil {
axon.GPURelease()
return
}
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// ParamSearch applies param search values for given index (in [0..n) range),
// saving a `job.label` file with the param value.
func (ss *Sim) ParamSearch(paramIndex int) error {
lbl, err := axon.ApplyPathSearch(ss.Net, PSearch, paramIndex)
if err != nil {
return errors.Log(err)
}
err = os.WriteFile("job.label", []byte(lbl), 0666)
if err != nil {
errors.Log(err)
} else {
fmt.Println("Running Search:", lbl)
}
return nil
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/bgdorsal"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[bgdorsal.Sim, bgdorsal.Config]() }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgdorsal
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// SeqLen is the sequence length.
SeqLen int `default:"3"`
// NActions is the number of distinct actions represented: determines the difficulty
// of learning in terms of the size of the space that must be searched.
// effective size = NActions ^ SeqLen
// 4^3 = 64 or 7^2 = 49 are reliably solved
NActions int `default:"4"`
// ActSoftMaxGain is the gain on the softmax for choosing actions:
// lower values are more noisy; 2 > 3+ > 1.
ActSoftMaxGain float32 `default:"2"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// STNPools makes separate pools for STN (else one layer).
STNPools bool `default:"false"` // not better, maybe bit worse
// NActionPools is the number of pools per action (Y axis of pools)
NActionPools int `default:"1"`
// NUnits is the number of units per X,Y dim, for BG.
NUnits int `default:"6"`
// NCortexUnits is the number of units per X,Y dim, for cortex.
NCortexUnits int `default:"6"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
// SearchN causes the sim app to print the total number of search params.
// The simmer search function will call this at the start of a search.
// If Debug flag is set, then this only prints all the searches and does
// not apply them.
SearchN bool
// SearchAt runs parameter search job for given parameter index.
// Using [1..N] (inclusive of N) range of numbers, so that the
// non-zero value here indicates to use parameter search.
SearchAt int
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Is significantly faster for both CPU and GPU. Results in an effective
// mini-batch of learning.
NData int `default:"16" min:"1"`
// SlowInterval is the interval between slow adaptive processes.
// This generally needs to be longer than the default of 100 in larger models.
SlowInterval int `default:"200"` // 200 >= 100 > 400
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"25" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"25"`
// Sequences is the total number of sequences per epoch.
// Should be an even multiple of NData.
Sequences int `default:"128"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"250"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
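// With the defaults above (ISICycles=0, MinusCycles=250, PlusCycles=50),
// Cycles returns 0 + 250 + 50 = 300 cycles per trial.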
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
// Testing activates testing mode: records detailed data for Go CI tests
// (not the same as running test mode on network, via Looper).
Testing bool
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "BGDorsal"
cfg.Title = "Pallidal Core (GPe) Dorsal Striatum"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/bgdorsal/README.md"
cfg.Doc = "This project simulates the Dorsal Basal Ganglia, starting with the Dorsal Striatum, centered on the Pallidum Core (GPe) areas that drive Go vs. No selection of motor actions."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package bgdorsal
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4, 5}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 6
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Sequence`: 2, `Epoch`: 3, `Run`: 4, `Expt`: 5}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Sequence`, 3: `Epoch`, 4: `Run`, 5: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgdorsal
import (
"fmt"
"strconv"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
)
// MotorSeqEnv implements simple motor sequencing patterns to test DS BG learning.
// Simplest mode has sequential State inputs which require distinct motor actions.
// Also implements simple reward prediction error for dopamine.
// The first trial is blank, and the last trial has the reward.
type MotorSeqEnv struct {
// name of environment -- Train or Test
Name string
// training or testing env?
Mode Modes
// trial counter for index into sequence
Trial env.Counter
// number of distinct actions represented: determines the difficulty
// of learning in terms of the size of the space that must be searched.
// effective size = NActions ^ SeqLen
// 5^5 = 3,125 is base test, reliably, quickly solved
NActions int
// sequence length.
SeqLen int
// learning rate for reward prediction
RewPredLRate float32 `default:"0.01"`
// additional learning rate factor for going up vs. down -- going up slower is better?
RewPredLRateUp float32 `default:"0.5"` // 0.5 > 0.8 > 0.2 > 1
// minimum rewpred value
RewPredMin float32 `default:"0.1"`
// give reward with probability in proportion to the number of
// correct actions in the sequence (else reward only for a fully correct sequence)
PartialCredit bool `default:"true"`
// if doing partial credit, also make the reward value graded (weaker for fewer)
PartialGraded bool `default:"true"`
// sequence map from sequence index to target motor action
SeqMap []int
// current target correct action according to the sequence
Target int `edit:"-"`
// current action taken by network
CurAction int `edit:"-"`
// previous action taken by network
PrevAction int `edit:"-"`
// is current action correct
Correct bool `edit:"-"`
// number of correct actions taken this sequence
NCorrect int `edit:"-"`
// previous number of correct actions taken, when reward is computed (NCorrect is reset)
PrevNCorrect int `edit:"-"`
// raw reward based on action sequence, computed at end of seq
Rew float32 `edit:"-"`
// reward prediction based on incremental learning: RewPredLRate * (Rew - RewPred)
RewPred float32 `edit:"-"`
// reward prediction error: Rew - RewPred
RPE float32 `edit:"-"`
// number of units per localist representation, in Y axis
NUnitsPer int `display:"-"`
// total number of units: NActions * NUnitsPer
NUnits int `display:"-"`
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// named states: State, Target, PrevAction, Action
States map[string]*tensor.Float32
}
func (ev *MotorSeqEnv) Label() string { return ev.Name }
func (ev *MotorSeqEnv) Defaults() {
ev.NActions = 5
ev.SeqLen = 5
ev.PartialCredit = true // critical for seq len = 3+
ev.PartialGraded = true // key for seq 3
ev.RewPredLRate = 0.01 // GPU 16 0.01 > 0.02 >> 0.05 > 0.1, 0.2 for partial, seq3
ev.RewPredLRateUp = 1
ev.RewPredMin = 0.1 // 0.1 > 0.05 > 0.2
ev.NUnitsPer = 5
ev.NUnits = ev.NUnitsPer * ev.NActions
}
// Config configures the world
func (ev *MotorSeqEnv) Config(mode Modes, rndseed int64) {
ev.Mode = mode
ev.RandSeed = rndseed
ev.Rand.NewRand(ev.RandSeed)
ev.States = make(map[string]*tensor.Float32)
ev.States["State"] = tensor.NewFloat32(ev.NUnitsPer, ev.SeqLen)
ev.States["Target"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions)
ev.States["Action"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions)
ev.States["PrevAction"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions+1)
ev.States["Rew"] = tensor.NewFloat32(1, 1)
ev.States["SNc"] = tensor.NewFloat32(1, 1)
}
func (ev *MotorSeqEnv) InitSeqMap() {
// pord := ev.Rand.Perm(ev.NActions, -1)
ev.SeqMap = make([]int, ev.SeqLen)
for i := 0; i < ev.SeqLen; i++ {
ev.SeqMap[i] = i // no randomness! otherwise doesn't work on gpu!
}
// ev.SeqMap[0] = 4 // todo: cheating -- 4 is initial bias; 0 also learns quickly
// ev.SeqMap[0] = 3 // 3, 2 good test cases -- can learn but not initial bias -- 3 esp hard
}
func (ev *MotorSeqEnv) String() string {
return fmt.Sprintf("%d", ev.Target)
}
func (ev *MotorSeqEnv) Init(run int) {
ev.Trial.Max = ev.SeqLen + 1 // rew
ev.Trial.Init()
ev.Trial.Cur = 0
ev.InitSeqMap()
ev.NCorrect, ev.Rew, ev.RPE = 0, 0, 0
ev.RewPred = ev.RewPredMin
}
func (ev *MotorSeqEnv) State(el string) tensor.Values {
return ev.States[el]
}
// RenderBlank renders a blank (all zeros) pattern for the given named state
func (ev *MotorSeqEnv) RenderBlank(name string) {
av := ev.States[name]
av.SetZeros()
}
// RenderLocalist renders a localist pattern for the given index, setting all NUnitsPer units in that column
func (ev *MotorSeqEnv) RenderLocalist(name string, idx int) {
av := ev.States[name]
av.SetZeros()
if idx >= av.DimSize(1) {
return
}
for yi := range ev.NUnitsPer {
av.Set(1, yi, idx)
}
}
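// An illustrative example (assumed values): with NUnitsPer = 5 and a
// (5, 5) "Action" state, RenderLocalist("Action", 2) sets the full column
// at index 2 to 1 and leaves all other columns at 0:
// 0 0 1 0 0
// 0 0 1 0 0
// 0 0 1 0 0
// 0 0 1 0 0
// 0 0 1 0 0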
func (ev *MotorSeqEnv) IsRewTrial() bool {
return ev.Trial.Cur == ev.Trial.Max-1
}
func (ev *MotorSeqEnv) IsRewTrialPostStep() bool {
return ev.Trial.Cur == 0
}
// RenderState renders the current state
func (ev *MotorSeqEnv) RenderState() {
trl := ev.Trial.Cur
ev.RenderBlank("Action")
ev.States["SNc"].Set1D(ev.RPE, 0)
ev.States["Rew"].Set1D(ev.Rew, 0)
if ev.IsRewTrial() {
ev.RenderBlank("State")
ev.RenderBlank("Target")
ev.RenderBlank("PrevAction")
} else {
st := ev.SeqMap[trl]
ev.Target = st // todo: starting with simple 1-to-1
ev.RenderLocalist("State", st)
ev.RenderLocalist("Target", ev.Target)
if trl > 0 {
ev.RenderLocalist("PrevAction", 1+ev.PrevAction)
} else {
ev.RenderLocalist("PrevAction", 0)
}
}
}
// Step does one step, advancing the Trial counter, rendering states
func (ev *MotorSeqEnv) Step() bool {
// fmt.Println("\nstep:", ev.Trial.Cur)
ev.RenderState()
ev.Trial.Incr()
return true
}
// Action records the current action taken by the model, at the end of the minus phase.
// It computes the Rew* reward values at the end of the sequence.
func (ev *MotorSeqEnv) Action(action string, nop tensor.Values) {
ev.PrevAction = ev.CurAction
ev.CurAction, _ = strconv.Atoi(action)
// fmt.Println("act:", ev.Trial.Cur, action, ev.CurAction, ev.Target, ev.NCorrect)
if ev.CurAction == ev.Target {
ev.Correct = true
ev.NCorrect++
// fmt.Println("correct:", ev.NCorrect)
} else {
ev.Correct = false
// fmt.Println("incorrect:")
}
ev.RenderLocalist("Action", ev.CurAction)
if ev.Trial.Cur == ev.Trial.Max-1 { // trial before reward trial
ev.ComputeReward()
}
}
func (ev *MotorSeqEnv) ComputeReward() {
ev.Rew = 0
// fmt.Println("rew, ncor:", ev.NCorrect, ev.SeqLen)
if ev.PartialCredit {
prew := float32(ev.NCorrect) / float32(ev.SeqLen)
doRew := randx.BoolP32(prew, &ev.Rand)
if doRew {
if ev.PartialGraded {
ev.Rew = prew
} else {
ev.Rew = 1
}
}
} else {
if ev.NCorrect == ev.SeqLen {
ev.Rew = 1
}
}
ev.RPE = ev.Rew - ev.RewPred
if ev.RPE > 0 {
ev.RewPred += ev.RewPredLRateUp * ev.RewPredLRate * ev.RPE
} else {
ev.RewPred += ev.RewPredLRate * ev.RPE
}
if ev.RewPred < ev.RewPredMin {
ev.RewPred = ev.RewPredMin
}
ev.PrevNCorrect = ev.NCorrect
ev.NCorrect = 0
}
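// A worked example of the update above, using the Defaults values
// (RewPredLRate=0.01, RewPredLRateUp=1, RewPredMin=0.1): with RewPred=0.1 and
// a fully correct sequence giving Rew=1, RPE = 1 - 0.1 = 0.9 and RewPred
// increases by 1 * 0.01 * 0.9 = 0.009, to 0.109. On a later failure (Rew=0),
// RPE = -0.109 and RewPred decreases by 0.01 * 0.109 ~= 0.0011, bounded
// below by RewPredMin.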
func (ev *MotorSeqEnv) DecodeAct(vt *tensor.Float32) int {
mxi := ev.DecodeLocalist(vt)
return mxi
}
func (ev *MotorSeqEnv) DecodeLocalist(vt *tensor.Float32) int {
dx := vt.DimSize(1)
var mx float32
var mxi int
for i := 0; i < dx; i++ {
var sum float32
for j := 0; j < ev.NUnitsPer; j++ {
sum += vt.Value(j, i)
}
if sum > mx {
mx = sum
mxi = i
}
}
return mxi
}
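// A minimal usage sketch of the environment API above (illustrative only;
// in the sim, Looper and ApplyAction drive these calls, and the action
// string comes from decoding the network's motor output):
// ev := &MotorSeqEnv{Name: "Train"}
// ev.Defaults()
// ev.Config(Train, 42) // mode, random seed
// ev.Init(0)           // run number
// for t := 0; t < ev.Trial.Max; t++ {
// ev.Step()           // renders State, Target, PrevAction (blank on the reward trial)
// ev.Action("3", nil) // "3" is a placeholder for the decoded network action
// }
// _ = ev.RPE // reward prediction error, driving dopamine on the reward trial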
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgdorsal
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "clamp gain makes big diff on overall excitation, gating propensity",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
ly.Acts.Noise.On.SetBool(true) // true >= false (minor)
ly.Acts.Noise.Ge = 0.0001 // 0.0001 > others; could just be noise ;)
ly.Acts.Noise.Gi = 0.0001 // 0.0001 perhaps better than others
ly.Learn.Timing.On.SetBool(true)
// ly.Learn.Timing.Refractory.SetBool(true)
ly.Learn.Timing.LearnThr = 0.05
ly.Learn.Timing.SynCaCycles = 200 // 200 > 180 > 220 > 250, 160 for 250/50 cyc
ly.Learn.Timing.Cycles = 210 // 210 >= 200 >= 190 > 220
// ly.Learn.Timing.TimeDiffTau = 4
}},
{Sel: ".PFCLayer", Doc: "pfc",
Set: func(ly *axon.LayerParams) {
// ly.Learn.NeuroMod.DAMod = axon.NoDAMod // NoDAMod > D1Mod
// ly.Learn.NeuroMod.DAModGain = 0.005 // 0.005 > higher
// ly.Learn.NeuroMod.DipGain = 0 // 0 > higher
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false >> true; orig = true
ly.Acts.Decay.Glong = 0 // 0 ==? 0.1; > higher
ly.Learn.CaLearn.ETraceTau = 4 // 4 > 3?
ly.Learn.CaLearn.ETraceScale = 0.02 // 0 == 0.02 >= 0.05 > 0.1 -- todo..
ly.Acts.KNa.On.SetBool(true)
ly.Acts.KNa.Med.Gk = 0.2 // 0.2 > 0.1 > 0.05
ly.Acts.KNa.Slow.Gk = 0.2
ly.Acts.Mahp.Gk = 0.05 // 0.05
ly.Acts.Sahp.Gk = 0.05 // 0.05
ly.Acts.Sahp.CaTau = 5 // 5 (def) == 10
// ly.Acts.NMDA.Tau = 100 // 100 def >> 200
// ly.Learn.LearnNMDA.Tau = 100 // 100 def >> 200
}},
{Sel: ".DSMatrixLayer", Doc: "all matrix",
Set: func(ly *axon.LayerParams) {
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // true >> false
}},
{Sel: ".DSPatchLayer", Doc: "all matrix",
Set: func(ly *axon.LayerParams) {
// ly.Learn.NeuroMod.AChLRateMod = 1 // 1 is now default
}},
{Sel: ".DSTNLayer", Doc: "all STN",
Set: func(ly *axon.LayerParams) {
}},
{Sel: "#M1VM", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Learn.NeuroMod.AChDisInhib = 0
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 2.4 // 2.4 >= 2.2, 2.6
ly.Inhib.ActAvg.Nominal = 0.3 // 0.3 def -- key but wrong!
ly.Acts.Dend.ModGain = 1.5 // 1.5 def > 1.0
}},
{Sel: ".PTPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.7 // 0.7 > 0.8 > 0.9 with SSGi=2
// ly.CT.GeGain = 0.05 // 0.05 >= 0.07 > 0.03
ly.CT.DecayTau = 100 // 100 >= 120, 80
}},
{Sel: ".CTLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.2 // 1.2 > 1.4 > 1.6 with SSGi=2
ly.CT.GeGain = 5 // 5 > 3, 8
ly.CT.DecayTau = 100 // 100 > 120 >> 80
// ly.Acts.Dend.SSGi = 2 // 2 new default
}},
{Sel: "#MotorBS", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.Gi = 0.2 // 0.2 > 0.25 > 0.3 > 0.1
ly.Acts.Clamp.Ge = 2 // 2 > 2.2 > 2.5 > 2.2 > 1.5, >> 1 -- absolutely critical given GPi inhib
}},
{Sel: "#VL", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.0 // 1 > 0.95 > 1.05 > 0.9
ly.Pulvinar.DriveScale = 0.1 // 0.1 > 0.12, 0.15
}},
{Sel: "#DGPeAk", Doc: "arkypallidal",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.2 // 0.2 >= 0.15 >> 0.25 0.3
ly.Acts.Init.GeVar = 0.1 // 0.1 > 0.15 > 0.2 > 0.05
}},
},
"NoiseOff": {
{Sel: "Layer", Doc: "turn off noise",
Set: func(ly *axon.LayerParams) {
ly.Acts.Noise.On.SetBool(false)
}},
},
}
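// For example, setting Config.Params.Sheet = "NoiseOff" applies the NoiseOff
// sheet on top of Base, turning off the layer noise enabled in Base.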
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.03
pt.Learn.DWt.SynTraceTau = 1 // 1 > 2; todo: explore for pfc paths separately
pt.Learn.DWt.SynCa20.SetBool(true) // 20 > 10 for long 300 ms thetacycle; only for ToTarget paths
pt.SWts.Adapt.HiMeanDecay = 0.0008 // 0.0008 for 4x6, 0.005 for 3x10 -- not clear if real..
pt.Learn.DWt.SubMean = 0 // 0 >> 1 -- fails at 1
pt.Learn.DWt.LearnThr = 0 // 0 > .1
}},
// {Sel: ".PFCPath", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.Learn.DWt.CaPScale = 1
// pt.Learn.SynCaBin.Envelope = kinase.Env30
// }},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.LearnThr = 0
}},
{Sel: ".CTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 1 def
}},
{Sel: ".PTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // was 6
}},
{Sel: ".DSPatchPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.04 // 0.04 std best
}},
{Sel: ".DSMatrixPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.8 // 1.8 > others
pt.Learn.LRate.Base = 0.02 // rlr sig: .02 > .015 .025
pt.Learn.DWt.LearnThr = 0.1 // 0.1 > 0.2
}},
{Sel: ".SuperToPT", Doc: "one-to-one from super",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5
}},
{Sel: ".PTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 def
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3.0 // 3
}},
// {Sel: ".FmState", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.5 // abs, rel < 1 worse
// }},
{Sel: ".ToM1", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // now 1.5 > 2 > 1 ..
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02
}},
{Sel: ".ToMotor", Doc: "ToTarget excitatory paths to MotorBS; see #DGPiToMotorBS too",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.02 // 0.02 > 0.04 > 0.01 -- still key
pt.Learn.DWt.SynTraceTau = 1 // 1 > 2
pt.Learn.DWt.CaPScale = 1.05 // 1.05 > 1 > 1.1; still key v.80, could not avoid.
// note: SWts not used for ToTarget layers.
}},
{Sel: "#DGPiToMotorBS", Doc: "ToTarget inhibition of MotorBS",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3 // 3 >= 3.5 > 2.5
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02 > 0.0005 with STN 150
pt.Learn.DWt.CaPScale = 1.05 // 1.05 > 1 > 1.1; this is particularly key
}},
{Sel: ".VLM1", Doc: "ToTarget projections from M1 layers to VL",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.02 // 0.02 > 0.04 > 0.01 -- still key
pt.Learn.DWt.CaPScale = 1 // 1.0 == 1.05
// note: SWts not used for ToTarget layers.
}},
{Sel: "#DGPiToM1VM", Doc: "final inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.6, 2.4
// learn = false by default
}},
{Sel: "#DGPiToPF", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.7 // 0.7 >= 0.6 >= 0.5 > lower
pt.Learn.LRate.Base = 0.04 // 0.4 prev default
}},
{Sel: "#StateToM1", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1.2 >= 1 > 0.8
}},
{Sel: "#MotorBSToPF", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 1.1 > 0.9 >> 0.5
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02
// fixed is not better:
// pt.Learn.Learn.SetBool(false)
// pt.SWts.Init.SPct = 0
// pt.SWts.Init.Mean = 0.8
// pt.SWts.Init.Var = 0.0
}},
// {Sel: ".PFToDMatrix", Doc: "",
// Set: func(pt *axon.PathParams) {
// // std random sig better
// // pt.SWts.Init.Mean = 0.5
// // pt.SWts.Init.Var = 0.0
// }},
{Sel: ".M1ToMotorBS", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.5, 2.5
}},
{Sel: "#M1ToMotorBS", Doc: "weaker; note: this is a proxy for cerebellum etc inputs",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1.2, 1.8, 1, 2, 2.5 sensitive
}},
{Sel: "#M1PTToMotorBS", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.6, 2.4 sensitive
pt.PathScale.Rel = 1 // 1
// note: lr = 0.04 in orig
}},
{Sel: "#M1PTToVL", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 0.8, 1.2
pt.PathScale.Rel = 0.1 // 0.1 > 0.2, .05, 0
}},
{Sel: "#M1PTToM1PT", Doc: "self path",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.0001 // 0.0001 > .04 but not a major diff
}},
// {Sel: "#M1PTpToMotorBS", Doc: "not used",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 2
// pt.PathScale.Rel = 1
// }},
/*
{Sel: "#DMatrixNoToDMatrixGo", Doc: "weakish no->go inhibition is beneficial",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.08, 0.12 > 0.05 not too sensitive
pt.Learn.Learn.SetBool(false) // no-learn better than learn
}},
{Sel: "#DMatrixGoToDGPeAk", Doc: "go inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.6 // 0.6 > 0.5 > 0.4 > 0.7
}},
{Sel: "#DGPeAkToDMatrixNo", Doc: "go disinhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4 // 4 > 5 > 6 > 3 >> 2
}},
{Sel: "#DGPePrToDGPePr", Doc: "self-inhib -- only source of self reg",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.5 // 4.5 >= 4 >= 4.8 >> 3.2
}},
{Sel: "#DGPePrToDSTN", Doc: "enough to kick off the ping-pong dynamics for STN.",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.4 // 0.4 >= 0.5 > 0.6
}},
// {Sel: ".StateToDMatrix", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 1.5 // 1.8 def
// }},
// {Sel: ".CLToDMatrix", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.Learn.Learn = false
// pt.PathScale.Rel = 0.001
// }},
*/
},
}
/////////
// LayerParamsDefs has builtin default values.
var LayerParamsDefs = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "clamp gain makes big diff on overall excitation, gating propensity",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
ly.Acts.Noise.On.SetBool(true) // true >= false (minor)
ly.Acts.Noise.Ge = 0.0001 // 0.0001 > others; could just be noise ;)
ly.Acts.Noise.Gi = 0.0001 // 0.0001 perhaps better than others
}},
{Sel: ".PFCLayer", Doc: "pfc",
Set: func(ly *axon.LayerParams) {
// ly.Learn.NeuroMod.DAMod = axon.NoDAMod // NoDAMod > D1Mod
// ly.Learn.NeuroMod.DAModGain = 0.005 // 0.005 > higher
// ly.Learn.NeuroMod.DipGain = 0 // 0 > higher
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false >> true; orig = true
ly.Acts.Decay.Glong = 0 // 0 ==? 0.1; > higher
ly.Learn.CaLearn.ETraceTau = 4 // 4 > 3?
ly.Learn.CaLearn.ETraceScale = 0.02 // 0 == 0.02 >= 0.05 > 0.1 -- todo..
ly.Acts.KNa.On.SetBool(true)
ly.Acts.KNa.Med.Gk = 0.2 // 0.2 > 0.1 > 0.05
ly.Acts.KNa.Slow.Gk = 0.2
ly.Acts.Mahp.Gk = 0.05 // 0.05
ly.Acts.Sahp.Gk = 0.05 // 0.05
ly.Acts.Sahp.CaTau = 10 // 10 (def) > 5?
// ly.Acts.NMDA.Tau = 100 // 100 def >> 200
// ly.Learn.LearnNMDA.Tau = 100 // 100 def >> 200
}},
{Sel: "#M1VM", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Learn.NeuroMod.AChDisInhib = 0
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 2.4 // 2.4 >= 2.2, 2.6
ly.Inhib.ActAvg.Nominal = 0.3 // 0.3 def -- key but wrong!
ly.Acts.Decay.OnRew.SetBool(true) // true def -- seems better?
ly.Acts.Dend.ModGain = 1.0 // 1.5 def
ly.Acts.Kir.Gk = 0 // no real diff here over range 0-10
ly.Acts.MaintNMDA.Ge = 0.007 // 0.007 >= 0.006 > 0.005 > 0.004 > 0.008
ly.Acts.MaintNMDA.Tau = 200 // 200 > 250, 180
}},
{Sel: ".PTPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.7 // 0.7 > 0.8 > 0.9 with SSGi=2
ly.CT.GeGain = 0.05 // 0.05 >= 0.07 > 0.03
ly.CT.DecayTau = 100 // 100 >= 120, 80
ly.Acts.Dend.SSGi = 2 // 2 new default
}},
{Sel: ".CTLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.2 // 1.2 > 1.4 > 1.6 with SSGi=2
ly.CT.GeGain = 5 // 5 > 3, 8
ly.CT.DecayTau = 100 // 100 > 120 >> 80
ly.Acts.Dend.SSGi = 2 // 2 new default
}},
{Sel: "#MotorBS", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.Gi = 0.2 // 0.2 > 0.3 > 0.1
ly.Acts.Clamp.Ge = 2.0 // 2 > 2.5 > 2.2 > 1.5, >> 1 -- absolutely critical given GPi inhib
// ly.Learn.RLRate.Diff.SetBool(false) // true > false
// ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false >> true; orig = true
// ly.Learn.RLRate.SigmoidMin = 0.05 // 0.05 def > 0.1 > 0.2 > 0.02
}},
// {Sel: "#M1", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Learn.NeuroMod.DAMod = axon.D1Mod // not good here.
// ly.Learn.NeuroMod.DAModGain = 0.03 // up to 0.04 good
// ly.Learn.NeuroMod.DipGain = 0.1 // 0.1 > 0 > 0.2
// }},
{Sel: "#DGPeAk", Doc: "arkypallidal",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.2 // 0.2 >= 0.15 >> 0.25 0.3
ly.Acts.Init.GeVar = 0.1 // 0.1 > 0.15 > 0.2 > 0.05
}},
{Sel: ".DSMatrixLayer", Doc: "all matrix",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.Gi = 0.5 // 0.5 > others
ly.Learn.NeuroMod.BurstGain = 0.1 // 0.1 > 0.2 still v53
ly.Learn.NeuroMod.DAModGain = 0 // 0 > higher?
ly.DSMatrix.PatchBurstGain = 1.0 // 1 > others
ly.DSMatrix.PatchDAModGain = 0.02 // .02 > .01 > .05 > 0; 0 not that bad
ly.DSMatrix.PatchD1Range.Set(0.1, 0.3) // 0.3 > 0.35, .4
ly.DSMatrix.PatchD2Range.Set(0.05, 0.25) // 0.05, 0.25 > 0.1, 0.3
ly.Learn.RLRate.On.SetBool(true) // note: applied for tr update trials
ly.Learn.RLRate.SigmoidMin = 0.001 // 0.001 >= 0.01 -- minor
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // true > false
// base defaults below
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.FB = 0
ly.Striatum.GateThr = 0.05 // .05 default
ly.Acts.Kir.Gk = 10 // 10 > 5 > 20
ly.Acts.GabaB.Gk = 0
ly.Acts.NMDA.Ge = 0.006 // 0.006 default, necessary (0 very bad)
ly.Acts.Dend.ModBase = 1
ly.Acts.Dend.ModGain = 0 // has no effect
ly.Learn.NeuroMod.AChLRateMod = 0 // dorsal should not use
ly.Learn.NeuroMod.AChDisInhib = 0
}},
{Sel: ".DSPatchLayer", Doc: "all matrix",
Set: func(ly *axon.LayerParams) {
ly.Learn.NeuroMod.AChLRateMod = 1 // 1 is now default
}},
{Sel: ".DSTNLayer", Doc: "all STN",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.1
ly.Acts.Kir.Gk = 10 // 10 >= 8 > 12 > 5 > 2 -- key for pause
ly.Acts.SKCa.Gk = 2 // 2 > 1.8 >> 2.5 >> 3 >> 1 (for Kir = 10)
ly.Acts.SKCa.CaRDecayTau = 150 // 150 >= 140 >= 160 > 180 > 200 > 130 >> 80 def -- key param!
ly.Inhib.Layer.On.SetBool(true) // actually needs this, FF
ly.Inhib.Layer.Gi = 0.5 // 0.5 > 0.4 >> 0.6
ly.Inhib.Layer.FB = 0
ly.Inhib.Pool.On.SetBool(true) // actually needs this
ly.Inhib.Pool.Gi = 0.5 // 0.5 > 0.4 >> 0.6
ly.Inhib.Pool.FB = 0
ly.Learn.NeuroMod.AChDisInhib = 0
}},
{Sel: "#PF", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(false)
}},
{Sel: "#DGPePr", Doc: "prototypical",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.4 // 0.4 > 0.3, 0.5
ly.Acts.Init.GeVar = 0.2
}},
{Sel: "#DGPeAk", Doc: "arkypallidal",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.2 // 0.2 > 0.3, 0.1
ly.Acts.Init.GeVar = 0.1 // 0.1 == 0.2 > 0.05
}},
{Sel: "#DGPi", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.3 // 0.3 > 0.2, 0.1
ly.Acts.Init.GeVar = 0.1
}},
},
}
// PathParamsDefs are builtin default params
var PathParamsDefs = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.03
pt.Learn.DWt.SynTraceTau = 1 // 1 > 2
pt.SWts.Adapt.HiMeanDecay = 0.0008 // 0.0008 for 4x6, 0.005 for 3x10 -- not clear if real..
pt.Learn.DWt.SubMean = 0 // 0 >> 1 -- fails at 1
pt.Learn.DWt.LearnThr = 0 // 0 > .1
}},
// {Sel: ".PFCPath", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.Learn.DWt.CaPScale = 1
// pt.Learn.SynCaBin.Envelope = kinase.Env30
// }},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.LearnThr = 0
}},
{Sel: ".CTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 1 def
}},
{Sel: ".PTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // was 6
}},
{Sel: ".DSPatchPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.04 // 0.04 std best
}},
{Sel: ".DSMatrixPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.8 // 1.8 > others
pt.Learn.LRate.Base = 0.02 // rlr sig: .02 > .015 .025
pt.Learn.DWt.LearnThr = 0.1 // 0.1 > 0.2
pt.DSMatrix.PatchDA = 0.5 // 0.5 > 0.8 >> 0.2
pt.DSMatrix.Credit = 0.6 // key param, 0.6 > 0.5, 0.4, 0.7, 1 with pf modulation
pt.DSMatrix.Delta = 1 // verified essential v0.2.40
// Delta should always be 1 except for testing; adjust lrate to compensate
pt.DSMatrix.OffTrace = 0.1 // 0.1 > 0.2, 0.5 > 0.05 > 0
pt.DSMatrix.D2Scale = 1 // 1 >= .9, 1.1
pt.SWts.Adapt.On.SetBool(false) // false > true here
}},
{Sel: ".SuperToPT", Doc: "one-to-one from super",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5
}},
{Sel: ".PTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 def
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3.0 // 3
}},
// {Sel: ".FmState", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.5 // abs, rel < 1 worse
// }},
{Sel: ".ToM1", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // now 1.5 > 2 > 1 ..
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02
}},
{Sel: ".ToMotor", Doc: "all excitatory paths to MotorBS; see #DGPiToMotorBS too",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.02 // 0.02 > 0.04 > 0.01 -- still key
pt.Learn.DWt.CaPScale = 1.05 // 1.05 > 1 > 1.1
pt.Learn.DWt.SynCa20.SetBool(true) // 20 > 10
// note: MotorBS is a target, key for learning; SWts not used.
// pt.Learn.SynCaBin.Envelope = kinase.Env10
// pt.Learn.DWt.CaPScale = 1 // tbd in Env
}},
{Sel: "#DGPiToMotorBS", Doc: "final inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3 // 3 >= 3.5 > 2.5
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02 > 0.0005 with STN 150
// pt.Learn.SynCaBin.Envelope = kinase.Env10
// pt.Learn.DWt.CaPScale = 1 // tbd in Env
}},
{Sel: ".VLM1", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.02 // 0.02 > 0.04 > 0.01 -- still key
// note: VL is a target layer; SWts not used.
// pt.Learn.SynCaBin.Envelope = kinase.Env10
// pt.Learn.DWt.CaPScale = 1 // tbd in Env
}},
{Sel: "#DGPiToM1VM", Doc: "final inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.6, 2.4
// learn = false by default
}},
{Sel: "#DGPiToPF", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.7 // 0.7 >= 0.6 >= 0.5 > lower
pt.Learn.LRate.Base = 0.04 // 0.4 prev default
}},
{Sel: "#StateToM1", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1.2 >= 1 > 0.8
}},
{Sel: "#MotorBSToPF", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 1.1 > 0.9 >> 0.5
pt.Learn.LRate.Base = 0.04 // 0.04 > 0.02
// fixed is not better:
// pt.Learn.Learn.SetBool(false)
// pt.SWts.Init.SPct = 0
// pt.SWts.Init.Mean = 0.8
// pt.SWts.Init.Var = 0.0
}},
// {Sel: ".PFToDMatrix", Doc: "",
// Set: func(pt *axon.PathParams) {
// // std random sig better
// // pt.SWts.Init.Mean = 0.5
// // pt.SWts.Init.Var = 0.0
// }},
{Sel: ".M1ToMotorBS", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.5, 2.5
}},
{Sel: "#M1ToMotorBS", Doc: "weaker; note: this is a proxy for cerebellum etc inputs",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1.2, 1.8, 1, 2, 2.5 sensitive
}},
{Sel: "#M1PTToMotorBS", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.6, 2.4 sensitive
pt.PathScale.Rel = 1 // 1
// note: lr = 0.04 in orig
}},
{Sel: "#M1PTToVL", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 0.8, 1.2
pt.PathScale.Rel = 0.1 // 0.1 > 0.2, .05, 0
}},
{Sel: "#M1PTToM1PT", Doc: "self path",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.0001 // 0.0001 > .04 but not a major diff
}},
// {Sel: "#M1PTpToMotorBS", Doc: "not used",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 2
// pt.PathScale.Rel = 1
// }},
{Sel: "#DGPePrToDGPi", Doc: "nogo influence on gating -- decreasing produces more graded function of Go",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 0.8, 1.2
}},
{Sel: "#DMatrixGoToDGPi", Doc: "go influence on gating -- slightly weaker than integrated GPePr",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 >> 1.2, 0.8
}},
{Sel: "#DSTNToDGPi", Doc: "strong initial phasic activation",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = .2 // .2 >= .16 >> .24
}},
{Sel: "#DMatrixNoToDGPePr", Doc: "proto = primary classical NoGo pathway",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 0.8, 1.2
}},
{Sel: "#DGPePrToDGPePr", Doc: "self-inhib -- only source of self reg",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.5 // 4.5 >= 4 >= 4.8 >> 3.2
}},
{Sel: "#DSTNToDGPePr", Doc: "stronger STN -> DGPePr to kick it high at start",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5 > 0.4 >> 0.6
}},
{Sel: "#DGPePrToDGPeAk", Doc: "just enough to knock down in baseline state",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1 > 1.2 >> 0.8
}},
{Sel: "#DMatrixGoToDGPeAk", Doc: "go inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.6 // 0.6 > 0.5 > 0.4 > 0.7
}},
{Sel: "#DSTNToDGPeAk", Doc: "this is weak biologically -- but relatively sensitive..",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.1 // 0.1 > 0.12 >> 0.08
}},
{Sel: ".ToDSTN", Doc: "excitatory inputs",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: "#DGPePrToDSTN", Doc: "enough to kick off the ping-pong dynamics for STN.",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.4 // 0.4 >= 0.5 > 0.6
}},
{Sel: "#StateToDSTN", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.6, 2.4
}},
{Sel: "#S1ToDSTN", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 >= 2.4 > 1.6
}},
{Sel: "#DMatrixNoToDMatrixGo", Doc: "weakish no->go inhibition is beneficial",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.08, 0.12 > 0.05 not too sensitive
pt.Learn.Learn.SetBool(false) // no-learn better than learn
}},
{Sel: "#DGPeAkToDMatrixGo", Doc: "go disinhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4 // 4 > 5 > 6 > 3 >> 2
}},
{Sel: ".PFToDMatrix", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false)
pt.Com.GType = axon.ModulatoryG
pt.PathScale.Abs = 1
}},
},
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"strconv"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
)
// MotorSeqEnv implements simple motor sequencing patterns to test DS BG learning.
// Simplest mode has sequential State inputs which require distinct motor actions.
// Also implements simple reward prediction error for dopamine.
// The first trial is blank, and the last trial has the reward.
type MotorSeqEnv struct {
// name of environment -- Train or Test
Name string
// trial counter for index into sequence
Trial env.Counter
// sequence length.
SeqLen int
// number of distinct actions represented: determines the difficulty
// of learning in terms of the size of the space that must be searched.
// effective size = NActions ^ SeqLen
// 4^4 = 256 or 10^3 = 1000 are reliably solved
NActions int
// learning rate for reward prediction
RewPredLRate float32 `default:"0.01"`
// additional learning rate factor for going up vs. down -- going up slower is better?
RewPredLRateUp float32 `default:"0.5"`
// minimum rewpred value
RewPredMin float32 `default:"0.1"`
// give reward with probability in proportion to the number of
// correct actions in the sequence, above the given threshold; if 0, partial credit is not used
PartialCreditAt int
// if doing partial credit, also make the reward value graded (weaker for fewer)
PartialGraded bool
// sequence map from sequence index to target motor action
SeqMap []int
// current target correct action according to the sequence
Target int `edit:"-"`
// current action taken by network
CurAction int `edit:"-"`
// previous action taken by network
PrevAction int `edit:"-"`
// is current action correct
Correct bool `edit:"-"`
// number of correct actions taken this sequence
NCorrect int `edit:"-"`
// previous number of correct actions taken, when reward is computed (NCorrect is reset)
PrevNCorrect int `edit:"-"`
// raw reward based on action sequence, computed at end of seq
Rew float32 `edit:"-"`
// reward prediction based on incremental learning: RewPredLRate * (Rew - RewPred)
RewPred float32 `edit:"-"`
// reward prediction error: Rew - RewPred
RPE float32 `edit:"-"`
// number of units per localist representation, in Y axis
NUnitsPer int `display:"-"`
// total number of units: NActions * NUnitsPer
NUnits int `display:"-"`
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// named states: State, Target, PrevAction, Action
States map[string]*tensor.Float32
}
func (ev *MotorSeqEnv) Label() string { return ev.Name }
func (ev *MotorSeqEnv) Defaults() {
ev.SeqLen = 3 // 2x5 is easily solved, 3x5 is 100% with 49u
ev.NActions = 5 // 2x7 good test
ev.PartialCreditAt = 1 // 1 default: critical for seq len = 3
ev.PartialGraded = true // key for seq 3
ev.RewPredLRate = 0.01 // GPU 16 0.01 > 0.02 >> 0.05 > 0.1, 0.2 for partial, seq3
ev.RewPredLRateUp = 1
ev.RewPredMin = 0.1 // 0.1 > 0.05 > 0.2
ev.NUnitsPer = 5
ev.NUnits = ev.NUnitsPer * ev.NActions
}
// Config configures the world
func (ev *MotorSeqEnv) Config(rndseed int64) {
ev.RandSeed = rndseed
ev.Rand.NewRand(ev.RandSeed)
ev.States = make(map[string]*tensor.Float32)
ev.States["State"] = tensor.NewFloat32(ev.NUnitsPer, ev.SeqLen)
ev.States["Target"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions)
ev.States["Action"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions)
ev.States["PrevAction"] = tensor.NewFloat32(ev.NUnitsPer, ev.NActions+1)
ev.States["Rew"] = tensor.NewFloat32(1, 1)
ev.States["SNc"] = tensor.NewFloat32(1, 1)
}
func (ev *MotorSeqEnv) InitSeqMap() {
// pord := ev.Rand.Perm(ev.NActions, -1)
ev.SeqMap = make([]int, ev.SeqLen)
for i := 0; i < ev.SeqLen; i++ {
ev.SeqMap[i] = i // no randomness! otherwise doesn't work on gpu!
}
}
func (ev *MotorSeqEnv) String() string {
return fmt.Sprintf("%d", ev.Target)
}
func (ev *MotorSeqEnv) Init(run int) {
ev.Trial.Max = ev.SeqLen + 1 // rew
ev.Trial.Init()
ev.Trial.Cur = 0
ev.InitSeqMap()
ev.NCorrect, ev.Rew, ev.RPE = 0, 0, 0
ev.RewPred = ev.RewPredMin
}
func (ev *MotorSeqEnv) State(el string) tensor.Values {
return ev.States[el]
}
// RenderBlank zeroes the named state tensor
func (ev *MotorSeqEnv) RenderBlank(name string) {
av := ev.States[name]
av.SetZeros()
}
// RenderLocalist renders a localist representation of the given index, with NUnitsPer units active along the Y axis
func (ev *MotorSeqEnv) RenderLocalist(name string, idx int) {
av := ev.States[name]
av.SetZeros()
if idx >= av.DimSize(1) {
return
}
for yi := range ev.NUnitsPer {
av.Set(1, yi, idx)
}
}
func (ev *MotorSeqEnv) IsRewTrial() bool {
return ev.Trial.Cur == ev.Trial.Max-1
}
func (ev *MotorSeqEnv) IsRewTrialPostStep() bool {
return ev.Trial.Cur == 0
}
// RenderState renders the current state
func (ev *MotorSeqEnv) RenderState() {
trl := ev.Trial.Cur
ev.RenderBlank("Action")
ev.States["SNc"].Set1D(ev.RPE, 0)
ev.States["Rew"].Set1D(ev.Rew, 0)
if ev.IsRewTrial() {
ev.RenderBlank("State")
ev.RenderBlank("Target")
ev.RenderBlank("PrevAction")
} else {
st := ev.SeqMap[trl]
ev.Target = st // todo: starting with simple 1-to-1
ev.RenderLocalist("State", st)
ev.RenderLocalist("Target", ev.Target)
if trl > 0 {
ev.RenderLocalist("PrevAction", 1+ev.PrevAction)
} else {
ev.RenderLocalist("PrevAction", 0)
}
}
}
// Step does one step, advancing the Trial counter, rendering states
func (ev *MotorSeqEnv) Step() bool {
// fmt.Println("\nstep:", ev.Trial.Cur)
ev.RenderState()
ev.Trial.Incr()
return true
}
// Action records the current action taken by the model, at the end of the minus phase,
// and computes the Rew* values (Rew, RewPred, RPE) at the end of the sequence.
func (ev *MotorSeqEnv) Action(action string, nop tensor.Values) {
ev.PrevAction = ev.CurAction
ev.CurAction, _ = strconv.Atoi(action)
// fmt.Println("act:", ev.Trial.Cur, action, ev.CurAction, ev.Target, ev.NCorrect)
if ev.CurAction == ev.Target {
ev.Correct = true
ev.NCorrect++
// fmt.Println("correct:", ev.NCorrect)
} else {
ev.Correct = false
// fmt.Println("incorrect:")
}
ev.RenderLocalist("Action", ev.CurAction)
if ev.Trial.Cur == ev.Trial.Max-1 { // trial before reward trial
ev.ComputeReward()
}
}
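// The reward logic below, in outline: with partial credit, the proportion of
// correct actions prew = NCorrect / SeqLen sets both the probability of
// delivering reward and (if PartialGraded) its magnitude; otherwise reward is
// all-or-nothing for a fully correct sequence. The prediction is then updated
// by a delta rule: RPE = Rew - RewPred, RewPred += lrate * RPE (scaled by
// RewPredLRateUp when RPE > 0), with a floor at RewPredMin. For example, with
// SeqLen = 3 and NCorrect = 2, prew = 2/3: reward is delivered with
// probability 2/3, with value 2/3 if PartialGraded, else 1.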
func (ev *MotorSeqEnv) ComputeReward() {
ev.Rew = 0
// fmt.Println("rew, ncor:", ev.NCorrect, ev.SeqLen)
if ev.PartialCreditAt > 0 {
prew := float32(ev.NCorrect) / float32(ev.SeqLen)
doRew := randx.BoolP32(prew, &ev.Rand)
if doRew {
if ev.PartialGraded {
ev.Rew = prew
} else {
ev.Rew = 1
}
}
} else {
if ev.NCorrect == ev.SeqLen {
ev.Rew = 1
}
}
ev.RPE = ev.Rew - ev.RewPred
if ev.RPE > 0 {
ev.RewPred += ev.RewPredLRateUp * ev.RewPredLRate * ev.RPE
} else {
ev.RewPred += ev.RewPredLRate * ev.RPE
}
if ev.RewPred < ev.RewPredMin {
ev.RewPred = ev.RewPredMin
}
ev.PrevNCorrect = ev.NCorrect
ev.NCorrect = 0
}
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"strconv"
"cogentcore.org/core/cli"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
)
// Sim is the overall sim.
type Sim struct {
// TD implements TD Q learning.
TD TD
// Env is the motor sequence env.
Env MotorSeqEnv
// Number of runs.
Runs int `default:"10"`
// Number of trials per epoch; one trial is a sequence through the env.
Trials int `default:"128"`
// Number of epochs per Run, max.
Epochs int `default:"1000"`
// PrintInterval is the interval in epochs to print data.
PrintInterval int `default:"10"`
// StopCrit is the stopping criterion in terms of average reward per epoch.
StopCrit float32 `default:"0.98"`
// LogEpochs records data by epoch
LogEpochs bool
// Debug prints detailed debug info
Debug bool
}
func (sim *Sim) Defaults() {
sim.TD.Defaults()
sim.Env.Defaults()
sim.Env.NUnitsPer = 1
}
func main() {
sim := &Sim{}
sim.Defaults()
cli.SetFromDefaults(sim)
opts := cli.DefaultOptions("SeqTD", "Motor Sequence TD Q Learning")
opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
cli.Run(opts, sim, RunSim)
}
func RunSim(sim *Sim) error {
debug := sim.Debug
logEpoch := sim.LogEpochs
td := &sim.TD
env := &sim.Env
td.Config(env.SeqLen, env.NActions)
env.NUnitsPer = 1
env.Config(0)
epcs := tensor.NewFloat64(sim.Runs)
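// Each trial below is one pass through the sequence: SeqLen action steps
// followed by one reward step (env.Trial.Max = SeqLen + 1). Q values
// bootstrap from the next state on intermediate steps, and use the actual
// reward on the final step (see TD.UpdateQ and TD.UpdateFinal).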
for run := range sim.Runs {
td.Init()
env.Init(run)
finalRew := float32(0)
finalEpoch := 0
for epoch := range sim.Epochs {
rewSum := float32(0)
for trial := range sim.Trials {
for step := range env.SeqLen {
state := env.Trial.Cur
action := td.Action(state)
env.Step()
next := env.Trial.Cur
env.Action(strconv.Itoa(action), nil)
if step == env.SeqLen-1 {
td.UpdateFinal(state, action, env.Rew)
} else {
td.UpdateQ(state, action, next, 0) // no reward until the end of the sequence
}
if debug {
fmt.Printf("%02d\t%05d\t%d\t%d\t%d\t%v\t%d\t%7.4f\n", run, trial, step, state, action, env.Correct, env.NCorrect, env.Rew)
}
}
env.Step()
rewSum += env.Rew
}
rewSum /= float32(sim.Trials)
stop := rewSum >= sim.StopCrit
if logEpoch && (stop || (epoch+1)%sim.PrintInterval == 0) {
fmt.Printf("%02d\t%05d\tRew: %7.4f\tLRate: %7.4f\tEpsilon: %7.4f\n", run, epoch, rewSum, td.LRate.Current, td.Epsilon.Current)
}
td.EpochUpdate(epoch + 1)
finalRew = rewSum
finalEpoch = epoch
if stop {
break
}
}
epcs.Set(float64(finalEpoch), run)
fmt.Printf("%02d\tNEpochs: %d\tRew: %7.4f\n", run, finalEpoch, finalRew)
if debug {
fmt.Println("Final Q Weights:\n", td.Q.String())
}
}
dir, _ := tensorfs.NewDir("Desc")
stats.Describe(dir, epcs)
dt := tensorfs.DirTable(dir, nil)
fmt.Println(dt)
return nil
}
// Code generated by "goal build"; DO NOT EDIT.
//line td.goal:1
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// td simulates a simple TD Q-learning agent
package main
import (
"math/rand"
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
)
// Epsilon has parameters for an epsilon-greedy policy,
// where random exploration happens with probability epsilon.
type Epsilon struct {
// Init is the initial value: pure exploration
Init float32 `default:"1"`
// Min is the minimum value: always some exploration.
Min float32 `default:"0.01"`
// Decay per epoch.
Decay float32 `default:"0.0002"`
// Current epsilon value, updated every epoch.
Current float32
}
func (eg *Epsilon) Defaults() {
eg.Init = 1
eg.Min = 0.01
eg.Decay = 0.0002
eg.Current = eg.Init
}
// Update updates the Current epsilon value for given epoch.
func (eg *Epsilon) Update(epoch int) float32 {
eg.Current = eg.Min + (eg.Init-eg.Min)*math32.Exp(-eg.Decay*float32(epoch))
return eg.Current
}
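// For example, with the defaults (Init = 1, Min = 0.01, Decay = 0.0002),
// Current starts at 1 (pure exploration) and decays to roughly 0.82 by epoch
// 1000, approaching Min asymptotically. LRate.Update below uses the same
// functional form.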
// LRate has parameters for an annealing learning rate schedule.
type LRate struct {
// Init is the initial lrate value.
Init float32 `default:"0.7"`
// Min is the minimum lrate.
Min float32 `default:"0.01"`
// Decay per epoch.
Decay float32 `default:"0.001"`
// Current lrate value, updated every epoch.
Current float32
}
func (eg *LRate) Defaults() {
eg.Init = 0.7
eg.Min = 0.01
eg.Decay = 0.001
eg.Current = eg.Init
}
// Update updates the lrate value for given epoch.
func (eg *LRate) Update(epoch int) float32 {
eg.Current = eg.Min + (eg.Init-eg.Min)*math32.Exp(-eg.Decay*float32(epoch))
return eg.Current
}
// TD implements a simple TD Q-learning simulation.
type TD struct {
// LRate is the learning rate per step for updating Q.
// It can decay to anneal the rate of change over time.
LRate LRate `display:"inline"`
// Epsilon computes the epsilon-greedy exploration value,
// allowing exploration to decrease over time.
Epsilon Epsilon `display:"inline"`
// Gamma is the discount factor.
Gamma float32 `default:"0.95"`
// NStates is the number of states.
NStates int
// NActions is the number of actions per state.
NActions int
// Epoch is the current epoch counter, for driving parameter updates.
Epoch int `edit:"-"`
// Q are the state-action values: [States][Actions]
Q tensor.Float32
}
func (td *TD) Defaults() {
td.LRate.Defaults()
td.Epsilon.Defaults()
td.Gamma = 0.95
}
func NewTD(states, actions int) *TD {
td := &TD{NStates: states, NActions: actions}
td.Defaults()
return td
}
func (td *TD) Config(states, actions int) {
td.NStates = states
td.NActions = actions
}
func (td *TD) Init() {
td.Q.SetShapeSizes(td.NStates, td.NActions)
tensor.SetAllFloat64(&td.Q, 0)
td.EpochUpdate(0)
}
// EpochUpdate updates parameters for a new epoch.
func (td *TD) EpochUpdate(epoch int) {
td.Epoch = epoch
td.Epsilon.Update(epoch)
td.LRate.Update(epoch)
}
// MaxQ returns the max Q value and its action index for the given state.
func (td *TD) MaxQ(state int) (float32, int) {
mx := float32(0)
mi := 0
for i := range td.NActions {
q := td.Q.Value(int(state), int(i))
if i == 0 || q > mx {
mx = q
mi = i
}
}
return mx, mi
}
func (td *TD) EpsilonGreedyAction(state int) int {
if randx.BoolP(float64(td.Epsilon.Current)) {
return rand.Intn(td.NActions)
}
_, mi := td.MaxQ(state)
return mi
}
// Action computes the action to take for given state.
func (td *TD) Action(state int) int {
return td.EpsilonGreedyAction(state)
}
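// The tabular Q-learning update implemented by UpdateQ is:
//
//	Q(s,a) += lrate * (rew + Gamma * max_a' Q(s',a') - Q(s,a))
//
// where s' is the next state. UpdateFinal drops the bootstrap term and moves
// Q(s,a) directly toward the actual end-of-sequence reward.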
// UpdateQ updates the Q value for the given state and action taken,
// based on the max Q value of the next state and the reward received for the current action.
func (td *TD) UpdateQ(state, action, next int, rew float32) {
lrate := td.LRate.Current
nq, _ := td.MaxQ(next)
nextVal := rew + td.Gamma*nq
td.Q.SetAdd(lrate*(nextVal-td.Q.Value(int(state), int(action))), int(state), int(action))
}
// UpdateFinal updates the Q value for the given state and action taken
// on the final trial in a sequence, using the actual reward received directly.
func (td *TD) UpdateFinal(state, action int, rew float32) {
lrate := td.LRate.Current
td.Q.SetAdd(lrate*(rew-td.Q.Value(int(state), int(action))), int(state), int(action))
}
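// A minimal usage sketch of the TD type (illustrative only, mirroring how
// RunSim drives it above):
//
//	td := NewTD(3, 5) // 3 states, 5 actions
//	td.Init()
//	a := td.Action(0)       // epsilon-greedy action for state 0
//	td.UpdateQ(0, a, 1, 0)  // bootstrap update toward state 1, no reward
//	td.UpdateFinal(2, a, 1) // final-step update with the actual reward
//	td.EpochUpdate(1)       // anneal epsilon and lrate for the next epoch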
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// bgventral simulates the inhibitory dynamics in the STN and GPe
// leading to integration of Go vs. NoGo signal in the basal
// ganglia, for the Ventral Striatum (VS) global Go vs. NoGo case.
package bgventral
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"math"
"os"
"reflect"
"cogentcore.org/core/base/num"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Theta
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func Embed(b tree.Node) { egui.Embed[Sim, Config](b) }
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
for di := 0; di < ss.Config.Run.NData; di++ {
var trn, tst *GoNoEnv
if newEnv {
trn = &GoNoEnv{}
tst = &GoNoEnv{}
} else {
trn = ss.Envs.ByModeDi(Train, di).(*GoNoEnv)
tst = ss.Envs.ByModeDi(Test, di).(*GoNoEnv)
}
// note: names must be standard here!
trn.Name = env.ModeDi(Train, di)
trn.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(Train, 73+int64(di)*73)
tst.Name = env.ModeDi(Test, di)
tst.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(Test, 181+int64(di)*181)
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
if di == 0 {
ss.ConfigRubicon(trn)
}
}
}
func (ss *Sim) ConfigRubicon(trn *GoNoEnv) {
rp := &ss.Net.Rubicon
rp.SetNUSs(2, 1)
rp.Urgency.U50 = 20 // 20 def
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByModeDi(Train, 0).(*GoNoEnv)
np := 1
nuY := ev.NUnitsY
nuX := ev.NUnitsX
space := float32(2)
one2one := paths.NewOneToOne()
full := paths.NewFull()
_ = full
mtxRandPath := paths.NewPoolUniformRand()
mtxRandPath.PCon = 0.5
_ = mtxRandPath
mtxGo, mtxNo, gpePr, gpeAk, stn, gpi := net.AddVentralBG("", 1, np, nuY, nuX, nuY, nuX, space)
_, _ = gpePr, gpeAk
snc := net.AddLayer2D("SNc", axon.InputLayer, 1, 1)
_ = snc
urge := net.AddUrgencyLayer(5, 4)
_ = urge
accPos := net.AddLayer4D("ACCPos", axon.InputLayer, 1, np, nuY, nuX)
accNeg := net.AddLayer4D("ACCNeg", axon.InputLayer, 1, np, nuY, nuX)
accPos.AddClass("ACC")
accNeg.AddClass("ACC")
accPosPT, accPosVM := net.AddPTMaintThalForSuper(accPos, nil, "VM", "PFCPath", one2one, full, one2one, true, space)
_ = accPosPT
net.ConnectLayers(accPos, stn, full, axon.ForwardPath).AddClass("CortexToSTN")
net.ConnectLayers(accNeg, stn, full, axon.ForwardPath).AddClass("CortexToSTN")
net.ConnectLayers(gpi, accPosVM, full, axon.InhibPath).AddClass("BgFixed")
mtxGo.SetBuildConfig("ThalLay1Name", accPosVM.Name)
mtxNo.SetBuildConfig("ThalLay1Name", accPosVM.Name)
net.ConnectToVSMatrix(accPos, mtxGo, full).AddClass("ACCToVMatrix")
net.ConnectToVSMatrix(accNeg, mtxNo, full).AddClass("ACCToVMatrix")
// cross connections:
net.ConnectToVSMatrix(accPos, mtxNo, full).AddClass("ACCToVMatrix")
net.ConnectToVSMatrix(accNeg, mtxGo, full).AddClass("ACCToVMatrix")
net.ConnectToVSMatrix(urge, mtxGo, full)
accPosVM.PlaceRightOf(gpi, space)
snc.PlaceRightOf(accPosVM, space)
urge.PlaceRightOf(snc, space)
gpeAk.PlaceAbove(gpi)
stn.PlaceRightOf(gpePr, space)
mtxGo.PlaceAbove(gpeAk)
accPos.PlaceAbove(mtxGo)
accNeg.PlaceRightOf(accPos, space)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
ss.ConfigEnv() // always do -- otherwise env params are not reset after a run
// is selected or patterns have been modified, etc.
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
ev := ss.Envs.ByModeDi(Test, 0).(*GoNoEnv)
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Theta, 3).
AddLevel(Cycle, cycles)
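// The Test stack sweeps all (ACCPos, ACCNeg) combinations on a grid with
// TestInc spacing, repeating each combination TestReps times (see
// GoNoEnv.Step), for a total of TestReps * nTestInc^2 trials.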
nTestInc := int(1.0/ev.TestInc) + 1
totTstTrls := ev.TestReps * nTestInc * nTestInc
testTrials := int(math32.IntMultipleGE(float32(totTstTrls), float32(ss.Config.Run.NData)))
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, testTrials, ss.Config.Run.NData).
AddLevel(Theta, 3).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Theta, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) {
trial := ls.Stacks[mode].Loops[Trial].Counter.Cur
theta := ls.Stacks[mode].Loops[Theta].Counter.Cur
ss.ApplyInputs(mode.(Modes), trial, theta)
},
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
ls.AddOnEndToLoop(Theta, "GatedAction", func(mode enums.Enum) {
theta := ls.Stacks[mode].Loops[Theta].Counter.Cur
if theta == 1 {
ss.GatedAction(mode.(Modes))
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Theta, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes, trial, theta int) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
lays := []string{"ACCPos", "ACCNeg"}
net.InitExt()
for di := range ndata {
idx := trial + di
ev := ss.Envs.ByModeDi(mode, di).(*GoNoEnv)
ev.Trial.Set(idx)
if theta == 0 {
ev.Step()
} else {
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
ss.ApplyRubicon(ev, mode, theta, uint32(di))
}
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs. The trial argument is the
// theta-cycle step within the overall trial (0, 1, or 2; see ApplyInputs).
func (ss *Sim) ApplyRubicon(ev *GoNoEnv, mode Modes, trial int, di uint32) {
rp := &ss.Net.Rubicon
rp.NewState(di, &ss.Net.Rand) // first before anything else is updated
rp.EffortUrgencyUpdate(di, 1)
if mode == Test {
rp.Urgency.Reset(di)
}
switch trial {
case 0:
axon.GlobalSetRew(di, 0, false) // no rew
axon.GlobalScalars.Set(0, int(axon.GvACh), int(di))
case 1:
axon.GlobalSetRew(di, 0, false) // no rew
axon.GlobalScalars.Set(1, int(axon.GvACh), int(di))
case 2:
axon.GlobalScalars.Set(1, int(axon.GvACh), int(di))
ss.GatedRew(ev, di)
}
}
// GatedRew applies reward input based on gating action and input
func (ss *Sim) GatedRew(ev *GoNoEnv, di uint32) {
// note: not using RPE here at this point
rew := ev.Rew
ss.SetRew(rew, di)
}
func (ss *Sim) SetRew(rew float32, di uint32) {
rp := &ss.Net.Rubicon
axon.GlobalSetRew(di, rew, true)
axon.GlobalScalars.Set(rew, int(axon.GvDA), int(di)) // no reward prediction error
if rew > 0 {
rp.SetUS(di, axon.Positive, 0, 1)
} else if rew < 0 {
rp.SetUS(di, axon.Negative, 0, 1)
}
}
// GatedAction records the gating action and generates the reward.
// This happens at the end of Theta == 1 (the 2nd theta cycle),
// so that the reward is present during the final theta cycle, when learning occurs.
func (ss *Sim) GatedAction(mode Modes) {
ctx := ss.Net.Context()
curModeDir := ss.Current.Dir(mode.String())
mtxly := ss.Net.LayerByName("VMatrixGo")
vmly := ss.Net.LayerByName("ACCPosVM")
vmlpi := vmly.Params.PoolIndex(0)
mtxlpi := mtxly.Params.PoolIndex(0)
nan := math.NaN()
ndata := int(ctx.NData)
for di := 0; di < ndata; di++ {
ev := ss.Envs.ByModeDi(mode, di).(*GoNoEnv)
didGate := mtxly.Params.AnyGated(uint32(di))
action := "Gated"
if !didGate {
action = "NoGate"
}
ev.Action(action, nil)
rt := axon.LayerStates.Value(vmly.Index, di, int(axon.LayerRT))
if rt > 0 {
curModeDir.Float32("ACCPosVM_RT", ndata).SetFloat1D(float64(rt/200), di)
} else {
curModeDir.Float32("ACCPosVM_RT", ndata).SetFloat1D(nan, di)
}
cycavg := float64(axon.PoolAvgMax(axon.AMCaPMax, axon.AMCycle, axon.Avg, vmlpi, uint32(di)))
curModeDir.Float32("ACCPosVM_ActAvg", ndata).SetFloat1D(cycavg, di)
cycavg = float64(axon.PoolAvgMax(axon.AMCaPMax, axon.AMCycle, axon.Avg, mtxlpi, uint32(di)))
curModeDir.Float32("VMatrixGo_ActAvg", ndata).SetFloat1D(cycavg, di)
}
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
ss.Envs.ByModeDi(Test, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train TrialAll Plot")
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Epoch))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that perform all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"ACCPos", "ACCNeg", "Gated", "Should", "Match", "Rew", "ACCPosVM_RT", "ACCPosVM_ActAvg", "VMatrixGo_ActAvg"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for si, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if si >= 2 && si <= 5 {
s.On = true
}
})
continue
}
switch level {
case Trial:
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*GoNoEnv)
var stat float32
switch name {
case "ACCPos":
stat = ev.ACCPos
case "ACCNeg":
stat = ev.ACCNeg
case "Gated":
stat = num.FromBool[float32](ev.Gated)
case "Should":
stat = num.FromBool[float32](ev.Should)
case "Match":
stat = num.FromBool[float32](ev.Match)
case "Rew":
stat = ev.Rew
case "ACCPosVM_RT", "ACCPosVM_ActAvg", "VMatrixGo_ActAvg":
stat = float32(curModeDir.Float32(name, ndata).Float1D(di))
}
curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
case Epoch:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
if mode == Train {
break
}
// below is special Test Epoch stats to summarize testing data
if si == 0 {
stats.Groups(curModeDir, subDir.Value("TrialName"))
}
stats.GroupStats(curModeDir, stats.StatMean, subDir.Value(name))
// note: results go under Group name: TrialName
gp := curModeDir.Dir("Stats/TrialName/" + name).Value("Mean")
plot.SetFirstStyler(gp, func(s *plot.Style) {
if si >= 2 && si <= 3 {
s.On = true
}
})
if si == len(statNames)-1 {
nrows := gp.DimSize(0)
row := curModeDir.Dir("Stats").Int("Row", nrows)
for i := range nrows {
row.Set(i, i)
}
ss.GUI.Tabs.AsLab().PlotTensorFS(curModeDir.Dir("Stats"))
}
case Run:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
lays := net.LayersByType(axon.GPLayer, axon.STNLayer, axon.VSMatrixLayer, axon.PTMaintLayer, axon.BGThalLayer, axon.UrgencyLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"Gated"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float32(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
// nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
// nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/bgventral"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[bgventral.Sim, bgventral.Config]() }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgventral
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// test with no ACC activity at all -- params need to prevent gating in this situation too
ZeroTest bool
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Tweak means to perform automated parameter tweaking for
// parameters marked Hypers Tweak = log,incr, or [vals].
Tweak bool
// Baseline for Tweak, if true, first run a baseline with current default params.
Baseline bool
// DryRun for Tweak, if true, only print what would be done, don't run.
DryRun bool
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Larger values are significantly faster for both CPU and GPU, and result in
// an effective mini-batch for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"30"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"128"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
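// With the defaults above (ISICycles = 0, MinusCycles = 150, PlusCycles = 50),
// each trial runs for 200 cycles.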
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "PCoreVS"
cfg.Title = "Pallidal Core (GPe) Ventral Striatum"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/bgventral/README.md"
cfg.Doc = "This project simulates the Ventral Basal Ganglia, starting with the Ventral Striatum, centered on the Pallidum Core (GPe) areas that drive Go vs. No engagement in a goal."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package bgventral
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Theta`: 1, `Trial`: 2, `Epoch`: 3, `Run`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Theta`, 2: `Trial`, 3: `Epoch`, 4: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgventral
import (
"fmt"
"math/rand"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/popcode"
)
// GoNoEnv implements simple Go vs. NoGo input patterns to test BG learning.
type GoNoEnv struct {
// name of environment -- Train or Test
Name string
// training or testing env?
Mode Modes
// trial counter -- set by caller for testing
Trial env.Counter
// if true, ACCPos and ACCNeg are set manually for testing specific cases;
// random values are not generated for training, and ACCPos / ACCNeg are not auto-incremented during testing
ManualValues bool
// activation of ACC positive valence -- drives go
ACCPos float32
// activation of ACC neg valence -- drives nogo
ACCNeg float32
// threshold on diff between ACCPos - ACCNeg for counting as a Go trial
PosNegThr float32
// learning rate for reward prediction
RewPredLRate float32
// minimum rewpred value
RewPredMin float32
// reward value for case where it gated and it should have:
// nominally 1 but can lead to over-learning, RPE would decrease over time
GatedShould float32
// reward value for case where it did not gate and it should have:
// in real case, would not get anything for this, but 1 is a cheat to improve perf
NoGatedShould float32
// reward value for case where it gated and it should not have. should be -1
GatedShouldnt float32
// reward value for case where it did not gate and it should not have:
// should be 0
NoGatedShouldnt float32
// increment in testing activation for test all
TestInc float32
// number of repetitions per testing level
TestReps int
// number of units, Y
NUnitsY int `display:"-"`
// number of units, X
NUnitsX int `display:"-"`
// total number of units
NUnits int `display:"-"`
// pop code the values in ACCPos and Neg
PopCode popcode.OneD
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// named states: ACCPos, ACCNeg, Rew, SNc
States map[string]*tensor.Float32
// true if Pos - Neg > Thr
Should bool `edit:"-"`
// true if model gated on this trial
Gated bool `edit:"-"`
// true if gated == should
Match bool `edit:"-"`
// reward based on match between Should vs. Gated
Rew float32 `edit:"-"`
// reward prediction based on incremental learning: RewPredLRate * (Rew - RewPred)
RewPred float32 `edit:"-"`
// reward prediction error: Rew - RewPred
RPE float32 `edit:"-"`
}
func (ev *GoNoEnv) Label() string { return ev.Name }
func (ev *GoNoEnv) Defaults() {
ev.TestInc = 0.1
ev.TestReps = 32
ev.NUnitsY = 5
ev.NUnitsX = 5
ev.NUnits = ev.NUnitsY * ev.NUnitsX
ev.PosNegThr = 0
ev.RewPredLRate = 0.01 // GPU 16 0.01 > 0.02 >> 0.05 > 0.1, 0.2 for partial, seq3
ev.RewPredMin = 0.1 // 0.1 > 0.05 > 0.2
ev.GatedShould = 1 // note: works; BurstGain = 0.1 helps prevent overlearning
ev.NoGatedShould = 0 // note: works fine here -- much more realistic
ev.GatedShouldnt = -1
ev.NoGatedShouldnt = 0
ev.PopCode.Defaults()
ev.PopCode.SetRange(-0.2, 1.2, 0.1)
}
// Config configures the world
func (ev *GoNoEnv) Config(mode Modes, rndseed int64) {
ev.Mode = mode
ev.RandSeed = rndseed
ev.Rand.NewRand(ev.RandSeed)
ev.States = make(map[string]*tensor.Float32)
ev.States["ACCPos"] = tensor.NewFloat32(ev.NUnitsY, ev.NUnitsX)
ev.States["ACCNeg"] = tensor.NewFloat32(ev.NUnitsY, ev.NUnitsX)
ev.States["Rew"] = tensor.NewFloat32(1, 1)
ev.States["SNc"] = tensor.NewFloat32(1, 1)
}
func (ev *GoNoEnv) Init(run int) {
ev.Trial.Init()
}
func (ev *GoNoEnv) State(el string) tensor.Values {
return ev.States[el]
}
func (ev *GoNoEnv) String() string {
return fmt.Sprintf("%4f_%4f", ev.ACCPos, ev.ACCNeg)
}
// RenderACC renders the given value in ACC popcode
func (ev *GoNoEnv) RenderACC(name string, val float32) {
st := ev.States[name]
ev.PopCode.Encode(&st.Values, val, ev.NUnits, false)
}
// RenderLayer renders a whole-layer popcode value
func (ev *GoNoEnv) RenderLayer(name string, val float32) {
st := ev.States[name]
ev.PopCode.Encode(&st.Values, val, ev.NUnits, false)
}
// RenderState renders the current state
func (ev *GoNoEnv) RenderState() {
ev.RenderACC("ACCPos", ev.ACCPos)
ev.RenderACC("ACCNeg", ev.ACCNeg)
}
// Step does one step -- must set Trial.Cur first if doing testing
func (ev *GoNoEnv) Step() bool {
nTestInc := int(1.0/ev.TestInc) + 1
if !ev.ManualValues {
if ev.Mode == Test {
repn := ev.Trial.Cur / ev.TestReps
pos := repn / nTestInc
neg := repn % nTestInc
ev.ACCPos = float32(pos) * ev.TestInc
ev.ACCNeg = float32(neg) * ev.TestInc
// fmt.Printf("idx: %d di: %d repn: %d pos: %d neg: %d\n", idx, di, repn, pos, neg)
} else {
ev.ACCPos = rand.Float32()
ev.ACCNeg = rand.Float32()
}
}
ev.RenderState()
return true
}
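// Action records whether the model gated (action == "Gated") and computes the
// reward by comparing that against whether it should have gated, i.e. whether
// ACCPos - ACCNeg exceeds PosNegThr:
//
//	should && gated   -> GatedShould (match)
//	should && !gated  -> NoGatedShould
//	!should && gated  -> GatedShouldnt
//	!should && !gated -> NoGatedShouldnt (match)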
func (ev *GoNoEnv) Action(action string, nop tensor.Values) {
if action == "Gated" {
ev.Gated = true
} else {
ev.Gated = false
}
pndiff := (ev.ACCPos - ev.ACCNeg) - ev.PosNegThr
should := pndiff > 0
didGate := ev.Gated
match := false
var rew float32
switch {
case should && didGate:
rew = ev.GatedShould
match = true
case should && !didGate:
rew = ev.NoGatedShould
case !should && didGate:
rew = ev.GatedShouldnt
case !should && !didGate:
rew = ev.NoGatedShouldnt
match = true
}
ev.Should = should
ev.Match = match
ev.Rew = rew
// fmt.Println(should, didGate, match, rew)
ev.ComputeDA(rew)
}
func (ev *GoNoEnv) ComputeDA(rew float32) {
ev.RPE = rew - ev.RewPred
ev.RewPred += ev.RewPredLRate * (rew - ev.RewPred)
if ev.RewPred < ev.RewPredMin {
ev.RewPred = ev.RewPredMin
}
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bgventral
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "clamp gain makes big diff on overall excitation, gating propensity",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
}},
{Sel: ".VBG", Doc: "all ModACh",
Set: func(ly *axon.LayerParams) {
ly.Acts.Dend.ModACh.SetBool(true)
}},
{Sel: ".VSMatrixLayer", Doc: "all mtx",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.Gi = 0.5 // 0.5 needed for differentiated reps
ly.Inhib.ActAvg.Nominal = 0.25
ly.Striatum.GateThr = 0.05 // todo: .01 should be new default
ly.Learn.RLRate.On.SetBool(true) // only used for non-rew trials -- key
ly.Learn.RLRate.Diff.SetBool(false)
ly.Learn.RLRate.SigmoidMin = 0.01 // 0.01 better than .05
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true)
ly.Learn.NeuroMod.BurstGain = 0.1 // 1 def -- must be smaller given rew dynamics
ly.Learn.NeuroMod.DAModGain = 0 // strongly biases the gating
}},
{Sel: ".VSTNLayer", Doc: "all VSTN",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.1 // todo: re-param with more stn, increase..
ly.Acts.SKCa.CaRDecayTau = 80
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
// ly.Inhib.Layer.Gi = 3.2 // 3.2 def
ly.Acts.Dend.ModGain = 1.5 // 1.5 def
ly.Acts.Kir.Gk = 0 // no real diff here over range 0-10
ly.Acts.Dend.ModACh.SetBool(true)
}},
{Sel: ".ACC", Doc: "manipulate noise to see about integration over time",
Set: func(ly *axon.LayerParams) {
ly.Acts.Noise.On.SetBool(false)
ly.Acts.Noise.Ge = 0.1 // 0.1 is visibly impactful
ly.Acts.Noise.Gi = 0.01 // 0.01 -- if too strong, the rep becomes very weak
}},
{Sel: "#VGPi", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.3 // 0.3 > 0.2, 0.1
ly.Acts.Init.GeVar = 0.1
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: ".VSMatrixPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 // 0.01, vs .02 default
pt.Learn.DWt.LearnThr = 0.1 // prevents learning below this thr: preserves low act
pt.VSMatrix.RewActLearn.SetBool(false) // significantly cleaner
pt.SWts.Adapt.On.SetBool(false) // not much diff: false >= true
}},
{Sel: "#UrgencyToVMatrixGo", Doc: "strong urgency factor",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // don't dilute from others
pt.PathScale.Abs = 1
pt.Learn.Learn.SetBool(false)
}},
{Sel: ".SuperToPT", Doc: "one-to-one from super",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3.0 // was 4
}},
{Sel: ".ACCToVMatrix", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 good; 1.8 causes some breakthrough
}},
{Sel: "#VMatrixNoToVMatrixGo", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.05
pt.PathScale.Abs = 1
pt.Learn.Learn.SetBool(false)
}},
{Sel: "#VSTNToVGPi", Doc: "strong initial phasic activation",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = .2
}},
},
}
/*
// ParamSetsDefs contains the full set of parameters, many of which are at default values
// and have informed the default values in the first place.
var ParamSetsDefs = params.Sets{
"Defaults": {
{Sel: ".MatrixLayer", Doc: "all mtx",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.On = "false",
ly.Inhib.Layer.Gi = "0.5",
ly.Inhib.Layer.FB = "0",
ly.Striatum.GateThr = "0.05", // .05 default
ly.Acts.Kir.Gbar = "10", // 10 > 5 > 20
ly.Acts.GabaB.Gk = "0",
ly.Acts.NMDA.Ge = "0.006", // 0.006 default, necessary (0 very bad)
ly.Learn.NeuroMod.AChLRateMod = "0", // no diff here -- always ACh
ly.Learn.NeuroMod.BurstGain = "0.1", // 0.1 == 0.2 > 0.05 > 0.5; only for weird rew case here; 1 def
},
Hypers: params.Hypers{
ly.Learn.NeuroMod.BurstGain = {"Tweak = "-"},
ly.Acts.Kir.Gbar = {"Tweak = "-"},
ly.Acts.NMDA.Ge = {"Tweak = "-"},
ly.Inhib.Layer.Gi = {"Tweak = "-"},
}},
{Sel: ".VSTNLayer", Doc: "all VSTN",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = "0.1",
ly.Acts.Kir.Gbar = "10", // 10 > 5 > 2 -- key for pause
ly.Acts.SKCa.Gk = "2", // 2 > 5 >> 1 (for Kir = 10)
ly.Inhib.Layer.On = "true", // really no inhib neurons here. all VGPePr
ly.Learn.NeuroMod.AChDisInhib = "0",
},
Hypers: params.Hypers{
ly.Acts.Init.GeBase = {"Tweak = "-"},
ly.Acts.Kir.Gbar = {"Tweak = "-"},
ly.Acts.SKCa.Gk = {"Tweak = "-"},
}},
{Sel: "#VGPePr", Doc: "prototypical",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = "0.4", // 0.4 > 0.3, 0.5
ly.Acts.Init.GeVar = "0.2",
},
Hypers: params.Hypers{
ly.Acts.Init.GeBase = {"Tweak = "-"},
}},
{Sel: "#VGPeAk", Doc: "arkypallidal",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = "0.2", // 0.2 > 0.3, 0.1
ly.Acts.Init.GeVar = "0.1", // 0.1 == 0.2 > 0.05
},
Hypers: params.Hypers{
ly.Acts.Init.GeBase = {"Tweak = "-"},
ly.Acts.Init.GeVar = {"Tweak = "-"},
}},
{Sel: "#VGPi", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = "0.3", // 0.3 > 0.2, 0.1
ly.Acts.Init.GeVar = "0.1",
},
Hypers: params.Hypers{
ly.Acts.Init.GeBase = {"Tweak = "-"},
}},
{Sel: ".VGPeAkToVMatrix", Doc: "go disinhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "3", // 3 >= 2, 4
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VMatrixGoToVGPeAk", Doc: "go inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = ".5", // stronger = more binary
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VGPePrToVSTN", Doc: "enough to kick off the ping-pong dynamics for VSTN.",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "0.5",
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VSTNToVGPePr", Doc: "stronger VSTN -> VGPePr to kick it high at start",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "0.5",
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VSTNToVGPeAk", Doc: "this is weak biologically -- could try 0",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "0.1",
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VMatrixNoToVGPePr", Doc: "proto = primary classical NoGo pathway",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "1", // 1 fully inhibits Pr
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VGPePrToVGPePr", Doc: "self-inhib -- only source of self reg",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "4", // 4 best for DS
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VGPePrToVGPeAk", Doc: "just enough to knock down in baseline state",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "1",
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VMatrixGoToVGPi", Doc: "go influence on gating",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = ".2", // .1 too weak
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VGPePrToVGPi", Doc: "nogo influence on gating -- decreasing produces more graded function of Go",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "1", // 2 is much worse.. keep at 1
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VSTNToVGPi", Doc: "strong initial phasic activation",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = ".2",
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
{Sel: "#VGPiToACCPosVM", Doc: "final inhibition",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = "5", // needs to be very strong -- 5
},
Hypers: params.Hypers{
pt.PathScale.Abs = {"Tweak = "-"},
}},
},
}
*/
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import "cogentcore.org/core/math32/minmax"
// Arm represents the properties of a given arm of the N-maze,
// representing a different choice option with different cost / benefit
// tradeoffs, in terms of distance and effort factors for getting
// down the arm, and US present at the end, which is delivered with
// a given probability and magnitude range.
// Each arm has its own distinctive CS visible at the start, which is the
// only cue used for the agent to decide whether to choose this arm or not.
type Arm struct {
// CS == index of Arm
CS int
// length of arm: distance from CS start to US end for this arm
Length int
// index of US present at the end of this arm.
// Indexes [0:NDrives] are positive USs, and beyond that are negative USs.
US int
// range of different effort levels per step (uniformly randomly sampled per step) for going down this arm
Effort minmax.F32
// range of different US magnitudes (uniformly sampled)
USMag minmax.F32
// probability of delivering the US
USProb float32
// USAvail indicates that the US is available on this trial: this is computed
// from the USProb at the start of each behavioral trial
USAvail bool `edit:"-"`
// nominal expected value = US.Prob * US.Mag
ExValue float32 `edit:"-"`
// nominal expected cost = effort + normalized length
ExCost float32 `edit:"-"`
// nominal expected utility = ExValue - CostFactor * ExCost.
// This is only meaningful relative to other options, not in any absolute terms.
ExUtil float32 `edit:"-"`
// UtilGroup is the group id for computing the BestOption utility for this arm:
// = US for positive, and NDrives for all negative USs
UtilGroup int
// BestOption is true if this arm represents the best option in terms of ExUtil
// relative to other options _for the same US_.
// All negative USs are considered as one group for ranking.
BestOption bool `edit:"-"`
}
func (arm *Arm) Defaults() {
arm.Length = 4
arm.Effort.Set(1, 1)
arm.USMag.Set(1, 1)
arm.USProb = 1
arm.Empty()
}
// Empty marks the arm as unused, setting US and CS to -1 and expected values to 0
func (arm *Arm) Empty() {
arm.US = -1
arm.CS = -1
arm.ExValue = 0
arm.ExUtil = 0
}
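// armExpectations is an illustrative sketch (not used by the sim) showing how the
// nominal expectation fields above are filled in, mirroring the logic in
// Env.ExpectedUtilities. maxLength and costFactor are assumed to correspond to
// Env.MaxLength and Params.CostFactor; for negative USs the value would be negated.
func armExpectations(arm *Arm, maxLength int, costFactor float32) (value, cost, util float32) {
value = arm.USMag.Midpoint() * arm.USProb // mean US magnitude weighted by delivery probability
cost = costFactor * (float32(arm.Length)/float32(maxLength) + arm.Effort.Midpoint()) // normalized length + effort
util = value - cost // only meaningful relative to other arms
return
}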
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import "cogentcore.org/core/math32/minmax"
// Params are misc environment parameters
type Params struct {
// effort for turning
TurnEffort minmax.F32 `nest:"+" default:"{'Min':0.5, 'Max':0.5}"`
// effort for consuming US
ConsumeEffort minmax.F32 `nest:"+" default:"{'Min':0.5, 'Max':0.5}"`
// an arbitrary scaling factor for costs relative to values,
// used in computing the expected utility ExUtil for an arm.
// These utilities are only useful for relative comparisons,
// which go into determining the BestOption flag that should be used
// for evaluating overall choices.
CostFactor float32 `default:"0.2"`
// threshold for considering a drive to be active; used in evaluating whether
// an Arm choice is considered to be a good option.
ActiveDriveThr float32 `default:"0.5"`
// always turn left -- zoolander style -- reduces degrees of freedom in evaluating behavior
AlwaysLeft bool `default:"true"`
// after running down an Arm, a new random starting location is selected (otherwise same arm as last run)
RandomStart bool `default:"true"`
// if true, allow movement between arms just by going Left or Right.
// Otherwise once past the start, no switching is allowed
OpenArms bool `default:"true"`
// strength of inactive inputs (e.g., Drives in Approach paradigm)
Inactive minmax.F32 `nest:"+" default:"{'Min':0, 'Max':0}" display:"inline"`
// number of Y-axis repetitions of localist stimuli -- for redundancy in spiking nets
NYReps int `default:"4"`
}
// Config has environment configuration
type Config struct {
// experimental paradigm that governs the configuration of environment based on params,
// e.g., how the Range values are assigned to different arms.
Paradigm Paradigms
// for debugging, print out key steps including a trace of the action generation logic
Debug bool
// number of different drive-like body states (hunger, thirst, etc),
// that are satisfied by a corresponding positive US outcome.
// This is in addition to the first curiosity drive, which is always present.
NDrives int
// number of negative US outcomes -- these are added after NDrives positive USs to total US list
NNegUSs int
// total number of USs = NDrives + NNegUSs
NUSs int `edit:"-"`
// number of different arms, each of which has its own distinctive CS.
// This is determined by the Paradigm (e.g., 2*NUSs for the Group cases).
NArms int `edit:"-"`
// range of arm lengths allocated across arms, per Paradigm
LengthRange minmax.Int `nest:"+"`
// range of effort values allocated across arms, per Paradigm
EffortRange minmax.F32 `nest:"+"`
// range of US magnitudes allocated across arms, per Paradigm
USMagRange minmax.F32 `nest:"+"`
// range of US probabilities allocated across arms, per Paradigm
USProbRange minmax.F32 `nest:"+"`
// parameters for each arm option: dist, effort, US
Arms []*Arm
// misc params
Params Params `display:"add-fields"`
}
func (cfg *Config) Defaults() {
if cfg.NDrives == 0 {
cfg.NDrives = 4
cfg.LengthRange.Set(4, 4)
cfg.EffortRange.Set(1, 1)
cfg.USMagRange.Set(1, 1)
cfg.USProbRange.Set(1, 1)
}
cfg.Update()
}
func (cfg *Config) Update() {
cfg.NUSs = cfg.NDrives + cfg.NNegUSs
}
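// exampleConfig is a minimal usage sketch (an assumption about typical usage; it is
// not called anywhere in this package): starting from the zero value, Defaults fills
// in NDrives = 4 and unit ranges, and Update derives NUSs = NDrives + NNegUSs.
func exampleConfig() Config {
var cfg Config
cfg.NNegUSs = 1
cfg.Defaults() // sets NDrives = 4, LengthRange = 4, other ranges = 1, then calls Update
// cfg.NUSs == 5 here; NArms is set later by the Paradigm config (e.g., 2*NUSs for GroupGoodBad)
return cfg
}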
// Copyright (c) 2019, Cogent Core. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import (
"cogentcore.org/core/math32"
"cogentcore.org/core/xyz"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
)
// Emery encapsulates all the emery agent config and physics.
type Emery struct {
// full length of emery
Length float32
// emery object
Obj *builder.Object `display:"-"`
// PlaneXZ joint for controlling 2D position.
XZ *builder.Joint
// joint for the neck.
Neck *builder.Joint
// Right eye of emery
EyeR *builder.Body `display:"-"`
}
func (em *Emery) Defaults() {
em.Length = 1
}
// Make constructs a new Emery virtual hamster Object in given World.
func (em *Emery) Make(wl *builder.World, sc *phyxyz.Scene, vw *GUI) {
name := "emery"
mass := float32(0.5) // kg -- typical for adult rat
hl := em.Length / 2
hh := hl / 2
hw := hh
headsz := hh * 0.75
eyesz := headsz * .2
rot := math32.NewQuatIdentity()
obj := wl.NewObject()
em.Obj = obj
emr := obj.NewDynamicSkin(sc, name+"_body", physics.Box, "purple", mass, math32.Vec3(hw, hh, hl), math32.Vec3(0, hh, 0), rot)
esk := emr.Skin
esk.InitSkin = func(sld *xyz.Solid) {
esk.BoxInit(sld)
sld.Updater(func() {
esk.Color = vw.StateColor()
esk.UpdateColor(esk.Color, sld)
})
}
em.XZ = obj.NewJointPlaneXZ(nil, emr, math32.Vec3(0, 0, 0), math32.Vec3(0, -hh, 0))
headPos := math32.Vec3(0, hh, -(hl + headsz))
head := obj.NewDynamicSkin(sc, name+"_head", physics.Box, "tan", mass*.1, math32.Vec3(headsz, headsz, headsz), headPos, rot)
em.Neck = obj.NewJointFixed(emr, head, math32.Vec3(0, hh, -hl), math32.Vec3(0, 0, headsz))
em.Neck.ParentFixed = true
eyeoff := math32.Vec3(-headsz*.6, headsz*.1, -(headsz + eyesz*.3))
bd := obj.NewDynamicSkin(sc, name+"_eye-l", physics.Box, "green", mass*.01, math32.Vec3(eyesz, eyesz*.5, eyesz*.2), headPos.Add(eyeoff), rot)
ej := obj.NewJointFixed(head, bd, eyeoff, math32.Vec3(0, 0, -eyesz*.3))
ej.ParentFixed = true
eyeoff.X = headsz * .6
em.EyeR = obj.NewDynamicSkin(sc, name+"_eye-r", physics.Box, "green", mass*.01, math32.Vec3(eyesz, eyesz*.5, eyesz*.2), headPos.Add(eyeoff), rot)
ej = obj.NewJointFixed(head, em.EyeR, eyeoff, math32.Vec3(0, 0, -eyesz*.3))
ej.ParentFixed = true
// emr.Updater(func() {
// ev := vw.Env
// x, y := vw.Geom.Pos(ev.Arm, ev.Pos)
// emr.Rel.Pos.Set(x, 0, y)
// })
}
// Code generated by "core generate -add-types -gosl"; DO NOT EDIT.
package armaze
import (
"cogentcore.org/core/enums"
)
var _ActionsValues = []Actions{0, 1, 2, 3, 4}
// ActionsN is the highest valid value for type Actions, plus one.
//
//gosl:start
const ActionsN Actions = 5
//gosl:end
var _ActionsValueMap = map[string]Actions{`Forward`: 0, `Left`: 1, `Right`: 2, `Consume`: 3, `None`: 4}
var _ActionsDescMap = map[Actions]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _ActionsMap = map[Actions]string{0: `Forward`, 1: `Left`, 2: `Right`, 3: `Consume`, 4: `None`}
// String returns the string representation of this Actions value.
func (i Actions) String() string { return enums.String(i, _ActionsMap) }
// SetString sets the Actions value from its string representation,
// and returns an error if the string is invalid.
func (i *Actions) SetString(s string) error {
return enums.SetString(i, s, _ActionsValueMap, "Actions")
}
// Int64 returns the Actions value as an int64.
func (i Actions) Int64() int64 { return int64(i) }
// SetInt64 sets the Actions value from an int64.
func (i *Actions) SetInt64(in int64) { *i = Actions(in) }
// Desc returns the description of the Actions value.
func (i Actions) Desc() string { return enums.Desc(i, _ActionsDescMap) }
// ActionsValues returns all possible values for the type Actions.
func ActionsValues() []Actions { return _ActionsValues }
// Values returns all possible values for the type Actions.
func (i Actions) Values() []enums.Enum { return enums.Values(_ActionsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Actions) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Actions) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Actions") }
var _ParadigmsValues = []Paradigms{0, 1}
// ParadigmsN is the highest valid value for type Paradigms, plus one.
//
//gosl:start
const ParadigmsN Paradigms = 2
//gosl:end
var _ParadigmsValueMap = map[string]Paradigms{`GroupGoodBad`: 0, `GroupRisk`: 1}
var _ParadigmsDescMap = map[Paradigms]string{0: `GroupGoodBad allocates Arms into 2 groups, with first group unambiguously Good and the second Bad, using the Min, Max values of each Range parameter: Length, Effort, USMag, USProb. Good has Min cost, Max US, and opposite for Bad. This also aligns with the ordering of USs, such that negative USs are last.`, 1: `GroupRisk allocates Arms into 2 groups with conflicting Cost and Benefit tradeoffs, with the first group having Min cost and Min US, and the second group having Max cost and Max US.`}
var _ParadigmsMap = map[Paradigms]string{0: `GroupGoodBad`, 1: `GroupRisk`}
// String returns the string representation of this Paradigms value.
func (i Paradigms) String() string { return enums.String(i, _ParadigmsMap) }
// SetString sets the Paradigms value from its string representation,
// and returns an error if the string is invalid.
func (i *Paradigms) SetString(s string) error {
return enums.SetString(i, s, _ParadigmsValueMap, "Paradigms")
}
// Int64 returns the Paradigms value as an int64.
func (i Paradigms) Int64() int64 { return int64(i) }
// SetInt64 sets the Paradigms value from an int64.
func (i *Paradigms) SetInt64(in int64) { *i = Paradigms(in) }
// Desc returns the description of the Paradigms value.
func (i Paradigms) Desc() string { return enums.Desc(i, _ParadigmsDescMap) }
// ParadigmsValues returns all possible values for the type Paradigms.
func ParadigmsValues() []Paradigms { return _ParadigmsValues }
// Values returns all possible values for the type Paradigms.
func (i Paradigms) Values() []enums.Enum { return enums.Values(_ParadigmsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Paradigms) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Paradigms) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "Paradigms")
}
var _TraceStatesValues = []TraceStates{0, 1, 2, 3, 4, 5, 6, 7}
// TraceStatesN is the highest valid value for type TraceStates, plus one.
//
//gosl:start
const TraceStatesN TraceStates = 8
//gosl:end
var _TraceStatesValueMap = map[string]TraceStates{`TrSearching`: 0, `TrDeciding`: 1, `TrJustEngaged`: 2, `TrApproaching`: 3, `TrConsuming`: 4, `TrRewarded`: 5, `TrGiveUp`: 6, `TrBumping`: 7}
var _TraceStatesDescMap = map[TraceStates]string{0: `Searching is not yet goal engaged, looking for a goal`, 1: `Deciding is having some partial gating but not in time for action`, 2: `JustEngaged means just decided to engage in a goal`, 3: `Approaching is goal engaged, approaching the goal`, 4: `Consuming is consuming the US, first step (prior to getting reward, step1)`, 5: `Rewarded is just received reward from a US`, 6: `GiveUp is when goal is abandoned`, 7: `Bumping is bumping into a wall`}
var _TraceStatesMap = map[TraceStates]string{0: `TrSearching`, 1: `TrDeciding`, 2: `TrJustEngaged`, 3: `TrApproaching`, 4: `TrConsuming`, 5: `TrRewarded`, 6: `TrGiveUp`, 7: `TrBumping`}
// String returns the string representation of this TraceStates value.
func (i TraceStates) String() string { return enums.String(i, _TraceStatesMap) }
// SetString sets the TraceStates value from its string representation,
// and returns an error if the string is invalid.
func (i *TraceStates) SetString(s string) error {
return enums.SetString(i, s, _TraceStatesValueMap, "TraceStates")
}
// Int64 returns the TraceStates value as an int64.
func (i TraceStates) Int64() int64 { return int64(i) }
// SetInt64 sets the TraceStates value from an int64.
func (i *TraceStates) SetInt64(in int64) { *i = TraceStates(in) }
// Desc returns the description of the TraceStates value.
func (i TraceStates) Desc() string { return enums.Desc(i, _TraceStatesDescMap) }
// TraceStatesValues returns all possible values for the type TraceStates.
func TraceStatesValues() []TraceStates { return _TraceStatesValues }
// Values returns all possible values for the type TraceStates.
func (i TraceStates) Values() []enums.Enum { return enums.Values(_TraceStatesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i TraceStates) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *TraceStates) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "TraceStates")
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package armaze represents an N-armed maze ("bandit")
// with each Arm having a distinctive CS stimulus at the start
// (could be one of multiple possibilities) and (some probability of)
// a US outcome at the end of the maze (could be either positive
// or negative, with (variable) magnitude and probability.
//
// The maze can have open or closed arms -- open arms allow
// switching to a neighboring arm anytime, while closed arms
// only allow switching at the start.
package armaze
//go:generate core generate -add-types -gosl
import (
"fmt"
"cogentcore.org/core/cli"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
)
// Actions is a list of mutually exclusive states
// for tracing the behavior and internal state of Emery
type Actions int32 //enums:enum
const (
Forward Actions = iota
Left
Right
Consume
None
)
// General note on US / Drive indexes: the env does _not_ represent any
// built-in drives or USs (curiosity, effort, urgency); index 0 is the start
// of the sim-specific USs and Drives.
// Env implements an N-armed maze ("bandit")
// with each Arm having a distinctive CS stimulus visible at the start
// (could be one of multiple possibilities) and (some probability of)
// a US outcome at the end of the maze (could be either positive
// or negative, with (variable) magnitude and probability.
type Env struct {
// name of environment -- Train or Test
Name string
// number of data parallel envs to represent
NData int `edit:"-"`
// data parallel index to view
Di int `edit:"-"`
// configuration parameters
Config Config `new-window:"+"`
// current drive strength for each of Config.NDrives in normalized
// 0-1 units of each drive (beyond built-in curiosity drive)
Drives []float32
// current arm location: either facing (Pos=0) or in (Pos > 0)
Arm int `edit:"-"`
// current position in the Arm: 0 = at start looking in, otherwise at given distance into the arm
Pos int `edit:"-"`
// distance from US within current arm
Dist int `edit:"-"`
// current integer time step since last NewStart
Tick int `edit:"-"`
// current target drive, in paradigms where that is used (e.g., Approach)
TrgDrive int `edit:"-"`
// Current US being consumed -- is -1 unless being consumed
USConsumed int `edit:"-"`
// reward or punishment value generated by the current US being consumed.
// just the Magnitude of the US -- does NOT include any modulation by Drive
USValue float32 `edit:"-"`
// just finished consuming a US -- ready to start doing something new
JustConsumed bool `edit:"-"`
// last action taken
LastAct Actions `edit:"-"`
// effort on current trial
Effort float32 `edit:"-"`
// last CS seen
LastCS int `edit:"-"`
// last US -- previous trial
LastUS int `edit:"-"`
// true if looking at correct CS for first time
ShouldGate bool `edit:"-"`
// just gated on this trial -- set by sim -- used for instinct
JustGated bool `edit:"-"`
// has gated at some point during sequence -- set by sim -- used for instinct
HasGated bool `edit:"-"`
// named states -- e.g., USs, CSs, etc
States map[string]*tensor.Float32
// maximum length of any arm
MaxLength int `edit:"-"`
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
}
func (ev *Env) Label() string { return ev.Name }
func (ev *Env) String() string { return fmt.Sprintf("%d_%d_%d", ev.Arm, ev.Pos, ev.Dist) }
// Defaults sets default params
func (ev *Env) Defaults() {
ev.Config.Defaults()
cli.SetFromDefaults(&ev.Config)
ev.Config.Update()
}
// ConfigEnv configures the environment for the given data parallel index di.
// It initializes everything according to the basic Ns; any additional
// parameterization via specific configs is applied after this step.
func (ev *Env) ConfigEnv(di int) {
ev.Di = di
if ev.Rand.Rand == nil {
ev.Rand.NewRand(ev.RandSeed)
} else {
ev.Rand.Seed(ev.RandSeed)
}
switch ev.Config.Paradigm {
case GroupGoodBad:
ev.ConfigGroupGoodBad()
}
ev.ExpectedUtilities()
ev.Validate()
}
func (ev *Env) Validate() error {
return nil
}
// Init prepares to run: params could have changed since the initial config,
// so this updates everything except broad overall config stuff.
func (ev *Env) Init(run int) {
cfg := &ev.Config
ev.UpdateMaxLength()
ev.States = make(map[string]*tensor.Float32)
ev.States["CS"] = tensor.NewFloat32(cfg.Params.NYReps, cfg.NArms)
ev.States["Pos"] = tensor.NewFloat32(cfg.Params.NYReps, ev.MaxLength+1)
ev.States["Dist"] = tensor.NewFloat32(cfg.Params.NYReps, ev.MaxLength+1)
ev.States["Action"] = tensor.NewFloat32(cfg.Params.NYReps, int(ActionsN))
ev.NewStart()
ev.JustConsumed = true // will trigger a new start again on Step
}
func (ev *Env) State(el string) tensor.Values {
return ev.States[el]
}
// NewStart starts a new approach run
func (ev *Env) NewStart() {
for _, arm := range ev.Config.Arms { // do at start so it is consistent
arm.USAvail = randx.BoolP32(arm.USProb, &ev.Rand)
}
if ev.Config.Params.RandomStart {
ev.Arm = ev.Rand.Intn(len(ev.Config.Arms))
}
arm := ev.Config.Arms[ev.Arm]
ev.Pos = 0
ev.Dist = arm.Length - ev.Pos
ev.Tick = 0
ev.JustGated = false
ev.HasGated = false
ev.USConsumed = -1
ev.USValue = 0
ev.JustConsumed = false
ev.TrgDrive = ev.Rand.Intn(ev.Config.NDrives)
for i := range ev.Drives {
if i == ev.TrgDrive {
ev.Drives[i] = 1
} else {
ev.Drives[i] = ev.InactiveValue()
}
}
ev.RenderState()
}
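// ExpectedUtilities computes the nominal ExValue, ExCost, and ExUtil for each Arm,
// and flags the BestOption arm within each utility group: each positive US forms
// its own group, and all negative USs share a single group (index NDrives).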
func (ev *Env) ExpectedUtilities() {
ev.UpdateMaxLength()
cfg := &ev.Config
maxLen := float32(ev.MaxLength)
nus := cfg.NDrives
if cfg.NNegUSs > 0 {
nus++
}
for ui := 0; ui < nus; ui++ {
maxUtil := float32(0)
for _, arm := range ev.Config.Arms {
usSign := float32(1)
if ui < cfg.NDrives {
if arm.US != ui {
continue
}
arm.UtilGroup = ui
} else {
usSign = -1
if arm.US < cfg.NDrives { // skip positive USs in the negative group
continue
}
arm.UtilGroup = ui
}
val := usSign * arm.USMag.Midpoint() * arm.USProb
arm.ExValue = val
cost := cfg.Params.CostFactor * ((float32(arm.Length) / maxLen) + arm.Effort.Midpoint())
arm.ExCost = cost
util := val - cost
arm.ExUtil = util
if util > maxUtil {
maxUtil = util
}
}
for _, arm := range ev.Config.Arms {
if arm.UtilGroup != ui {
continue
}
arm.BestOption = (arm.ExUtil >= maxUtil)
}
}
}
// ArmIsBest returns true if the given arm is the best choice,
// based on the arm being marked as BestOption in terms of its
// general expected utility, and in terms of the US matching
// an active drive and not being a negative US outcome.
func (ev *Env) ArmIsBest(armIdx int) bool {
arm := ev.Config.Arms[armIdx]
if !arm.BestOption {
return false
}
if ev.ArmIsNegative(armIdx) {
return false
}
return ev.Drives[arm.US] > ev.Config.Params.ActiveDriveThr
}
func (ev *Env) MaxDrive() int {
mx := float32(0)
mi := 0
for i, d := range ev.Drives {
if d > mx {
mx = d
mi = i
}
}
return mi
}
// ArmIsNegative returns true if the given arm has a negative outcome
func (ev *Env) ArmIsNegative(armIdx int) bool {
arm := ev.Config.Arms[armIdx]
return (arm.US >= ev.Config.NDrives) // USs at index NDrives and beyond are negative
}
// Step does one step. It is up to the driving sim to decide when to call NewStart.
func (ev *Env) Step() bool {
ev.LastCS = ev.CurCS()
if ev.JustConsumed { // from last time, not this time.
ev.NewStart()
} else {
ev.Tick++
}
ev.TakeAct(ev.LastAct)
ev.RenderState()
return true
}
//////////////////////////////////////////////////
// Render
// RenderLocalist renders one localist state
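// For example, with NYReps = 4 and the "CS" state of shape (NYReps, NArms),
// RenderLocalist("CS", 2) zeros the tensor and sets column 2 to 1 in all 4 rows
// (out-of-range values leave the tensor all zeros).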
func (ev *Env) RenderLocalist(name string, val int) {
st := ev.States[name]
st.SetZeros()
if val >= st.DimSize(1) {
return
}
for y := 0; y < ev.Config.Params.NYReps; y++ {
st.Set(1.0, y, val)
}
}
// RenderLocalist4D renders one localist state in 4D
func (ev *Env) RenderLocalist4D(name string, val int) {
st := ev.States[name]
st.SetZeros()
for y := 0; y < ev.Config.Params.NYReps; y++ {
st.Set(1.0, 0, val, y, 0)
}
}
// RenderState renders the current state
func (ev *Env) RenderState() {
ev.RenderLocalist("CS", ev.CurCS())
ev.RenderLocalist("Pos", ev.Pos)
ev.RenderLocalist("Dist", ev.Dist)
}
// RenderAction renders the action
func (ev *Env) RenderAction(act Actions) {
ev.RenderLocalist("Action", int(act))
}
//////////////////////////////////////////////////
// Action
func (ev *Env) DecodeAct(vt *tensor.Float64) Actions {
mxi := ev.DecodeLocalist(vt)
return Actions(mxi)
}
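// DecodeLocalist decodes a localist population code: it sums activity over the
// NYReps rows within each column of the given tensor and returns the index of the
// column with the largest summed activity (e.g., with NYReps = 4 and 5 actions,
// the winner is the column of the (4, 5) VL tensor with the largest row sum).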
func (ev *Env) DecodeLocalist(vt *tensor.Float64) int {
dx := vt.DimSize(1)
var max float64
var mxi int
for i := 0; i < dx; i++ {
var sum float64
for j := 0; j < ev.Config.Params.NYReps; j++ {
sum += vt.Value(j, i)
}
if sum > max {
max = sum
mxi = i
}
}
return mxi
}
// Action records the LastAct and renders it, but does not
// update the state accordingly.
func (ev *Env) Action(action string, nop tensor.Values) {
act := None
act.SetString(action)
ev.LastAct = act
ev.RenderAction(act) // plus phase input is action
// note: action not taken via TakeAct until start of trial in Step()
}
func (ev *Env) TakeAct(act Actions) {
narms := ev.Config.NArms
arm := ev.Config.Arms[ev.Arm]
switch act {
case Forward:
ev.Effort = ev.ForwardEffort(arm) // pay effort regardless
npos := ev.Pos + 1
if npos <= arm.Length {
ev.Pos = npos
} else {
// todo: bump into wall?
}
case Left:
ev.Effort = ev.TurnEffort() // pay effort regardless
if ev.Config.Params.OpenArms || ev.Pos == 0 {
ev.Arm--
}
if ev.Arm < 0 {
ev.Arm += narms
}
case Right:
ev.Effort = ev.TurnEffort() // pay effort regardless
if ev.Config.Params.OpenArms || ev.Pos == 0 {
ev.Arm++
}
if ev.Arm >= narms {
ev.Arm -= narms // wrap around to the first arm
}
case Consume:
ev.Effort = ev.ConsumeEffort()
if ev.Pos == arm.Length {
if ev.USConsumed < 0 {
ev.ConsumeUS(arm)
}
}
}
// always update Dist
arm = ev.Config.Arms[ev.Arm]
ev.Dist = arm.Length - ev.Pos
}
// ConsumeUS implements the consume action at current position in given arm
func (ev *Env) ConsumeUS(arm *Arm) {
mag := MinMaxRand(arm.USMag, ev.Rand)
if arm.USAvail {
ev.USConsumed = arm.US
ev.USValue = mag
ev.JustConsumed = true
} else {
ev.USConsumed = -1
ev.USValue = 0
}
}
// InstinctAct returns an "instinctive" action that implements a basic policy
func (ev *Env) InstinctAct(justGated, hasGated bool) Actions {
ev.JustGated = justGated
ev.HasGated = hasGated
ev.ShouldGate = ((hasGated && ev.USConsumed >= 0) || // To clear the goal after US
(!hasGated && ev.ArmIsBest(ev.Arm))) // looking at correct, haven't yet gated
arm := ev.CurArm()
if ev.Pos >= arm.Length {
return Consume
}
if ev.HasGated {
return Forward
}
if ev.LastAct == Left || ev.LastAct == Right {
return ev.LastAct
}
if ev.Config.Params.AlwaysLeft || randx.BoolP(.5, &ev.Rand) {
return Left
}
return Right
}
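// runInstinctTrial is an illustrative sketch (not part of the env) of driving the
// env purely with the instinctive policy, as a sim without a network might do.
// It assumes ev.Defaults() and ev.ConfigEnv(di) have already been called, and it
// fakes the gating signal that normally comes from the model's basal ganglia.
func runInstinctTrial(ev *Env) {
ev.Init(0)
hasGated := false
for t := 0; t < 100; t++ {
act := ev.InstinctAct(false, hasGated)
if ev.ShouldGate { // pretend the model gates exactly when the instinct says it should
hasGated = true
}
ev.Action(act.String(), nil)
ev.Step()
if ev.USConsumed >= 0 {
break // consumed a US: the behavioral trial is over
}
}
}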
//////////////////////////////////////////////////
// Utils
// CurArm returns current Arm
func (ev *Env) CurArm() *Arm {
return ev.Config.Arms[ev.Arm]
}
// CurCS returns current CS from current Arm
func (ev *Env) CurCS() int {
return ev.CurArm().CS
}
// MinMaxRand returns a random number in the range between Min and Max
func MinMaxRand(mm minmax.F32, rand randx.SysRand) float32 {
return mm.Min + rand.Float32()*mm.Range()
}
// InactiveValue returns a new random inactive value from Config.Params.Inactive
// param range.
func (ev *Env) InactiveValue() float32 {
return MinMaxRand(ev.Config.Params.Inactive, ev.Rand)
}
// ForwardEffort returns a new random Effort value from Arm Effort range
func (ev *Env) ForwardEffort(arm *Arm) float32 {
return MinMaxRand(arm.Effort, ev.Rand)
}
// TurnEffort returns a new random Effort value from Config.Params.TurnEffort
// param range.
func (ev *Env) TurnEffort() float32 {
return MinMaxRand(ev.Config.Params.TurnEffort, ev.Rand)
}
// ConsumeEffort returns a new random Effort value from Config.Params.ConsumeEffort
// param range.
func (ev *Env) ConsumeEffort() float32 {
return MinMaxRand(ev.Config.Params.ConsumeEffort, ev.Rand)
}
func (ev *Env) UpdateMaxLength() {
ev.MaxLength = 0
for _, arm := range ev.Config.Arms {
if arm.Length > ev.MaxLength {
ev.MaxLength = arm.Length
}
}
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import (
"image"
"image/color"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/colors"
"cogentcore.org/core/colors/colormap"
"cogentcore.org/core/core"
"cogentcore.org/core/events"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/styles"
"cogentcore.org/core/styles/abilities"
"cogentcore.org/core/tree"
"cogentcore.org/core/xyz"
"cogentcore.org/core/xyz/xyzcore"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/plotcore"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensorcore"
"github.com/emer/axon/v2/axon"
)
// GUI renders multiple views of the flat world env
type GUI struct {
// update display -- turn off to make it faster
Disp bool
// the env being visualized
Env *Env
// name of current env -- number is NData index
EnvName string
// 3D visualization of the Scene
SceneEditor *xyzcore.SceneEditor
// list of material colors
MatColors []string
// internal state colors
StateColors map[string]string
// current internal / behavioral state
State TraceStates
// trace record of recent activity
Trace StateTrace
// view of the gui obj
EnvForm *core.Form `display:"-"`
// offscreen render camera settings
Camera phyxyz.Camera
// first-person right-eye full field view
EyeRFullImage *core.Image `display:"-"`
// first-person right-eye fovea view
EyeRFovImage *core.Image `display:"-"`
// plot of positive valence drives, active OFC US state, and reward
USposPlot *plotcore.Editor
// data for USposPlot
USposData *table.Table
// plot of negative valence active OFC US state, and outcomes
USnegPlot *plotcore.Editor
// data for USnegPlot
USnegData *table.Table
// Emery state
Emery Emery `new-window:"+"`
// Maze state
Maze Maze `new-window:"+"`
// The core physics elements: Model, Builder, Scene
Physics builder.Physics
}
// ConfigGUI configures all the world view GUI elements
// pass an initial env to use for configuring
func (vw *GUI) ConfigGUI(ev *Env, b core.Widget) {
vw.Disp = true
vw.Env = ev
vw.EnvName = ev.Name
vw.Camera.Defaults()
vw.Camera.FOV = 90
vw.StateColors = map[string]string{
"TrSearching": "aqua",
"TrDeciding": "coral",
"TrJustEngaged": "yellow",
"TrApproaching": "cornflowerblue",
"TrConsuming": "purple",
"TrRewarded": "green",
"TrGiveUp": "black",
"TrBumping": "red",
}
vw.MatColors = []string{"blue", "orange", "red", "violet", "navy", "brown", "pink", "purple", "olive", "chartreuse", "cyan", "magenta", "salmon", "goldenrod", "SkyBlue"}
core.NewToolbar(b).Maker(vw.MakeToolbar)
split := core.NewSplits(b)
fr := core.NewFrame(split)
fr.Styler(func(s *styles.Style) {
s.Direction = styles.Column
})
// vw.EnvForm = core.NewForm(fr).SetStruct(vw)
core.Bind(&vw.Disp, core.NewSwitch(fr).SetText("Display"))
imfr := core.NewFrame(fr)
imfr.Styler(func(s *styles.Style) {
s.Display = styles.Grid
s.Columns = 2
s.Grow.Set(0, 0)
})
core.NewText(imfr).SetText("Eye-View, Fovea:")
core.NewText(imfr).SetText("Full Field:")
vw.EyeRFovImage = core.NewImage(imfr)
vw.EyeRFovImage.Name = "eye-r-fov-img"
vw.EyeRFovImage.Image = image.NewRGBA(image.Rectangle{Max: vw.Camera.Size})
vw.EyeRFullImage = core.NewImage(imfr)
vw.EyeRFullImage.Name = "eye-r-full-img"
vw.EyeRFullImage.Image = image.NewRGBA(image.Rectangle{Max: vw.Camera.Size})
wd := float32(420)
ht := float32(120)
vw.USposPlot = plotcore.NewEditor(fr)
vw.USposPlot.Name = "us-pos"
vw.USposPlot.Styler(func(s *styles.Style) {
s.Max.X.Px(wd)
s.Max.Y.Px(ht)
})
vw.USnegPlot = plotcore.NewEditor(fr)
vw.USnegPlot.Name = "us-neg"
vw.USnegPlot.Styler(func(s *styles.Style) {
s.Max.X.Px(wd)
s.Max.Y.Px(ht)
})
vw.ConfigUSPlots()
vw.SceneEditor = xyzcore.NewSceneEditor(split)
vw.SceneEditor.UpdateWidget()
sc := vw.SceneEditor.SceneXYZ()
vw.ConfigPhysics(sc)
sc.Camera.Pose.Pos = math32.Vec3(0, 29, -4)
sc.Camera.LookAt(math32.Vec3(0, 4, -5), math32.Vec3(0, 1, 0))
sc.SaveCamera("2")
sc.Camera.Pose.Pos = math32.Vec3(0, 24, 32)
sc.Camera.LookAt(math32.Vec3(0, 3.6, 0), math32.Vec3(0, 1, 0))
sc.SaveCamera("1")
sc.SaveCamera("default")
split.SetSplits(.4, .6)
}
func (vw *GUI) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.Button) {
w.SetText("Init").SetIcon(icons.ClearAll).
SetTooltip("Init env").
OnClick(func(e events.Event) {
vw.Env.Init(0)
})
})
tree.Add(p, func(w *core.Button) {
w.SetText("Reset Trace").SetIcon(icons.Undo).
SetTooltip("Reset trace of position, etc, shown in 2D View").
OnClick(func(e events.Event) {
vw.Trace = nil
})
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(vw.Forward).SetText("Fwd").SetIcon(icons.SkipNext).
Styler(func(s *styles.Style) {
s.SetAbilities(true, abilities.RepeatClickable)
})
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(vw.Left).SetText("Left").SetIcon(icons.KeyboardArrowLeft).
Styler(func(s *styles.Style) {
s.SetAbilities(true, abilities.RepeatClickable)
})
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(vw.Right).SetText("Right").SetIcon(icons.KeyboardArrowRight).
Styler(func(s *styles.Style) {
s.SetAbilities(true, abilities.RepeatClickable)
})
})
tree.Add(p, func(w *core.FuncButton) {
w.SetFunc(vw.Consume).SetText("Consume").SetIcon(icons.SentimentExcited).
Styler(func(s *styles.Style) {
s.SetAbilities(true, abilities.RepeatClickable)
})
})
}
// ConfigPhysics makes the physics Model
func (vw *GUI) ConfigPhysics(sc *xyz.Scene) {
vw.Physics.Model = physics.NewModel()
vw.Physics.Builder = builder.NewBuilder()
vw.Physics.Model.GPU = false
sc.Background = colors.Scheme.Select.Container
xyz.NewAmbient(sc, "ambient", 0.3, xyz.DirectSun)
dir := xyz.NewDirectional(sc, "dir", 1, xyz.DirectSun)
dir.Pos.Set(0, 2, 1) // default: 0,1,1 = above and behind us (we are at 0,0,X)
vw.Physics.Scene = phyxyz.NewScene(sc)
vw.Maze.Config(vw.Env.Config.NArms, vw.Env.MaxLength)
wl := vw.Physics.Builder.NewGlobalWorld()
vw.Maze.Make(wl, vw.Physics.Scene, vw)
ew := vw.Physics.Builder.NewWorld()
vw.Emery.Make(ew, vw.Physics.Scene, vw)
vw.Physics.Builder.ReplicateWorld(vw.Physics.Scene, 1, 1, vw.Env.NData)
vw.Physics.Build()
}
func (vw *GUI) ConfigUSPlots() {
dp := table.New()
vw.USposData = dp
// plot.SetStyler(dp.AddStringColumn("US"), func(s *plot.Style) {
// s.Plotter = "Bar"
// s.Role = plot.X
// })
ysty := func(s *plot.Style) {
s.Plotter = "Bar"
s.Role = plot.Y
s.On = true
s.NoLegend = true
s.Range.SetMin(0).SetMax(1)
}
plot.SetStyler(dp.AddFloat64Column("Drive"), func(s *plot.Style) {
s.Plot.Title = "Positive USs"
ysty(s)
})
plot.SetStyler(dp.AddFloat64Column("OFC"), ysty)
plot.SetStyler(dp.AddFloat64Column("USin"), ysty)
dp.SetNumRows(vw.Env.Config.NDrives + 1)
dn := table.New()
vw.USnegData = dn
// plot.SetStyler(dn.AddStringColumn("US"), func(s *plot.Style) {
// s.Plotter = "Bar"
// s.Role = plot.X
// })
plot.SetStyler(dn.AddFloat64Column("OFC"), func(s *plot.Style) {
s.Plot.Title = "Negative USs"
ysty(s)
})
plot.SetStyler(dn.AddFloat64Column("USin"), ysty)
dn.SetNumRows(vw.Env.Config.NNegUSs + 2)
vw.USposPlot.SetTable(dp)
vw.USnegPlot.SetTable(dn)
}
// GrabEyeImg takes a snapshot from the perspective of Emery's right eye
func (vw *GUI) GrabEyeImg() {
vw.Camera.FOV = 90
img := vw.Physics.Scene.RenderFrom(vw.Emery.EyeR.Skin, &vw.Camera)[0]
if img == nil {
return
}
vw.EyeRFullImage.SetImage(img)
vw.EyeRFullImage.NeedsRender()
vw.Camera.FOV = 10
img = vw.Physics.Scene.RenderFrom(vw.Emery.EyeR.Skin, &vw.Camera)[0]
if img == nil {
return
}
vw.EyeRFovImage.SetImage(img)
vw.EyeRFovImage.NeedsRender()
}
func (vw *GUI) ConfigWorldView(tg *tensorcore.TensorGrid) {
cnm := "ArmMazeColors"
cm, ok := colormap.AvailableMaps[cnm]
if !ok {
ev := vw.Env
cm = &colormap.Map{}
cm.Name = cnm
cm.Indexed = true
nc := ev.Config.NArms
cm.Colors = make([]color.RGBA, nc)
cm.NoColor = colors.Black
for i, cnm := range vw.MatColors {
cm.Colors[i] = errors.Log1(colors.FromString(cnm))
}
colormap.AvailableMaps[cnm] = cm
}
tensorcore.AddGridStylerTo(tg, func(s *tensorcore.GridStyle) {
s.ColorMap = core.ColorMapName(cnm)
s.GridFill = 1
})
}
func (vw *GUI) UpdateWorld(ctx *axon.Context, ev *Env, net *axon.Network, state TraceStates) {
vw.State = state
vw.Trace.AddRec(ctx, uint32(ev.Di), ev, net, state)
if vw.SceneEditor == nil || !vw.Disp {
return
}
if vw.Env != ev {
vw.Env = ev
vw.EnvName = ev.Name
vw.Trace = nil
vw.EnvForm.Update()
}
vw.UpdateGUI()
}
func (vw *GUI) UpdateGUI() {
if vw.SceneEditor == nil || !vw.Disp {
return
}
vw.Physics.Scene.Update()
vw.GrabEyeImg()
if vw.SceneEditor.IsVisible() {
vw.SceneEditor.NeedsRender()
}
}
func (vw *GUI) StateColor() string {
return vw.StateColors[vw.State.String()]
}
func (vw *GUI) Left() { //types:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Left", nil)
ev.Step()
vw.UpdateGUI()
}
func (vw *GUI) Right() { //types:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Right", nil)
ev.Step()
vw.UpdateGUI()
}
func (vw *GUI) Forward() { //types:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Forward", nil)
ev.Step()
vw.UpdateGUI()
}
func (vw *GUI) Consume() { //types:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Consume", nil)
ev.Step()
vw.UpdateGUI()
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import (
"fmt"
"cogentcore.org/core/math32"
"cogentcore.org/core/xyz"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
)
// Maze specifies the Maze physical space.
type Maze struct {
// width of arm -- emery rodent is 1 unit wide
ArmWidth float32 `default:"2"`
// total space between arms, ends up being divided on either side
ArmSpace float32 `default:"1"`
// multiplier per unit arm length -- keep square with width
LengthScale float32 `default:"2"`
// thickness of walls, floor
Thick float32 `default:"0.1"`
// height of walls
Height float32 `default:"0.2"`
// width + space
ArmWidthTot float32 `edit:"-"`
// computed total depth, starts at 0 goes deep
Depth float32 `edit:"-"`
// computed total width
Width float32 `edit:"-"`
// half width for centering on 0 X
HalfWidth float32 `edit:"-"`
// builder object
Obj *builder.Object
}
func (mz *Maze) Config(nArms int, maxLen int) {
mz.ArmSpace = 1
mz.ArmWidth = 2
mz.LengthScale = 2
mz.Thick = 0.1
mz.Height = 0.2
mz.ArmWidthTot = mz.ArmWidth + mz.ArmSpace
mz.Width = float32(nArms) * mz.ArmWidthTot
mz.Depth = float32(maxLen) * mz.LengthScale
mz.HalfWidth = mz.Width / 2
}
// Pos returns the center position for the given arm and position coordinate.
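// For example, with the default Config (ArmWidth = 2, ArmSpace = 1, LengthScale = 2)
// and 4 arms, ArmWidthTot = 3, Width = 12, HalfWidth = 6, so Pos(0, 0) returns
// x = 0.5*3 - 6 = -4.5 and y = -0.5*2 = -1: the first arm's entry sits left of
// center, half a step back in depth.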
func (mz *Maze) Pos(arm, pos int) (x, y float32) {
x = (float32(arm)+.5)*mz.ArmWidthTot - mz.HalfWidth
y = -(float32(pos) + .5) * mz.LengthScale // not centered -- going back in depth
return
}
// Make makes the Maze
func (mz *Maze) Make(wl *builder.World, sc *phyxyz.Scene, vw *GUI) {
dp := mz.Depth + 3*mz.LengthScale
rot := math32.NewQuatIdentity()
obj := wl.NewObject()
mz.Obj = obj
obj.NewBodySkin(sc, "floor", physics.Plane, "grey", math32.Vec3(mz.Width/2, 0, dp/2), math32.Vec3(0, 0, 0), rot)
mz.MakeArms(obj, sc, vw)
mz.MakeStims(obj, sc, vw)
}
func (mz *Maze) MakeArms(obj *builder.Object, sc *phyxyz.Scene, vw *GUI) {
ev := vw.Env
exln := mz.LengthScale
harm := .5 * mz.ArmWidth
hh := .5 * mz.Height
rot := math32.NewQuatIdentity()
for i, arm := range ev.Config.Arms {
anm := fmt.Sprintf("arm_%d", i)
x, _ := mz.Pos(i, 0)
ln := mz.LengthScale * float32(arm.Length)
hl := .5*ln + exln
obj.NewBodySkin(sc, anm+"_left-wall", physics.Box, "black", math32.Vec3(mz.Thick/2, hh, hl), math32.Vec3(x-harm, hh, -hl), rot)
obj.NewBodySkin(sc, anm+"_right-wall", physics.Box, "black", math32.Vec3(mz.Thick/2, hh, hl), math32.Vec3(x+harm, hh, -hl), rot)
}
}
// MakeStims constructs stimuli: CSs, USs
func (mz *Maze) MakeStims(obj *builder.Object, sc *phyxyz.Scene, vw *GUI) {
ev := vw.Env
exLn := mz.LengthScale
usHt := mz.Height / 2
usDp := (0.2 * mz.LengthScale) / 2
csHt := mz.LengthScale / 2
rot := math32.NewQuatIdentity()
for i, arm := range ev.Config.Arms {
x, _ := mz.Pos(i, 0)
ln := mz.LengthScale * float32(arm.Length)
usnm := fmt.Sprintf("us_%d", i)
csnm := fmt.Sprintf("cs_%d", i)
obj.NewBodySkin(sc, usnm, physics.Box, vw.MatColors[arm.US], math32.Vec3(mz.ArmWidth/2, usHt, usDp), math32.Vec3(x, usHt, -ln-1.1*exLn), rot)
bd := obj.NewBodySkin(sc, csnm, physics.Box, vw.MatColors[arm.CS], math32.Vec3(mz.ArmWidth/2, csHt, mz.Thick/2), math32.Vec3(x, usHt+csHt, -ln-2*exLn), rot)
sk := bd.Skin
sk.InitSkin = func(sld *xyz.Solid) {
sk.BoxInit(sld)
sld.Updater(func() {
sk.Color = vw.MatColors[arm.CS]
sk.UpdateColor(sk.Color, sld)
})
}
}
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
// Paradigms is a list of experimental paradigms that
// govern the configuration of the arms.
type Paradigms int32 //enums:enum
const (
// GroupGoodBad allocates Arms into 2 groups, with first group unambiguously Good
// and the second Bad, using the Min, Max values of each Range parameter:
// Length, Effort, USMag, USProb. Good has Min cost, Max US, and opposite for Bad.
// This also aligns with the ordering of USs, such that negative USs are last.
GroupGoodBad Paradigms = iota
// GroupRisk allocates Arms into 2 groups with conflicting Cost and Benefit
// tradeoffs, with the first group having Min cost and Min US, and the second
// group having Max cost and Max US.
GroupRisk
)
///////////////////////////////////////////////
// GroupGoodBad
// ConfigGroupGoodBad configures the arms for the GroupGoodBad paradigm,
// allocating 2*NUSs arms in two groups: good first, then bad.
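// For example, with NDrives = 2 and NNegUSs = 1 (NUSs = 3), this produces 6 arms:
// arms 0-2 form the "good" group (Min length/effort, Max US magnitude/probability)
// and arms 3-5 the "bad" group, with arm i getting US i%3 within each group.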
func (ev *Env) ConfigGroupGoodBad() {
cfg := &ev.Config
cfg.Update()
cfg.NArms = 2 * cfg.NUSs
ev.Drives = make([]float32, cfg.NDrives)
cfg.Arms = make([]*Arm, cfg.NArms)
ai := 0
for gi := 0; gi < 2; gi++ {
var eff, mag, prob float32
var length int
// todo: with BLANovelInhib lr=0.01, this bias has reversed!?
if gi == 1 { // bad case: there is a small but significant left side bias, so make this on bad
length = cfg.LengthRange.Max
eff = cfg.EffortRange.Max
mag = cfg.USMagRange.Min
prob = cfg.USProbRange.Min
} else { // good case
length = cfg.LengthRange.Min
eff = cfg.EffortRange.Min
mag = cfg.USMagRange.Max
prob = cfg.USProbRange.Max
}
for ui := 0; ui < cfg.NUSs; ui++ {
arm := &Arm{CS: ai, Length: length, US: ui}
arm.Effort.Set(eff, eff)
arm.USMag.Set(mag, mag)
arm.USProb = prob
cfg.Arms[ai] = arm
ai++
}
}
ev.UpdateMaxLength()
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armaze
import (
"github.com/emer/axon/v2/axon"
)
// TraceStates is a list of mutually exclusive states
// for tracing the behavior and internal state of Emery
type TraceStates int32 //enums:enum
const (
// Searching is not yet goal engaged, looking for a goal
TrSearching TraceStates = iota
// Deciding is having some partial gating but not in time for action
TrDeciding
// JustEngaged means just decided to engage in a goal
TrJustEngaged
// Approaching is goal engaged, approaching the goal
TrApproaching
// Consuming is consuming the US, first step (prior to getting reward, step1)
TrConsuming
// Rewarded is just received reward from a US
TrRewarded
// GiveUp is when goal is abandoned
TrGiveUp
// Bumping is bumping into a wall
TrBumping
)
// TraceRec holds record of info for tracing behavior, state
type TraceRec struct {
// absolute time
Time float32
// trial counter
Trial int
// current arm
Arm int
// position in arm
Pos int
// behavioral / internal state summary
State TraceStates
// NDrives current drive state level
Drives []float32
}
// StateTrace holds trace records
type StateTrace []*TraceRec
// AddRec adds a record with data from given sources
func (tr *StateTrace) AddRec(ctx *axon.Context, di uint32, ev *Env, net *axon.Network, state TraceStates) *TraceRec {
rec := &TraceRec{Arm: ev.Arm, Pos: ev.Pos, State: state}
rec.Drives = make([]float32, ev.Config.NDrives)
if ctx != nil {
rec.Time = ctx.Time
rec.Trial = int(ctx.TrialsTotal)
for i := 0; i < ev.Config.NDrives; i++ {
rec.Drives[i] = axon.GlobalVectors.Value(int(axon.GvDrives), int(1+i), int(di))
}
}
*tr = append(*tr, rec)
return rec
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// choose: This project tests the Rubicon framework
// making cost-benefit based choices.
package choose
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/num"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/sims/choose/armaze"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// StopOnSeq stops running at end of a sequence (for NetView Di data parallel index).
StopOnSeq bool
// StopOnErr stops running when an error programmed into the code occurs.
StopOnErr bool
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// GUI for viewing env.
EnvGUI *armaze.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
if ss.Config.Env.Config != "" {
fmt.Println("Env Config:", ss.Config.Env.Config)
}
for di := 0; di < ss.Config.Run.NData; di++ {
var trn *armaze.Env
if newEnv {
trn = &armaze.Env{}
} else {
trn = ss.Envs.ByModeDi(Train, di).(*armaze.Env)
}
// note: names must be standard here!
trn.Name = env.ModeDi(Train, di)
trn.Defaults()
trn.RandSeed = 73
if !ss.Config.Env.SameSeed {
trn.RandSeed += int64(di) * 73
}
trn.Config.NDrives = ss.Config.Env.NDrives
if ss.Config.Env.Config != "" {
args := os.Args
os.Args = args[:1]
// todo: need config
// _, err := cli.Config(&trn.Config, ss.Config.Env.Config)
// if err != nil {
// slog.Error(err.Error())
// }
}
trn.ConfigEnv(di)
trn.Validate()
trn.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn)
if di == 0 {
ss.ConfigRubicon(trn)
}
}
}
func (ss *Sim) ConfigRubicon(trn *armaze.Env) {
rp := &ss.Net.Rubicon
rp.SetNUSs(trn.Config.NDrives, 1)
rp.Defaults()
rp.USs.PVposGain = 2 // higher = more pos reward (saturating logistic func)
rp.USs.PVnegGain = 1 // global scaling of RP neg level -- was 1
rp.LHb.VSPatchGain = 4
rp.LHb.VSPatchNonRewThr = 0.15
rp.USs.USnegGains[0] = 2 // big salient input!
rp.Drive.DriveMin = 0.5 // 0.5 -- should be
rp.Urgency.U50 = 10
if ss.Config.Params.Rubicon != nil {
reflectx.SetFieldsFromMap(rp, ss.Config.Params.Rubicon)
}
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByModeDi(Train, 0).(*armaze.Env)
nuBgY := 5
nuBgX := 5
nuCtxY := 6
nuCtxX := 6
nAct := int(armaze.ActionsN)
popY := 4
popX := 4
space := float32(2)
pone2one := paths.NewPoolOneToOne()
one2one := paths.NewOneToOne()
full := paths.NewFull()
mtxRandPath := paths.NewPoolUniformRand()
mtxRandPath.PCon = 0.75
_ = mtxRandPath
_ = pone2one
pathClass := "PFCPath"
ny := ev.Config.Params.NYReps
narm := ev.Config.NArms
vSgpi, vSmtxGo, vSmtxNo, urgency, pvPos, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPosUS, ofcPosUSCT, ofcPosUSPT, ofcPosUSPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ofcNegUS, ofcNegUSCT, ofcNegUSPT, ofcNegUSPTp, ilNeg, ilNegCT, ilNegPT, ilNegPTp, accCost, plUtil, sc := net.AddRubicon(ny, popY, popX, nuBgY, nuBgX, nuCtxY, nuCtxX, space)
_, _ = plUtil, urgency
_, _ = ofcNegUSCT, ofcNegUSPTp
_, _ = vSmtxGo, vSmtxNo
plUtilPTp := net.LayerByName("PLutilPTp")
cs, csP := net.AddInputPulv2D("CS", ny, narm, space)
dist, distP := net.AddInputPulv2D("Dist", ny, ev.MaxLength+1, space)
//////// M1, VL, ALM
act := net.AddLayer2D("Act", axon.InputLayer, ny, nAct) // Action: what is actually done
vl := net.AddPulvLayer2D("VL", ny, nAct) // VL predicts brainstem Action
vl.SetBuildConfig("DriveLayName", act.Name)
m1, m1CT := net.AddSuperCT2D("M1", "PFCPath", nuCtxY, nuCtxX, space, one2one)
m1P := net.AddPulvForSuper(m1, space)
alm, almCT, almPT, almPTp, almMD := net.AddPFC2D("ALM", "MD", nuCtxY, nuCtxX, true, true, space)
_ = almPT
net.ConnectLayers(vSgpi, almMD, full, axon.InhibPath)
// net.ConnectToMatrix(alm, vSmtxGo, full) // todo: explore
// net.ConnectToMatrix(alm, vSmtxNo, full)
net.ConnectToPFCBidir(m1, m1P, alm, almCT, almPT, almPTp, full, "M1ALM") // alm predicts m1
// vl is a predictive thalamus but we don't have direct access to its source
net.ConnectToPulv(m1, m1CT, vl, full, full, pathClass)
net.ConnectToPFC(nil, vl, alm, almCT, almPT, almPTp, full, "VLALM") // alm predicts vl
// sensory inputs guiding action
// note: alm gets effort, dist via predictive coding below
net.ConnectLayers(dist, m1, full, axon.ForwardPath).AddClass("ToM1")
net.ConnectLayers(ofcNegUS, m1, full, axon.ForwardPath).AddClass("ToM1")
// shortcut: not needed
// net.ConnectLayers(dist, vl, full, axon.ForwardPath).AddClass("ToVL")
// these pathways are *essential* -- must get current state here
net.ConnectLayers(m1, vl, full, axon.ForwardPath).AddClass("ToVL")
net.ConnectLayers(alm, vl, full, axon.ForwardPath).AddClass("ToVL")
net.ConnectLayers(m1, accCost, full, axon.ForwardPath).AddClass("MToACC")
net.ConnectLayers(alm, accCost, full, axon.ForwardPath).AddClass("MToACC")
// key point: cs does not project directly to alm -- no simple S -> R mappings!?
//////// CS -> BLA, OFC
net.ConnectToSC1to1(cs, sc)
net.ConnectCSToBLApos(cs, blaPosAcq, blaNov)
net.ConnectToBLAExt(cs, blaPosExt, full)
net.ConnectToBLAAcq(cs, blaNegAcq, full)
net.ConnectToBLAExt(cs, blaNegExt, full)
// for some reason this really makes things worse:
// net.ConnectToVSMatrix(cs, vSmtxGo, full)
// net.ConnectToVSMatrix(cs, vSmtxNo, full)
// OFCus predicts cs
net.ConnectToPFCBack(cs, csP, ofcPosUS, ofcPosUSCT, ofcPosUSPT, ofcPosUSPTp, full, "CSToPFC")
net.ConnectToPFCBack(cs, csP, ofcNegUS, ofcNegUSCT, ofcPosUSPT, ofcNegUSPTp, full, "CSToPFC")
//////// OFC, ACC, ALM predicts dist
// todo: a more dynamic US rep is needed to drive predictions in OFC
// using distance and effort here in the meantime
net.ConnectToPFCBack(dist, distP, ofcPosUS, ofcPosUSCT, ofcPosUSPT, ofcPosUSPTp, full, "DistToPFC")
net.ConnectToPFCBack(dist, distP, ilPos, ilPosCT, ilPosPT, ilPosPTp, full, "PosToPFC")
net.ConnectToPFC(dist, distP, ofcNegUS, ofcNegUSCT, ofcNegUSPT, ofcNegUSPTp, full, "DistToPFC")
net.ConnectToPFC(dist, distP, ilNeg, ilNegCT, ilNegPT, ilNegPTp, full, "DistToPFC")
// alm predicts all effort, cost, sensory state vars
net.ConnectToPFC(dist, distP, alm, almCT, almPT, almPTp, full, "DistToPFC")
//////// ALM, M1 <-> OFC, ACC
// action needs to know if maintaining a goal or not
// using plUtil as main summary "driver" input to action system
// PTp provides good notmaint signal for action.
net.ConnectLayers(plUtilPTp, alm, full, axon.ForwardPath).AddClass("ToALM")
net.ConnectLayers(plUtilPTp, m1, full, axon.ForwardPath).AddClass("ToM1")
// note: in Obelisk this helps with the Consume action
// but here in this example it produces some instability
// at later time points -- todo: investigate later.
// net.ConnectLayers(notMaint, vl, full, axon.ForwardPath).AddClass("ToVL")
//////// position
cs.PlaceRightOf(pvPos, space*2)
dist.PlaceRightOf(cs, space)
m1.PlaceRightOf(dist, space)
alm.PlaceRightOf(m1, space)
vl.PlaceBehind(m1P, space)
act.PlaceBehind(vl, space)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
net := ss.Net
ss.Params.ApplyAll(net)
// params that vary as number of CSs
ev := ss.Envs.ByModeDi(Train, 0).(*armaze.Env)
nCSTot := ev.Config.NArms
cs := net.LayerByName("CS")
cs.Params.Inhib.ActAvg.Nominal = 0.32 / float32(nCSTot)
csp := net.LayerByName("CSP")
csp.Params.Inhib.ActAvg.Nominal = 0.32 / float32(nCSTot)
bla := net.LayerByName("BLAposAcqD1")
pji, _ := bla.RecvPathBySendName("BLANovelCS")
pj := pji.(*axon.Path)
// this is very sensitive param to get right
// too little and the hamster does not try CSs at the beginning,
// too high and it gets stuck trying the same location over and over
pj.Params.PathScale.Abs = float32(math32.Min(2.3+(float32(nCSTot)/10.0), 3.0))
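// e.g., with 8 arms this works out to min(2.3 + 0.8, 3.0) = 3.0; the 3.0 cap
// takes effect once there are 7 or more CSs.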
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // always do -- otherwise env params not reset after run
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
return &ss.TrainUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
cycles := ss.Config.Run.Cycles()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
// Note: actual max counters set by env
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
for mode, st := range ls.Stacks {
plusPhase := st.Loops[Cycle].EventByName("MinusPhase:End")
plusPhase.OnEvent.InsertBefore("PlusPhase:Start", "TakeAction", func() bool {
// note: critical to have this happen *after* MinusPhase:End and *before* PlusPhase:Start
// because minus phase end has gated info, and plus phase start applies action input
ss.TakeAction(ss.Net, mode.(Modes))
return false
})
}
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Loop(Train, Trial).OnEnd.Add("UpdateEnvGUI", func() {
ss.UpdateEnvGUI(Train)
})
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// TakeAction takes action for this step, using either decoded cortical
// or reflexive subcortical action from env.
// Called at end of minus phase. However, it can still gate sometimes
// after this point, so that is dealt with at end of plus phase.
func (ss *Sim) TakeAction(net *axon.Network, mode Modes) {
rp := &net.Rubicon
ctx := net.Context()
curModeDir := ss.Current.Dir(mode.String())
mtxLy := net.LayerByName("VMatrixGo")
vlly := net.LayerByName("VL")
threshold := float32(0.1)
ndata := int(ctx.NData)
for di := 0; di < ndata; di++ {
diu := uint32(di)
ev := ss.Envs.ByModeDi(mode, di).(*armaze.Env)
justGated := mtxLy.Params.AnyGated(diu) // not updated until plus phase: rp.VSMatrix.JustGated.IsTrue()
hasGated := axon.GlobalScalars.Value(int(axon.GvVSMatrixHasGated), di) > 0
ev.InstinctAct(justGated, hasGated)
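// csGated marks a trial where the BG just gated on a CS but no positive US is present yet;
// deciding marks a trial where nothing has gated but both ACh and VMatrixGo CaP exceed the
// threshold, i.e., the network is actively weighing its options.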
csGated := (justGated && !rp.HasPosUS(diu))
ach := axon.GlobalScalars.Value(int(axon.GvACh), di)
mtxLpi := mtxLy.Params.PoolIndex(0)
mtxCaPMax := axon.PoolAvgMax(axon.AMCaPMax, axon.AMCycle, axon.Max, mtxLpi, diu)
deciding := !csGated && !hasGated && (ach > threshold && mtxCaPMax > threshold) // give it time
wasDeciding := num.ToBool(curModeDir.Float32("Deciding", ndata).Float1D(di))
if wasDeciding {
deciding = false // can't keep deciding!
}
curModeDir.Float32("Deciding", ndata).SetFloat1D(num.FromBool[float64](deciding), di)
trSt := armaze.TrSearching
if hasGated {
trSt = armaze.TrApproaching
}
if csGated || deciding {
act := "CSGated"
trSt = armaze.TrJustEngaged
if !csGated {
act = "Deciding"
trSt = armaze.TrDeciding
}
// ss.Stats.SetStringDi("Debug", di, act)
ev.Action("None", nil)
ss.ApplyAction(mode, di)
curModeDir.StringValue("ActAction", ndata).SetString1D("None", di)
curModeDir.StringValue("Instinct", ndata).SetString1D("None", di)
curModeDir.StringValue("NetAction", ndata).SetString1D(act, di)
curModeDir.Float64("ActMatch", ndata).SetFloat1D(1, di)
lpi := vlly.Params.PoolIndex(0)
axon.PoolsInt.Set(0, int(lpi), 0, int(axon.Clamped)) // not clamped this trial
} else {
// ss.Stats.SetStringDi("Debug", di, "acting")
netAct := ss.DecodeAct(ev, mode, di)
genAct := ev.InstinctAct(justGated, hasGated)
curModeDir.StringValue("NetAction", ndata).SetString1D(netAct.String(), di)
curModeDir.StringValue("Instinct", ndata).SetString1D(genAct.String(), di)
if netAct == genAct {
curModeDir.Float64("ActMatch", ndata).SetFloat1D(1, di)
} else {
curModeDir.Float64("ActMatch", ndata).SetFloat1D(0, di)
}
actAct := genAct
if curModeDir.Float64("CortexDriving", ndata).Float1D(di) > 0 {
actAct = netAct
}
curModeDir.StringValue("ActAction", ndata).SetString1D(actAct.String(), di)
ev.Action(actAct.String(), nil)
ss.ApplyAction(mode, di)
switch {
case rp.HasPosUS(diu):
trSt = armaze.TrRewarded
case actAct == armaze.Consume:
trSt = armaze.TrConsuming
}
}
if axon.GlobalScalars.Value(int(axon.GvGiveUp), di) > 0 {
trSt = armaze.TrGiveUp
}
curModeDir.Int("TraceStateInt", ndata).SetInt1D(int(trSt), di)
curModeDir.StringValue("TraceState", ndata).SetString1D(trSt.String(), di)
}
net.ApplyExts()
}
// DecodeAct decodes the VL ActM state to find closest action pattern
func (ss *Sim) DecodeAct(ev *armaze.Env, mode Modes, di int) armaze.Actions {
tsr := axon.StatsLayerValues(ss.Net, ss.Current, mode, di, "VL", "CaP")
return ev.DecodeAct(tsr)
}
func (ss *Sim) ApplyAction(mode Modes, di int) {
net := ss.Net
ev := ss.Envs.ByModeDi(mode, di).(*armaze.Env)
ap := ev.State("Action")
ly := net.LayerByName("Act")
ly.ApplyExt(uint32(di), ap)
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ss.Net.InitExt()
curModeDir := ss.Current.Dir(mode.String())
lays := []string{"Dist", "CS"}
ndata := int(net.Context().NData)
for di := 0; di < ndata; di++ {
ev := ss.Envs.ByModeDi(mode, di).(*armaze.Env)
giveUp := axon.GlobalScalars.Value(int(axon.GvGiveUp), di) > 0
if giveUp {
ev.JustConsumed = true // triggers a new start -- we just consumed the giving up feeling :)
}
ev.Step()
if ev.Tick == 0 {
driving := num.FromBool[float64](randx.BoolP32(ss.Config.Env.PctCortex))
curModeDir.Float64("CortexDriving", ndata).SetFloat1D(driving, di)
}
for _, lnm := range lays {
ly := net.LayerByName(lnm)
itsr := ev.State(lnm)
ly.ApplyExt(uint32(di), itsr)
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
ss.ApplyRubicon(ev, mode, uint32(di))
}
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs.
func (ss *Sim) ApplyRubicon(ev *armaze.Env, mode Modes, di uint32) {
rp := &ss.Net.Rubicon
rp.NewState(di, &ss.Net.Rand) // first before anything else is updated
rp.SetGoalMaintFromLayer(di, ss.Net, "PLutilPT", 0.2)
rp.DecodePVEsts(di, ss.Net)
rp.SetGoalDistEst(di, float32(ev.Dist))
rp.EffortUrgencyUpdate(di, ev.Effort)
if ev.USConsumed >= 0 {
rp.SetUS(di, axon.Positive, ev.USConsumed, ev.USValue)
}
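// The first value argument to SetDrives (0.5 here) is presumably the baseline curiosity drive,
// followed by the environment's per-US drive levels.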
rp.SetDrives(di, 0.5, ev.Drives...)
rp.Step(di, &ss.Net.Rand)
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// note: Trial level is not recorded, only the sequence
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
// seqStats := []string{"NCorrect", "Rew", "RewPred", "RPE", "RewEpc"}
// ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
// if level <= Trial {
// return
// }
// for _, name := range seqStats {
// modeDir := ss.Stats.Dir(mode.String())
// curModeDir := ss.Current.Dir(mode.String())
// levelDir := modeDir.Dir(level.String())
// subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
// tsr := levelDir.Float64(name)
// ndata := int(ss.Net.Context().NData)
// var stat float64
// if phase == Start {
// tsr.SetNumRows(0)
// plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.Range.SetMin(0).SetMax(1)
// s.On = true
// })
// continue
// }
// switch level {
// case Trial:
// curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
// tsr.AppendRowFloat(float64(stat))
// default:
// stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
// tsr.AppendRowFloat(stat)
// }
// }
// })
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer, axon.PTMaintLayer, axon.PTPredLayer, axon.GPLayer, axon.STNLayer, axon.VSMatrixLayer, axon.BGThalLayer, axon.UrgencyLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
clays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.PTMaintLayer, axon.PTPredLayer)
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, clays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
statNames := []string{"DA", "RewPred"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Value(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.02
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.4, 2.6)
nv.SceneXYZ().Camera.LookAt(math32.Vector3{}, math32.Vec3(0, 1, 0))
evtab, _ := ss.GUI.Tabs.NewTab("Maze")
ev := ss.Envs.ByModeDi(Train, 0).(*armaze.Env)
ss.EnvGUI = &armaze.GUI{}
ss.EnvGUI.ConfigGUI(ev, evtab)
ss.StatsInit()
ss.GUI.Tabs.SelectTabIndex(0)
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) UpdateEnvGUI(mode Modes) {
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return
}
curModeDir := ss.Current.Dir(mode.String())
ctx := ss.Net.Context()
ndata := int(ctx.NData)
di := vu.View.Di
ev := ss.Envs.ByModeDi(mode, di).(*armaze.Env)
net := ss.Net
rp := &net.Rubicon
dp := ss.EnvGUI.USposData
ofcPosUS := net.LayerByName("OFCposPT")
ofcmul := float32(1)
np := int(rp.NPosUSs)
dp.SetNumRows(np)
for i := 0; i < np; i++ {
drv := axon.GlobalVectors.Value(int(axon.GvDrives), i, di)
us := axon.GlobalVectors.Value(int(axon.GvUSpos), i, di)
lpi := ofcPosUS.Params.PoolIndex(uint32(i + 1))
ofc := axon.PoolAvgMax(axon.AMCaD, axon.AMPlus, axon.Avg, lpi, uint32(di)) * ofcmul
dp.Column("Drive").SetFloat(float64(drv), i)
dp.Column("USin").SetFloat(float64(us), i)
dp.Column("OFC").SetFloat(float64(ofc), i)
}
dn := ss.EnvGUI.USnegData
ofcNegUS := net.LayerByName("OFCnegPT")
nn := int(rp.NNegUSs)
dn.SetNumRows(nn)
for i := 0; i < nn; i++ {
us := axon.GlobalVectors.Value(int(axon.GvUSneg), i, di)
lpi := ofcNegUS.Params.PoolIndex(uint32(i + 1))
ofc := axon.PoolAvgMax(axon.AMCaD, axon.AMPlus, axon.Avg, lpi, uint32(di)) * ofcmul
dn.Column("USin").SetFloat(float64(us), i)
dn.Column("OFC").SetFloat(float64(ofc), i)
}
ss.EnvGUI.USposPlot.GoUpdatePlot()
ss.EnvGUI.USnegPlot.GoUpdatePlot()
trSt := curModeDir.Int("TraceStateInt", ndata).Int1D(di)
ss.EnvGUI.SceneEditor.AsyncLock()
defer ss.EnvGUI.SceneEditor.AsyncUnlock()
ss.EnvGUI.UpdateWorld(ctx, ev, net, armaze.TraceStates(trSt))
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/choose"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[choose.Sim, choose.Config]() }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package choose
import (
"cogentcore.org/core/core"
"cogentcore.org/lab/base/mpi"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// Config is the name of config file that loads into Env.Config
// for setting environment parameters directly.
Config string
// NDrives is the number of different drive-like body states
// (hunger, thirst, etc.), each satisfied by a corresponding US outcome.
NDrives int `default:"4"`
// PctCortexStEpc is epoch when PctCortex starts increasing.
PctCortexStEpc int `default:"10"`
// PctCortexNEpc is the number of epochs over which PctCortexMax is reached.
PctCortexNEpc int `default:"1"`
// PctCortex is the proportion of behavioral approach sequences driven
// by the cortex vs. hard-coded reflexive subcortical behavior.
PctCortex float32 `edit:"-"`
// SameSeed is for testing: forces each env to use the same seed.
SameSeed bool
}
// CurPctCortex returns the current PctCortex and updates the field, based on the epoch counter.
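// For example, with the defaults (PctCortexStEpc=10, PctCortexNEpc=1), PctCortex is 0 at epoch 10
// and reaches 1 at epoch 11.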
func (cfg *EnvConfig) CurPctCortex(epc int) float32 {
if epc >= cfg.PctCortexStEpc && cfg.PctCortex < 1 {
cfg.PctCortex = float32(epc-cfg.PctCortexStEpc) / float32(cfg.PctCortexNEpc)
if cfg.PctCortex > 1 {
cfg.PctCortex = 1
} else {
mpi.Printf("PctCortex updated to: %g at epoch: %d\n", cfg.PctCortex, epc)
}
}
return cfg.PctCortex
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Rubicon parameters: can set any field/subfield on Net.Rubicon params,
// using standard TOML formatting.
Rubicon map[string]any
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an effective
// mini-batch size for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"128"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// environment configuration options
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "Choose"
cfg.Title = "Choose Maze Arms"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/choose/README.md"
cfg.Doc = "This project tests the Rubicon framework making cost-benefit based choices."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package choose
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package choose
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic params for all layers",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.5
}},
{Sel: ".PFCLayer", Doc: "pfc layers: slower trgavgact",
Set: func(ly *axon.LayerParams) {
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002 // also now set by default
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
// ly.Inhib.Layer.Gi = 2.4
// ly.Inhib.Pool.Gi = 2.4
ly.Acts.Dend.ModGain = 1.5 // 1.5; was 2 min -- reduces maint early
ly.Learn.NeuroMod.AChDisInhib = 0.0 // not much effect here..
}},
{Sel: ".VSTNLayer", Doc: "all VSTN",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.1
ly.Acts.Kir.Gk = 10 // 10 > 5 > 2 -- key for pause
ly.Acts.SKCa.Gk = 2 // 2 > 5 >> 1 (for Kir = 10)
ly.Acts.SKCa.CaRDecayTau = 80 // 80 > 150
// ly.Inhib.Layer.On.SetBool(true) // really no inhib neurons here. all VGPePr
ly.Learn.NeuroMod.AChDisInhib = 0
}},
{Sel: ".PTPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.CT.GeGain = 0.05 // 0.05 key for stronger activity
// ly.CT.DecayTau = 50
ly.Learn.NeuroMod.AChDisInhib = 0 // 0.2, 0.5 not much diff
}},
{Sel: ".CS", Doc: "need to adjust Nominal for number of CSs -- now down automatically",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.1 for 4, divide by N/4 from there
}},
// {Sel: "#OFCpos", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.Pool.Gi = 1
// }},
// {Sel: "#OFCposPT", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.Pool.Gi = 0.5
// }},
{Sel: "#OFCposPTp", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.1 -- affects how strongly BLA is driven -- key param
ly.Inhib.Pool.Gi = 1.4 // 1.4 orig
}},
{Sel: "#ILposPTp", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.2
}},
{Sel: "#ILnegPTp", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.2
}},
{Sel: "#OFCneg", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
// ly.Inhib.Layer.Gi = 0.5 // weaker in general so needs to be lower
}},
// {Sel: "#OFCnegPT", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.ActAvg.Nominal = 0.2
// ly.Inhib.Pool.Gi = 3.0
// }},
// {Sel: "#OFCnegPTp", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.Pool.Gi = 1.4
// }},
// {Sel: "#ILpos", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.Pool.Gi = 1
// }},
{Sel: ".VSMatrixLayer", Doc: "vs mtx",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(false) // todo: explore -- could be bad for gating
ly.Inhib.Pool.Gi = 0.5 // go lower, get more inhib from elsewhere?
ly.Inhib.Pool.FB = 0
ly.Acts.Dend.ModGain = 1 // todo: 2 is default
ly.Acts.Kir.Gk = 2
ly.Learn.NeuroMod.BurstGain = 1
ly.Learn.NeuroMod.DAModGain = 0 // no bias is better!
ly.Learn.RLRate.SigmoidMin = 0.001 // 0.01 better than .05
}},
{Sel: "#BLAposAcqD1", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 2 // 2 fine with BLANovelInhib path
ly.Inhib.Pool.Gi = 1
}},
{Sel: "#BLAposExtD2", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.CT.GeGain = 0.5
}},
{Sel: "#BLAnegAcqD2", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.2 // weaker
}},
{Sel: ".VSPatchLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.Gi = 0.5 // 0.5 ok?
ly.Inhib.Pool.FB = 0 // only fb
ly.Learn.NeuroMod.DipGain = 1 // if < 1, overshoots, more -DA
ly.Learn.NeuroMod.BurstGain = 1
ly.Learn.RLRate.SigmoidMin = 0.01 // 0.01 > 0.05 def
ly.Learn.TrgAvgAct.GiBaseInit = 0 // 0.2 gets too diffuse
}},
{Sel: ".LDTLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.LDT.MaintInhib = 2.0 // 0.95 is too weak -- depends on activity..
}},
{Sel: "#SC", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.KNa.Slow.Gk = 0.8 // .8 reliable decreases -- could go higher
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: ".PFCPath", Doc: "pfc path params -- more robust to long-term training",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.SubMean = 1 // 1 > 0 for long-term stability
pt.Learn.LRate.Base = 0.01 // 0.04 def; 0.02 more stable; 0.01 even more
}},
{Sel: ".PTtoPred", Doc: "stronger drive on pt pred",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1
}},
{Sel: "#BLAposAcqD1ToOFCpos", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5; stronger = bad later
}},
{Sel: "#OFCposToILpos", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
}},
{Sel: ".USToBLAExtInhib", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: "#ILposToPLutil", Doc: "not good to make this stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // todo: try 3?
}},
{Sel: ".MToACC", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
}},
// {Sel: ".PTSelfMaint", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 4
// pt.Learn.LRate.Base = 0.0001 // this is not a problem
// }},
////////////////////////////////////////////
// Rubicon Paths
{Sel: ".VSMatrixPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 3 orig
pt.Learn.DWt.LearnThr = 0.1
pt.Learn.LRate.Base = 0.02 // 0.05 def
}},
{Sel: ".ToSC", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: ".DrivesToMtx", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1
}},
{Sel: ".BLAExtPath", Doc: "ext learns relatively fast",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.05 // 0.05 > 0.02 = 0.01
}},
{Sel: ".BLAAcqToGo", Doc: "must dominate",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1
pt.PathScale.Abs = 4 // 4 > 3 > 2 for urgency early
}},
{Sel: ".BLAExtToAcq", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // 0.5 is min effective
}},
{Sel: ".CSToBLApos", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 // 0.01 > 0.02 much better long term
}},
{Sel: ".PFCToVSMtx", Doc: "contextual, should be weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 def
pt.PathScale.Abs = 1 // 1.5def
}},
{Sel: "#OFCposToVMatrixGo", Doc: "specific best go signal",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
}},
{Sel: "#ILposToVMatrixGo", Doc: "specific best go signal",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
}},
{Sel: "#ACCcostToVMatrixGo", Doc: "costs..",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
}},
{Sel: ".VSPatchPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4 // 4 > 3 > 2 -- key for rapid learning
pt.Learn.DWt.LearnThr = 0
pt.Learn.LRate.Base = 0.02 // 0.02 > 0.01
}},
{Sel: ".CSToBLANovelInhib", Doc: "learning rate here is critical to bootstrap & then fade",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 // 0.01 slightly worse for Gate CS, but shows cost effects..
// 0.02 too fast and Gate CS suffers significantly. 0.005 best for Gate CS, but inhibits costs
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4 // 4 = 3, 2 worse
}},
{Sel: ".SuperToPT", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5 def
}},
{Sel: ".GPiToBGThal", Doc: "inhibition from GPi to MD",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 5 // with new mod, this can be stronger
}},
{Sel: ".BLAFromNovel", Doc: "Note: this setting is overwritten in boa.go ApplyParams",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // weak rel to not dilute rest of bla paths
pt.PathScale.Abs = 3 // 2 is good for .CS nominal .1, but 3 needed for .03
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepfsa
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for the environment.
// Note: only adding fields for key Env params that matter for both Network and Env;
// other params are set via the Env map data mechanism.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// UnitsPer is the number of units per localist output unit. 1 works better than 5 here
UnitsPer int `default:"1"`
// InputNames are names of input letters.
InputNames []string `default:"['B','T','S','X','V','P','E']"`
// InputNameMap is the map from input names to indexes, initialized during ConfigEnv.
InputNameMap map[string]int `display:"-"`
}
// InitNameMap is called during ConfigEnv
func (cfg *EnvConfig) InitNameMap() {
if cfg.InputNameMap != nil {
return
}
cfg.InputNameMap = make(map[string]int, len(cfg.InputNames))
for i, nm := range cfg.InputNames {
cfg.InputNameMap[nm] = i
}
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an effective
// mini-batch size for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"196"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// NZero is how many consecutive perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the
// test patterns. Use 0 or -1 for no testing.
TestInterval int `default:"0"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment related configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "FSA"
cfg.Title = "Finite State Automaton"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/deepfsa/README.md"
cfg.Doc = "This demonstrates a basic deep predictive learning Axon model on the Finite State Automaton problem (e.g., the Reber grammar). The network learns the underlying grammar that generates partially ambiguous observable state tokens, strictly through errors in predicting the sequences of these tokens."
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// deepfsa runs a DeepAxon network on the classic Reber grammar
// finite state automaton problem.
package deepfsa
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"log"
"os"
"reflect"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params, config.go for Config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func Embed(b tree.Node) { egui.Embed[Sim, Config](b) }
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
ss.Config.Env.InitNameMap()
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
for di := 0; di < ss.Config.Run.NData; di++ {
var trn, tst *FSAEnv
if newEnv {
trn = &FSAEnv{}
tst = &FSAEnv{}
} else {
trn = ss.Envs.ByModeDi(Train, di).(*FSAEnv)
tst = ss.Envs.ByModeDi(Test, di).(*FSAEnv)
}
// note: names must be standard here!
trn.Name = env.ModeDi(Train, di)
trn.Seq.Max = 25 // 25 sequences per epoch training
trn.RandSeed = 73 + int64(di)*73
trn.TMatReber()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Validate()
tst.Name = env.ModeDi(Test, di)
tst.Seq.Max = 10
tst.RandSeed = 181 + int64(di)*181
tst.TMatReber() // todo: random
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Validate()
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
in, inp := net.AddInputPulv4D("Input", 1, 7, ss.Config.Env.UnitsPer, 1, 2)
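// in is the localist Input layer and inp is its pulvinar prediction layer (InputP); the 4D shape
// presumably gives a 1 x 7 grid of pools (one per input letter), each with UnitsPer x 1 units,
// with a spacing of 2 between the two layers.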
trg := net.AddLayer2D("Targets", axon.InputLayer, 1, 7) // just for visualization
in.AddClass("InLay")
inp.AddClass("InLay")
trg.AddClass("InLay")
full := paths.NewFull()
full.SelfCon = true // unclear if this makes a diff for self cons at all
// one2one := paths.NewOneToOne()
// _ = one2one
hid, hidct := net.AddSuperCT2D("Hidden", "", 10, 10, 2, full)
// full > one2one -- one2one weights go to 0 -- this is key for more posterior-cortical CT
// hidct.Shape.SetShape([]int{10, 20}, nil, nil) // 200 == 500 == 1000 >> 100 here!
// note: tried 4D 6,6,2,2 with pool 1to1 -- not better
// also 12,12 not better than 10,10
// this adds maint + ctself: better than plain ctself, but not by much
net.ConnectCTSelf(hidct, full, "")
// pure CT self, no maintenance:
// net.ConnectLayers(hidct, hidct, full, axon.CTCtxtPath).AddClass("CTSelfCtxt")
net.ConnectLayers(in, hid, full, axon.ForwardPath)
net.ConnectToPulv(hid, hidct, inp, full, full, "") // inp -> hid and inp -> hidct is *essential*
// net.ConnectLayers(inp, hid, full, BackPath).AddClass("FromPvlv")
// net.ConnectLayers(hidct, hid, full, BackPath)
// not useful:
// net.ConnectCtxtToCT(in, hidct, full)
hid.PlaceAbove(in)
hidct.PlaceRightOf(hid, 2)
inp.PlaceBehind(in, 2)
trg.PlaceBehind(inp, 2)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
return curNZero >= stopNz
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := ss.Net.Context()
ndata := int(ctx.NData)
in := net.LayerByName("Input")
trg := net.LayerByName("Targets")
clrmsk, setmsk, _ := in.ApplyExtFlags()
curModeDir := ss.Current.Dir(mode.String())
net.InitExt()
for di := uint32(0); di < ctx.NData; di++ {
fsenv := ss.Envs.ByModeDi(mode, int(di)).(*FSAEnv)
fsenv.Step()
ns := fsenv.NNext.Values[0]
for i := 0; i < ns; i++ {
lbl := fsenv.NextLabels.Values[i]
li, ok := ss.Config.Env.InputNameMap[lbl]
if !ok {
log.Printf("Input label: %v not found in InputNames list of labels\n", lbl)
continue
}
if i == 0 {
for yi := 0; yi < ss.Config.Env.UnitsPer; yi++ {
idx := li*ss.Config.Env.UnitsPer + yi
in.ApplyExtValue(uint32(idx), di, 1, clrmsk, setmsk, false)
}
}
trg.ApplyExtValue(uint32(li), di, 1, clrmsk, setmsk, false)
curModeDir.StringValue("TrialName", ndata).SetString1D(fsenv.String(), int(di))
}
}
ss.Net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
ss.Envs.ByModeDi(Test, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" { // this is just for testing -- not usually needed
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ctx := ss.Net.Context()
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Test, di).Init(0)
}
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "Output", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ctx := ss.Net.Context()
ndata := int(ctx.NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero", "UnitErr", "Output":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("InputP")
trg := ss.Net.LayerByName("Targets")
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ctx)[di]
case "Err":
_, minusIndexes, _ := out.LocalistErr4D(ctx)
minusIndex := minusIndexes[di]
trgExt := axon.Neurons.Value(int(trg.NeurStIndex+uint32(minusIndex)), di, int(axon.Ext))
curModeDir.Float64("Output", ndata).SetFloat1D(float64(minusIndex), di)
stat = 1.0
if trgExt > 0.5 {
stat = 0
}
case "Output":
stat = curModeDir.Float64("Output", ndata).Float1D(di)
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
prevCorFunc := axon.StatPrevCorSim(ss.Stats, ss.Current, net, Trial, Run, "InputP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
prevCorFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "InputP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
// nv.ViewDefaults()
// nv.Scene().Camera.Pose.Pos.Set(0, 1.5, 3.0) // more "head on" than default which is more "top down"
// nv.Scene().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
nv.ConfigLabels(ss.Config.Env.InputNames)
ly := nv.LayerByName("Targets")
for li, lnm := range ss.Config.Env.InputNames {
lbl := nv.LabelByName(lnm)
lbl.Pose = ly.Pose
lbl.Pose.Pos.Y += .2
lbl.Pose.Pos.Z += .02
lbl.Pose.Pos.X += 0.05 + float32(li)*.045
lbl.Pose.Scale.SetMul(math32.Vec3(0.6, 0.4, 0.5))
}
}
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
ss.ConfigNetView(nv)
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/deepfsa"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[deepfsa.Sim, deepfsa.Config]() }
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package deepfsa
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepfsa
import (
"fmt"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
)
// FSAEnv generates states in a finite state automaton (FSA) which is a
// simple form of grammar for creating non-deterministic but still
// overall structured sequences.
type FSAEnv struct {
// name of this environment
Name string
// transition matrix: a square NxN tensor with the outer dimension being the
// current state and the inner dimension giving the probability of transitioning to each state.
TMat tensor.Float64 `display:"no-inline"`
// transition labels, one for each transition cell in TMat matrix.
Labels tensor.String
// current automaton state within the FSA.
AState env.CurPrev[int]
// number of next states in current state output (scalar).
NNext tensor.Int
// next states that have non-zero probability, with actual randomly
// chosen next state at start.
NextStates tensor.Int
// transition labels for next states that have non-zero probability,
// with actual randomly chosen one for next state at start.
NextLabels tensor.String
// sequence counter within epoch.
Seq env.Counter `display:"inline"`
// tick counter within sequence.
Tick env.Counter `display:"inline"`
// trial is the step counter within the current sequence, i.e., how many steps
// have been taken so far. It resets to 0 at the start of each sequence.
Trial env.Counter `display:"inline"`
// random number generator for the env; all random calls must use this.
// Set RandSeed to control the generated sequence.
Rand randx.SysRand `display:"-"`
// random seed.
RandSeed int64 `edit:"-"`
}
// InitTMat initializes matrix and labels to given size
func (ev *FSAEnv) InitTMat(nst int) {
ev.TMat.SetShapeSizes(nst, nst)
ev.Labels.SetShapeSizes(nst, nst)
ev.TMat.SetZeros()
ev.Labels.SetZeros()
ev.NNext.SetShapeSizes(1)
ev.NextStates.SetShapeSizes(nst)
ev.NextLabels.SetShapeSizes(nst)
}
// SetTMat sets given transition matrix probability and label
func (ev *FSAEnv) SetTMat(fm, to int, p float64, lbl string) {
ev.TMat.Set(p, fm, to)
ev.Labels.Set(lbl, fm, to)
}
// TMatReber sets the transition matrix to the standard Reber grammar FSA
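// For example, from state 1 (state 0 in the usual diagram) the grammar emits
// either "T" (moving to state 2) or "P" (moving to state 3), each with
// probability 0.5; every sequence begins with "B" and ends with "E".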
func (ev *FSAEnv) TMatReber() {
ev.InitTMat(8)
ev.SetTMat(0, 1, 1, "B") // 0 = start
ev.SetTMat(1, 2, 0.5, "T") // 1 = state 0 in the usual diagram (all states shifted +1 here)
ev.SetTMat(1, 3, 0.5, "P")
ev.SetTMat(2, 2, 0.5, "S")
ev.SetTMat(2, 4, 0.5, "X")
ev.SetTMat(3, 3, 0.5, "T")
ev.SetTMat(3, 5, 0.5, "V")
ev.SetTMat(4, 6, 0.5, "S")
ev.SetTMat(4, 3, 0.5, "X")
ev.SetTMat(5, 6, 0.5, "V")
ev.SetTMat(5, 4, 0.5, "P")
ev.SetTMat(6, 7, 1, "E") // 7 = end
ev.Init(0)
}
func (ev *FSAEnv) Validate() error {
if ev.TMat.Len() == 0 {
return fmt.Errorf("FSAEnv: %v has no transition matrix TMat set", ev.Name)
}
return nil
}
func (ev *FSAEnv) Label() string { return ev.Name }
func (ev *FSAEnv) State(element string) tensor.Values {
switch element {
case "NNext":
return &ev.NNext
case "NextStates":
return &ev.NextStates
case "NextLabels":
return &ev.NextLabels
}
return nil
}
// String returns the current state as a string
func (ev *FSAEnv) String() string {
nn := ev.NNext.Values[0]
lbls := ev.NextLabels.Values[0:nn]
return fmt.Sprintf("S_%d_%v", ev.AState.Cur, lbls)
}
func (ev *FSAEnv) Init(run int) {
ev.Rand.NewRand(ev.RandSeed)
ev.Seq.Init()
ev.Tick.Init()
ev.Trial.Init()
ev.Trial.Cur = -1 // init state -- key so that first Step() = 0
ev.AState.Cur = 0
ev.AState.Prev = -1
}
// NextState chooses the next state at random and sets NextStates / NextLabels,
// with the chosen state first, followed by the other nonzero-probability options.
func (ev *FSAEnv) NextState() {
nst := ev.TMat.DimSize(0)
if ev.AState.Cur < 0 || ev.AState.Cur >= nst-1 {
ev.AState.Cur = 0
}
ri := ev.AState.Cur * nst
ps := ev.TMat.Values[ri : ri+nst]
ls := ev.Labels.Values[ri : ri+nst]
nxt := randx.PChoose64(ps, &ev.Rand) // next state chosen at random
ev.NextStates.Set1D(nxt, 0)
ev.NextLabels.Set1D(ls[nxt], 0)
idx := 1
for i, p := range ps {
if i != nxt && p > 0 {
ev.NextStates.Set1D(i, idx)
ev.NextLabels.Set1D(ls[i], idx)
idx++
}
}
ev.NNext.Set1D(idx, 0)
ev.AState.Set(nxt)
}
func (ev *FSAEnv) Step() bool {
ev.NextState()
ev.Trial.Incr()
ev.Tick.Incr()
if ev.AState.Prev == 0 {
ev.Tick.Init()
ev.Seq.Incr()
}
return true
}
func (ev *FSAEnv) Action(element string, input tensor.Values) {
// nop
}
// Compile-time check that FSAEnv implements the env.Env interface.
var _ env.Env = (*FSAEnv)(nil)
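// The following is an illustrative sketch, not part of the original sim code:
// it shows how FSAEnv can generate a Reber-grammar sequence on its own, using
// only the API defined above. The function name is hypothetical.
func demoFSAEnvReber() {
	ev := &FSAEnv{Name: "ReberDemo", RandSeed: 1}
	ev.TMatReber() // configure the 8-state Reber grammar; also calls Init(0)
	for i := 0; i < 10; i++ {
		ev.Step()                // choose the next state at random, update NextStates / NextLabels
		fmt.Println(ev.String()) // prints e.g. "S_2_[T S]": the new state plus the labels possible on this step
	}
}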
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepfsa
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic layer params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.15 // 0.15 best
ly.Inhib.Layer.Gi = 1.0 // 1.0 > 1.1 v1.6.1
ly.Inhib.Layer.FB = 1 // 1.0 > 0.5
ly.Inhib.ActAvg.AdaptGi.SetBool(false) // not needed; doesn't engage
ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0
ly.Learn.TrgAvgAct.SynScaleRate = 0.005 // 0.005 > others
ly.Learn.TrgAvgAct.ErrLRate = 0.02 // 0.02 def
ly.Acts.Gbar.L = 20 // std
ly.Acts.Decay.Act = 0.0 // 0 == 0.2
ly.Acts.Decay.Glong = 0.0 // 0.2 improves FirstZero slightly, but not LastZero
ly.Acts.Dt.LongAvgTau = 20 // 20 > higher for objrec, lvis
ly.Acts.Dend.GExp = 0.2 // 0.2 > 0.5 > 0.1 > 0
ly.Acts.Dend.GR = 3 // 3 / 0.2 > 6 / 0.5
ly.Acts.Dend.SSGi = 2 // 2 > 3
ly.Acts.AK.Gk = 0.1
ly.Acts.NMDA.MgC = 1.4 // 1.4, 5 > 1.2, 0 ?
ly.Acts.NMDA.Voff = 0
ly.Acts.NMDA.Ge = 0.006
ly.Acts.GabaB.Gk = 0.015 // 0.015 def -- makes no diff down to 0.008
ly.Acts.Mahp.Gk = 0.05 // 0.05 > 0.02
ly.Acts.Sahp.Gk = 0.05 // 0.05 > 0.1 def with kna .1
ly.Acts.Sahp.CaTau = 10 // 10 (def) > 5?
ly.Acts.KNa.On.SetBool(true) // false > true
ly.Acts.KNa.Med.Gk = 0.1 // 0.05 >= 0.1 but not worth nonstandard
ly.Acts.KNa.Slow.Gk = 0.1
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false > true
ly.Learn.CaLearn.Dt.MTau = 2 // 2 > 5 actually
ly.Learn.CaLearn.ETraceTau = 4 // 4 == 5
ly.Learn.CaLearn.ETraceScale = 0 // 0 > 0.1 > 0.05, 0.2 etc
ly.Learn.Timing.On.SetBool(false)
// ly.Learn.Timing.Refractory.SetBool(true)
ly.Learn.Timing.LearnThr = 0.1
ly.Learn.Timing.SynCaCycles = 160 // 160 def good
// ly.Learn.Timing.Cycles = 170
// ly.Learn.Timing.TimeDiffTau = 4
// ly.Learn.CaSpike.SpikeCaSyn = 8 // vs 12 in lvis -- 12 does NOT work here
}},
{Sel: ".SuperLayer", Doc: "super layer params",
Set: func(ly *axon.LayerParams) {
ly.Bursts.ThrRel = 0.1 // 0.1, 0.1 best
ly.Bursts.ThrAbs = 0.1
}},
{Sel: ".InLay", Doc: "input layers need more inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // makes no diff
ly.Inhib.ActAvg.Nominal = 0.15
ly.Acts.Clamp.Ge = 1.5
}},
{Sel: ".CTLayer", Doc: "CT NMDA gbar factor is key",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 2.1 // 2.1 > others for SSGi = 2
ly.Inhib.Layer.FB = 1
ly.Acts.Dend.SSGi = 2 // 0 > higher -- kills nmda maint!
ly.CT.GeGain = 2.0 // 2.0 > 1.5 for sure (v0.2.1+)
ly.CT.DecayTau = 50 // 100 for Cycles=300 TODO: revisit!
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.GabaB.Gk = 0.015 // 0.015 def > 0.01
ly.Acts.MaintNMDA.Ge = 0.007 // 0.007 best, but 0.01 > lower if reg nmda weak
ly.Acts.MaintNMDA.Tau = 200 // 200 > 100 > 300
ly.Acts.NMDA.Ge = 0.007 // 0.007 matching maint best
ly.Acts.NMDA.Tau = 200 // 200 > 100
ly.Learn.TrgAvgAct.SynScaleRate = 0.005 // 0.005 > 0.0002 (much worse)
ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0
}},
{Sel: ".PulvinarLayer", Doc: "pulvinar",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.75 // 0.75 > higher v1.6.1
ly.Inhib.Layer.FB = 1
ly.Pulvinar.DriveScale = 0.2 // 0.2 > 0.1, 0.15, 0.25, 0.3
ly.Pulvinar.FullDriveAct = 0.6 // 0.6 def
ly.Acts.Spikes.Tr = 3 // 1 is best for ra25..
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Acts.Decay.AHP = 0.0 // clear ahp
ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > 0.05 with CaD as var
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
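// A minimal sketch of how these sheets are applied, following the pattern in
// the deepmusic ConfigSim / ApplyParams functions later in this document
// (variable names here are illustrative only):
//
//	var params axon.Params
//	params.Config(LayerParams, PathParams, "", "", reflect.ValueOf(ss))
//	params.ApplyAll(net)
//
// "Base" is always applied; any ExtraSheets selected by name are applied on top.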
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "std",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.SubMean = 0 // 0 > 1 -- even with CTCtxt = 0
pt.Learn.LRate.Base = 0.03 // .03 > others
pt.SWts.Adapt.LRate = 0.01 // 0.01 or 0.0001 music
pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok
pt.Learn.DWt.CaPScale = 0.95 // 0.95 > 0.98 > 1
pt.SWts.Adapt.HiMeanDecay = 0.0008 // 0.0008 default
pt.Learn.DWt.SynCa20.SetBool(false) // 10 > 20 reliably
pt.Learn.DWt.SynTraceTau = 1 // 1 >> 2 v0.0.9
pt.Learn.DWt.LearnThr = .2 // > 0 ok but not better
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2 // 0.2 > 0.3
}},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.02 // 0.02 >= 0.03 > 0.01
// pt.Learn.DWt.SynTraceTau = 2 // 1 > 2 now
pt.Learn.DWt.SubMean = 0 // 0 > 1 -- 1 is especially bad
pt.Learn.DWt.LearnThr = 0
}},
{Sel: ".CTFromSuper", Doc: "full > 1to1",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(true)
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.25
}},
{Sel: ".CTSelfCtxt", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5 // 0.5 > 0.2 > 0.8
pt.SWts.Init.Sym.SetBool(true) // true > false
}},
{Sel: ".CTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5 > 0.4, 0.3 > 0.8 (very bad)
pt.Com.GType = axon.MaintG
pt.SWts.Init.Sym.SetBool(true) // no effect? not sure why
}},
// {Sel: ".CTSelfMaint", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.1
// pt.SWts.Init.Sym = true // no effect? not sure why
// }},
{Sel: ".FromPulv", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.2
}},
// {Sel: ".CTToPulv", Doc: "",
// Set: func(pt *axon.PathParams) {
// // pt.Learn.LRate.Base = 0.1
// // pt.SWts.Adapt.SigGain = 1 // 1 does not work as well with any tested lrates
// }},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepmusic
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for the environment.
// Note: only key Env params that matter for both the Network and the Env are fields here;
// other params are set via the Env map data mechanism.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// UnitsPer is the number of units per localist output unit. 4 best.
UnitsPer int `default:"4"`
// train the full song -- else 30 notes
FullSong bool
// during testing, play the target note instead of the actual network output
PlayTarg bool
// drive inputs from the training sequence during testing -- otherwise use network's own output
TestClamp bool `default:"true"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct { //types:add
// Hid2 makes a second hidden layer on top of the first,
// which has independent projections into the same InputP
// pulvinar layer. It does not need to predict the first
// hidden layer to be useful.
Hid2 bool
// NUnitsY is the Y dimension of the number of units per hidden / CT layer,
// with X = 20. For the full song with Hid2, 15 is better; for 30Notes, 10.
NUnitsY int `default:"15"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct { //types:add
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Larger values are significantly faster for both CPU and GPU,
// and result in an effective mini-batch of learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs count up from there. All runs can be done in parallel by launching
// separate jobs, each with a different starting Run and Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"196"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// NZero is how many consecutive perfect, zero-error epochs are required before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the test patterns.
// Use 0 or -1 for no testing.
TestInterval int `default:"0"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
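// For example, with the defaults (ISICycles=0, MinusCycles=150, PlusCycles=50),
// this returns 0 + 150 + 50 = 200 cycles per trial.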
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment related configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "DeepMusic"
cfg.Title = "DeepAxon Music Prediction"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/deepmusic/README.md"
cfg.Doc = "This demonstrates a basic deep predictive learning Axon model that learns to predict the next note in a song."
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package deepmusic runs a DeepAxon network on predicting the next note
// in a musical sequence of notes.
package deepmusic
//go:generate core generate -add-types -gosl -add-funcs
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params, config.go for Config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level
// to perform all stats computations. phase = Start does all the
// initialization / configuration at the start of the given level
// (and is also called during Init).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
if ss.Config.Params.Hid2 {
ss.Params.ExtraSheets = "Hid2"
}
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *MusicEnv
if len(ss.Envs) == 0 {
trn = &MusicEnv{}
tst = &MusicEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*MusicEnv)
tst = ss.Envs.ByMode(Test).(*MusicEnv)
}
song := "bach_goldberg.mid"
// maxRows := 60 // 30 is a good benchmark; at 25 it is almost fully solved;
// it has to be pushed to 60 to see an effect of Tau=4 vs. 1
maxRows := 32
if ss.Config.Env.FullSong {
maxRows = 0 // full thing
ss.Params.ExtraSheets = "FullSong"
} else {
ss.Params.ExtraSheets = "30Notes"
}
if ss.Config.Params.Hid2 {
ss.Params.ExtraSheets += " Hid2"
}
track := 0
wrapNotes := false // does a bit better with false for short lengths (30)
// note: names must be standard here!
trn.Defaults()
trn.WrapNotes = wrapNotes
trn.Name = Train.String()
trn.Debug = false
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(song, track, maxRows, ss.Config.Env.UnitsPer)
trn.ConfigNData(ss.Config.Run.NData)
fmt.Printf("song rows: %d\n", trn.Song.NumRows())
tst.Defaults()
tst.WrapNotes = wrapNotes
tst.Name = Test.String()
tst.Play = true // see notes in README for getting this to work
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(song, track, maxRows, ss.Config.Env.UnitsPer)
tst.ConfigNData(ss.Config.Run.NData)
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByMode(etime.Train).(*MusicEnv)
nnotes := ev.NNotes
full := paths.NewFull()
full.SelfCon = true // unclear if this makes a diff for self cons at all
one2one := paths.NewOneToOne()
_ = one2one
space := float32(5)
nUnits := ss.Config.Params.NUnitsY
in, inPulv := net.AddInputPulv4D("Input", 1, nnotes, ss.Config.Env.UnitsPer, 1, space)
in.AddClass("InLay")
inPulv.AddClass("InLay")
var hidp, hid2, hid2ct *axon.Layer
hid, hidct := net.AddSuperCT2D("Hidden", "", 20, nUnits, space, one2one) // one2one learn > full
_ = hidp
if ss.Config.Params.Hid2 {
// hidp -> hid2 doesn't actually help at all..
// hidp = net.AddPulvForSuper(hid, space)
}
net.ConnectCTSelf(hidct, full, "")
// above maint + ctself is essential here, but not in deepfsa
// net.ConnectLayers(hidct, hidct, full, axon.CTCtxtPath).AddClass("CTSelfCtxt")
net.ConnectToPulv(hid, hidct, inPulv, full, full, "")
net.ConnectLayers(in, hid, full, axon.ForwardPath)
// net.ConnectLayers(hidct, hid, full, BackPath) // not useful
if ss.Config.Params.Hid2 {
hid2, hid2ct = net.AddSuperCT2D("Hidden2", "", 20, nUnits, space, one2one) // one2one learn > full
net.ConnectCTSelf(hid2ct, full, "")
net.ConnectToPulv(hid2, hid2ct, inPulv, full, full, "") // shortcut top-down
errors.Log1(inPulv.RecvPathBySendName(hid2ct.Name)).AsEmer().AddClass("CTToPulvHigher")
// net.ConnectToPulv(hid2, hid2ct, hidp, full, full) // predict layer below -- not useful
}
if ss.Config.Params.Hid2 {
net.BidirConnectLayers(hid, hid2, full)
net.ConnectLayers(hid2ct, hidct, full, axon.BackPath)
// net.ConnectLayers(hid2ct, hid, full, axon.BackPath)
}
hid.PlaceAbove(in)
if ss.Config.Params.Hid2 {
hid2.PlaceRightOf(hid, 2)
}
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// CurrentMode returns the current Train / Test mode from Context.
func (ss *Sim) CurrentMode() Modes {
ctx := ss.Net.Context()
var md Modes
md.SetInt64(int64(ctx.Mode))
return md
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
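// For example, with the default RunConfig (Runs=5, Epochs=100, Trials=196,
// NData=16) and Cycles()=200, the Train stack nests as
// Expt(1) > Run(5) > Epoch(100) > Trial(Trials rounded up to a multiple of
// NData, assuming math32.IntMultipleGE rounds up -- i.e. 208, stepped by 16) >
// Cycle(200).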
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
stop := curNZero >= stopNz
return stop
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
// note: network just recalibrates weights down to low level..
// trainEpoch.OnStart.Add("TurnUpWts", func() {
// epc := trainEpoch.Counter.Cur
// if epc != 100 {
// return
// }
// ly := ss.Net.LayerByName("InputP")
// pt := ly.RecvPaths[0]
// pt.Params.PathScale.Abs = 5
// ss.Net.InitGScale()
// axon.ToGPUParams()
// fmt.Println("At epoch:", epc, "turned up PathScale.Abs to InputP")
// })
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := net.Context()
ndata := int(ctx.NData)
lays := net.LayersByClass("InputLayer", "TargetLayer")
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*MusicEnv)
ev.Step()
net.InitExt()
for di := uint32(0); di < ctx.NData; di++ {
ev.StepDi(int(di))
if ctx.Mode == int32(Test) && !ss.Config.Env.TestClamp {
// todo:
lastnote := int(curModeDir.Float64("OutNote", int(ctx.NData)).Float1D(int(di))) + ev.NoteRange.Min
ev.RenderNote(lastnote)
}
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
pats := ev.State("Note")
if pats != nil {
ly.ApplyExt(di, pats)
}
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), int(di))
}
ss.Net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.ApplyParams()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" { // this is just for testing -- not usually needed
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
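// For example, at the start of each Train Epoch this calls
// RunStats(Train, Trial, Start), resetting the Trial-level accumulators
// before that epoch's trials run.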
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
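// As a usage sketch (names and indexing are illustrative): after training,
// the Epoch-level Err values accumulated here can be read back from the
// tensorfs tree, e.g.:
//
//	errs := ss.Stats.Dir(Train.String()).Dir(Epoch.String()).Float64("Err")
//	last := errs.Float1D(errs.Len() - 1) // mean Err of the most recent epoch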
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// Up to a point, it is good to loop over stats within one function,
// to reduce repetition of boilerplate.
statNames := []string{"InputP_CorSim", "Err", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ctx := ss.Net.Context()
ndata := int(ctx.NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero", "UnitErr", "Output":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
inp := ss.Net.LayerByName("InputP")
for di := range ndata {
var stat float64
switch name {
case "InputP_CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(inp.Index), int(di), int(axon.LayerPhaseDiff)))
case "Err":
err, minusIndex, plusIndex := inp.LocalistErr4D(ctx)
curModeDir.Float64("TargNote", ndata).SetFloat1D(float64(plusIndex[di]), di)
curModeDir.Float64("OutNote", ndata).SetFloat1D(float64(minusIndex[di]), di)
if err[di] {
stat = 1
} else {
stat = 0
}
ev := ss.Envs.ByMode(Modes(ss.Net.Context().Mode)).(*MusicEnv)
if ev.Play {
if ss.Config.Env.PlayTarg {
ev.PlayNote(plusIndex[di])
} else {
ev.PlayNote(minusIndex[di])
}
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
prevCorFunc := axon.StatPrevCorSim(ss.Stats, ss.Current, net, Trial, Run, "InputP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
prevCorFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer, axon.PulvinarLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "InputP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"InputP_CorSim", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
// nv.ViewDefaults()
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.1, 2.0)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
}
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
ss.ConfigNetView(nv)
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/deepmusic"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[deepmusic.Sim, deepmusic.Config]() }
// Code generated by "core generate -add-types -gosl -add-funcs"; DO NOT EDIT.
package deepmusic
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepmusic
import (
"bytes"
"embed"
"fmt"
"time"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
"gitlab.com/gomidi/midi/v2"
"gitlab.com/gomidi/midi/v2/gm"
"gitlab.com/gomidi/midi/v2/smf"
)
//go:embed bach_goldberg.mid
var embedfs embed.FS
// MusicEnv reads in a midi SMF file and presents it as a sequence of notes.
// Only songs with one note at a time per track are currently supported.
// The current note is rendered into a tensor with localist note coding,
// with duplicate units per note for spiking.
type MusicEnv struct {
// name of this environment
Name string
// emit debugging messages about the music file
Debug bool
// use only 1 octave of 12 notes for everything -- keeps it consistent
WrapNotes bool
// number of time ticks per row in table -- note transitions that are faster than this will be lost
TicksPer int `default:"120"`
// which track to process
Track int
// play output as it steps
Play bool
// limit song length to given number of steps, if > 0
MaxSteps int
// time offset for data parallel = Song.Rows / (NData+1)
DiOffset int `edit:"-"`
// number of units per localist note value
UnitsPer int
// range of notes in given track
NoteRange minmax.Int
// number of notes
NNotes int
// the song encoded into 200 msec increments, with columns as tracks
Song *table.Table `display:"-"`
// current time step
Time env.Counter `display:"inline"`
// current note, rendered as a 4D tensor with shape: 1 x NNotes x UnitsPer x 1
Note tensor.Float32
// current note index
NoteIndex int
// the function for playing midi
Player func(msg midi.Message) error `display:"-"`
// for playing notes
LastNotePlayed int `display:"-"`
}
func (ev *MusicEnv) Label() string { return ev.Name }
func (ev *MusicEnv) Defaults() {
ev.TicksPer = 120
ev.WrapNotes = true
}
func (ev *MusicEnv) TrackInfo(track smf.Track) (name string, ticks int, bpm float64) {
for _, ev := range track {
ticks += int(ev.Delta)
msg := ev.Message
if msg.Type() == smf.MetaEndOfTrackMsg {
// ignore
continue
}
switch {
case msg.GetMetaTrackName(&name): // set the trackname
// fmt.Printf("track no: %d name: %s\n", no, name)
// case msg.GetMetaInstrument(&name): // set the trackname based on instrument name
// fmt.Printf("instr no: %d name: %s\n", no, name)
case msg.GetMetaTempo(&bpm):
// fmt.Printf("bpm: %0.2f\n", bpm)
}
}
if ev.Debug {
fmt.Printf("track name: %s ticks %d\n", name, ticks)
}
return
}
func (ev *MusicEnv) LoadSong(fname string) error {
ev.Song = table.New()
data, err := embedfs.ReadFile(fname)
if err != nil {
fmt.Println(err)
return err
}
// read the bytes
s, err := smf.ReadFrom(bytes.NewReader(data))
if err != nil {
fmt.Printf("MIDI error: %s", err.Error())
return err
}
// fmt.Printf("got %v tracks\n", len(s.Tracks))
var tslice []int
var ticks int
var bpm float64
for no, track := range s.Tracks {
name, tick, bp := ev.TrackInfo(track)
// fmt.Printf("track:\t%d\tlen:\t%d\n", no, len(track))
if bp > 0 {
bpm = bp
}
if tick == 0 || len(track) < 20 {
continue
}
tslice = append(tslice, no)
ticks = max(ticks, tick)
ev.Song.AddIntColumn(name)
}
if ev.Debug {
fmt.Printf("BPM: %g\n", bpm)
}
nrows := ticks / ev.TicksPer
if ev.MaxSteps > 0 && nrows > ev.MaxSteps {
nrows = ev.MaxSteps
}
toggleOn := true
ev.NoteRange.SetInfinity()
ev.Song.SetNumRows(nrows)
for ti, no := range tslice {
track := s.Tracks[no]
var tick int
lastOnRow := -1
for _, evt := range track {
tick += int(evt.Delta)
msg := evt.Message
if msg.Type() == smf.MetaEndOfTrackMsg {
// ignore
continue
}
row := tick / ev.TicksPer
if row >= nrows {
break
}
var channel, note, vel uint8
switch {
case msg.GetNoteOff(&channel, &note, &vel):
if ev.Debug && row < 20 {
fmt.Printf("%d\t%d\tnote off:\t%d\n", tick, row, note)
}
for ri := lastOnRow + 1; ri <= row; ri++ {
ev.Song.ColumnByIndex(ti).SetFloat1D(float64(note), ri)
}
case msg.GetNoteOn(&channel, &note, &vel):
if ti == ev.Track {
ev.NoteRange.FitValInRange(int(note))
}
if toggleOn && lastOnRow >= 0 {
if ev.Debug && row < 20 {
fmt.Printf("%d\t%d\tnote off:\t%d\n", tick, row, note)
}
for ri := lastOnRow + 1; ri <= row; ri++ {
ev.Song.ColumnByIndex(ti).SetFloat1D(float64(note), ri)
}
lastOnRow = -1
} else {
lastOnRow = row
ev.Song.ColumnByIndex(ti).SetFloat1D(float64(note), row)
if ev.Debug && row < 20 {
fmt.Printf("%d\t%d\tnote on:\t%d\n", tick, row, note)
}
}
}
}
}
return nil
}
func (ev *MusicEnv) State(element string) tensor.Values {
switch element {
case "Note":
return &ev.Note
}
return nil
}
// String returns the current state as a string
func (ev *MusicEnv) String() string {
return fmt.Sprintf("%d:%d", ev.Time.Cur, ev.NoteIndex)
}
func (ev *MusicEnv) ConfigPlay() error {
fmt.Printf("outports:\n%s\n", midi.GetOutPorts())
portname := "IAC Driver Bus 1"
out, err := midi.FindOutPort(portname)
if err != nil {
fmt.Println(err)
return err
}
ev.Player, _ = midi.SendTo(out)
ev.Player(midi.ProgramChange(0, gm.Instr_Harpsichord.Value()))
return nil
}
func (ev *MusicEnv) Config(fname string, track, maxRows, unitsper int) {
if ev.TicksPer == 0 {
ev.Defaults()
}
ev.Track = track
ev.UnitsPer = unitsper
ev.MaxSteps = maxRows
ev.LoadSong(fname)
ev.NNotes = ev.NoteRange.Range() + 1
if ev.WrapNotes {
ev.NNotes = 12
}
ev.Note.SetShapeSizes(1, ev.NNotes, ev.UnitsPer, 1)
if ev.Play {
ev.ConfigPlay()
}
}
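// exampleMusicEnvUsage is an illustrative sketch of how MusicEnv is typically
// driven (the track, step limit, and units-per-note values here are arbitrary
// examples): Config loads the embedded song, Init resets the time counter,
// and each Step renders the next note into the Note tensor.
func exampleMusicEnvUsage() {
ev := &MusicEnv{Name: "Bach"}
ev.Defaults()
ev.Config("bach_goldberg.mid", 0, 30, 4) // track 0, limit to 30 steps, 4 units per note
ev.Init(0)
for i := 0; i < 8; i++ {
ev.Step()
_ = ev.State("Note") // localist note tensor presented to the network
}
}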
func (ev *MusicEnv) ConfigNData(ndata int) {
ev.DiOffset = ev.Song.NumRows() / (ndata + 1)
if ev.DiOffset < 2 {
ev.DiOffset = 2
}
}
func (ev *MusicEnv) Init(run int) {
ev.Time.Init()
}
func (ev *MusicEnv) Step() bool {
ev.Time.Incr()
tm := ev.Time.Cur
if tm >= ev.Song.NumRows() {
ev.Time.Set(0)
tm = 0
}
// fmt.Println(ev.Song.NumRows(), ev.Song.NumColumns(), ev.Track)
// fmt.Println(ev.Song.ColumnByIndex(ev.Track))
note := int(ev.Song.ColumnByIndex(ev.Track).Float1D(tm))
ev.RenderNote(note)
return true
}
// StepDi is the data parallel version, sampling a different time offset from
// the current timestep for each data index di.
func (ev *MusicEnv) StepDi(di int) bool {
tm := (ev.Time.Cur + di*ev.DiOffset) % ev.Song.NumRows()
note := int(ev.Song.ColumnByIndex(ev.Track).Float1D(tm))
ev.RenderNote(note)
return true
}
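// exampleDiOffset is an illustrative sketch of the data parallel offsets,
// assuming Config has already been called: with a hypothetical 400-row song
// and ndata = 3, ConfigNData sets DiOffset = 400/(3+1) = 100, so at time step
// t, StepDi(di) samples rows t, t+100, and t+200 (mod 400) for di = 0, 1, 2.
func exampleDiOffset(ev *MusicEnv) {
ev.ConfigNData(3)
for di := 0; di < 3; di++ {
ev.StepDi(di) // renders the note at the offset row for this data index
}
}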
func (ev *MusicEnv) RenderNote(note int) {
ev.NoteIndex = note
ev.Note.SetZeros()
if note <= 0 {
return
}
noteidx := note - ev.NoteRange.Min
// ev.PlayNote(noteidx)
if ev.WrapNotes {
noteidx = (note - 9) % 12 // A = 0, etc.
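// e.g., MIDI note 69 (A4) -> (69-9)%12 = 0, so all A's map to unit 0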
}
for ni := 0; ni < ev.UnitsPer; ni++ {
ev.Note.Set(1, 0, noteidx, ni, 0)
}
}
// PlayNote actually plays a note (based on index) to the midi device, if Play is active and working
func (ev *MusicEnv) PlayNote(noteIndex int) {
if !ev.Play || ev.Player == nil {
return
}
note := noteIndex + ev.NoteRange.Min
if ev.LastNotePlayed > 0 && note != ev.LastNotePlayed {
ev.Player(midi.NoteOff(0, uint8(ev.LastNotePlayed)))
}
if note != ev.LastNotePlayed {
ev.Player(midi.NoteOn(0, uint8(note), 100))
}
time.Sleep(time.Duration(ev.TicksPer) * time.Millisecond)
ev.LastNotePlayed = note
}
func (ev *MusicEnv) Action(element string, input tensor.Values) {
// nop
}
// Compile-time check that MusicEnv implements the env.Env interface.
var _ env.Env = (*MusicEnv)(nil)
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepmusic
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic layer params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.05 needed to get hidden2 high to .1, 0.1 keeps it too low!
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 0.95 > 1.0 > 1.1 SSGi = 2
ly.Learn.TrgAvgAct.SynScaleRate = 0.005 // 0.005 best
ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0
ly.Acts.Dend.SSGi = 2
ly.Acts.Gbar.L = 20 // std
ly.Acts.Decay.Act = 0.0 // 0 == 0.2
ly.Acts.Decay.Glong = 0.0
ly.Acts.NMDA.MgC = 1.4 // 1.4, 5 > 1.2, 0 ?
ly.Acts.NMDA.Voff = 0
ly.Acts.NMDA.Ge = 0.006
ly.Acts.GabaB.Gk = 0.015 // 0.015 > 0.012 lower
ly.Acts.Mahp.Gk = 0.05 // 0.05 > 0.02
ly.Acts.Sahp.Gk = 0.1 // 0.1 > 0.05 for sure
ly.Acts.Sahp.CaTau = 5 // 5 > 10 verified
ly.Acts.KNa.Med.Gk = 0.1 // 0.05 > 0.1 for 30note, 0.1 > 0.05 full
ly.Acts.KNa.Slow.Gk = 0.1
ly.Learn.CaLearn.Dt.MTau = 2 // 2 > 5 actually
ly.Learn.CaLearn.ETraceTau = 4
ly.Learn.CaLearn.ETraceScale = 0.05 // 0.05 > 0.1, 0.2, > 0 -- actually useful!
ly.Learn.Timing.On.SetBool(false)
// ly.Learn.Timing.Refractory.SetBool(true)
// ly.Learn.Timing.LearnThr = 0.1
// ly.Learn.Timing.SynCaCycles = 160
// ly.Learn.Timing.Cycles = 170
// ly.Learn.Timing.TimeDiffTau = 4
}},
{Sel: ".SuperLayer", Doc: "super layer params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1
ly.Bursts.ThrRel = 0.1 // 0.1 > 0.2 > 0
ly.Bursts.ThrAbs = 0.1
}},
{Sel: ".InLay", Doc: "input layers need more inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.025 // 0.025 for full song
// ly.Inhib.ActAvg.Nominal = 0.05 // 0.08 for 18 notes -- 30 rows
}},
{Sel: ".CTLayer", Doc: "CT NMDA gbar factor is key",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.12 > lower, higher; actual ~.18, and MaxGeM is high, but anything > .12 fails -- needs strong self cons.
ly.Inhib.Layer.Gi = 2.1 // 2.1 with SSGi = 2
ly.Inhib.Layer.FB = 1 // 1 > 1.5
ly.CT.GeGain = 1.0 // 1.0 >= 1.5 > 2.0 (very bad) > 0.5
ly.Acts.Dend.SSGi = 2 // 2 def is good w/ lower Gi
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.MaintNMDA.Ge = 0.007 // 0.007 > 0.008 -- same w/ reg better than not
ly.Acts.MaintNMDA.Tau = 300 // 300 > 200
ly.Acts.NMDA.Ge = 0.007 // 0.007 >> 0.006 -- interesting
ly.Acts.NMDA.Tau = 300 // 300 > 200
ly.Acts.GabaB.Gk = 0.015 // 0.015 def
ly.Acts.Noise.On.SetBool(false) // todo?
ly.Acts.Noise.Ge = 0.005
ly.Acts.Noise.Gi = 0.005
}},
{Sel: ".PulvinarLayer", Doc: "Pulv = Pulvinar",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.0 // 1.0 > 1.05, 0.9
ly.Inhib.Layer.FB = 4 // 4 >> 1 in full, 1 > 4 in 30
ly.Pulvinar.DriveScale = 0.12 // 0.12 min w/ CT->P=2, 30note
ly.Pulvinar.FullDriveAct = 0.6 // 0.6 def
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > .05
}},
},
"Hid2": {
// {Sel: "#Hidden2CT", Doc: "same as CT1 actually",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.ActAvg.Nominal = 0.1
// }},
{Sel: ".CTLayer", Doc: "gi?",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 2.1
}},
{Sel: "#Hidden2", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.0
}},
},
"30Notes": {
{Sel: ".InLay", Doc: "input layers need more inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05 // 0.08 for 18 notes -- 30 rows
}},
{Sel: ".PulvinarLayer", Doc: "less FB inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.FB = 1 // 1 > 4
ly.Pulvinar.DriveScale = 0.12 // 0.12 > higher, min for CT->P 2.0
}},
{Sel: ".CTLayer", Doc: "30 needs less inhib, smaller layer",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.9
}},
},
"FullSong": {
{Sel: ".InLay", Doc: "input layers need more inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.025 // 0.025 for full song
}},
{Sel: ".PulvinarLayer", Doc: "more FB inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.FB = 4 // 4 >> 1
ly.Pulvinar.DriveScale = 0.11 // 0.11 > 0.1, 0.12+
}},
},
}
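// Note: extra sheets such as "Hid2", "30Notes", or "FullSong" are applied on
// top of "Base" by selecting them by name, typically via the sim's extra
// params Sheet config option (the exact selection mechanism is assumed here,
// not shown in this file).
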
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "std",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.002 // full song and 30n: 0.002 > 0.005, 0.001 in the end
pt.Learn.DWt.SubMean = 0 // 0 > 1 -- doesn't work at all with 1
pt.SWts.Adapt.LRate = 0.0001 // 0.01 == 0.0001 but 0.001 not as good..
pt.SWts.Adapt.HiMeanDecay = 0 // 0 > 0.0008 (lvis best)
pt.SWts.Adapt.HiMeanThr = 0.5
pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok
pt.Learn.DWt.CaPScale = 0.95 // 0.95 def >> 1
pt.Learn.DWt.SynCa20.SetBool(true) // 20 > 10; 25 was even better before
pt.Learn.DWt.SynTraceTau = 1 // 1 > 2 v0.0.9
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.2
}},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.001 // 0.001 >> 0.002 for full
pt.Learn.DWt.SubMean = 0 // 0 > 1 -- 1 is especially bad
// pt.Learn.DWt.SynTraceTau = 2 // 1 > 2 > 4 v0.0.9
}},
{Sel: ".CTFromSuper", Doc: "1to1 > full",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(true) // learning > fixed 1to1
pt.SWts.Init.Mean = 0.5 // if fixed, 0.8 > 0.5, var = 0
pt.SWts.Init.Var = 0.25
}},
{Sel: ".FromPulv", Doc: "defaults to .Back but generally weaker is better",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 == 0.15 > 0.05
}},
{Sel: ".CTSelfCtxt", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5 // 0.5 > 0.2 > 0.8
pt.SWts.Init.Sym.SetBool(true) // true > false
}},
{Sel: ".CTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.2 // 0.2 > lower, higher
pt.Com.GType = axon.MaintG
pt.SWts.Init.Sym.SetBool(true) // no effect? not sure why
}},
{Sel: "#HiddenCTToInputP", Doc: "amp up P",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2.0 // P is weak; interacts with DriveScale
}},
},
"30Notes": {
{Sel: "#HiddenCTToInputP", Doc: "amp up P",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 2 > 1.5 > 1
}},
},
"FullSong": {
{Sel: "#HiddenCTToInputP", Doc: "amp up P",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2.0
}},
},
"Hid2": {
{Sel: "#Hidden2CTToHiddenCT", Doc: "ct top-down",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.2
}},
{Sel: "#HiddenToHidden2", Doc: "jack up fwd pathway",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 fine
}},
{Sel: "#HiddenCTToInputP", Doc: "amp up P",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2.0 // ?
}},
{Sel: "#Hidden2CTToInputP", Doc: "amp up P",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2.0 // ?
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepspace
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for the environment.
// Note: only fields for key Env params that matter for both the Network and
// the Env are included here; other params are set via the Env map data mechanism.
type EnvConfig struct { //types:add
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// UnitsPer is the number of units per localist output unit. 4 best.
UnitsPer int `default:"4"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct { //types:add
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"200"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be a valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct { //types:add
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Is significantly faster for both CPU and GPU. Results in an effective
// mini-batch of learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"2" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"50"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"196"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// TestInterval is how often, in training epochs, to run through all the
// test patterns. Use 0 or -1 for no testing.
TestInterval int `default:"0"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
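// with the defaults above (ISICycles=0, MinusCycles=150, PlusCycles=50),
// this is 200 cycles per trial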
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment related configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "DeepSpace"
cfg.Title = "Deep Space"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/deepspace/README.md"
cfg.Doc = "This simulates deep cerebellar nucleus and deep cortical layer predictive learning on spatial updating from the vestibular and visual systems."
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// deepspace simulates deep cerebellar nucleus and deep cortical layer predictive
// learning on spatial updating from the vestibular and visual systems.
package deepspace
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/sims/deepspace/emery"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params, config.go for Config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// EnvGUI is the GUI for viewing the environment.
EnvGUI *emery.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
ndata := ss.Config.Run.NData
var trn *emery.EmeryEnv
if len(ss.Envs) == 0 {
trn = &emery.EmeryEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*emery.EmeryEnv)
}
// note: names must be standard here!
trn.Defaults()
trn.Name = Train.String()
trn.Params.UnitsPer = ss.Config.Env.UnitsPer
// if ss.Config.Env.Env != nil {
// reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
// }
trn.Config(ndata, ss.Config.Run.Cycles(), ss.Root.Dir("Env"), axon.ComputeGPU)
trn.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
cycles := ss.Config.Run.Cycles()
ev := ss.Envs.ByMode(Train).(*emery.EmeryEnv)
full := paths.NewFull()
full.SelfCon = true // unclear if this makes a diff for self cons at all
one2one := paths.NewOneToOne()
_ = one2one
p1to1 := paths.NewPoolOneToOne()
_ = p1to1
space := float32(2)
// eyeSz := image.Point{2, 1}
addInput := func(nm string, doc string) (in, mf, thal *axon.Layer) {
in = net.AddLayer4D(nm, axon.InputLayer, 1, 2, ev.Params.UnitsPer, 1)
in.AddClass("RateIn")
in.Doc = "Rate code version. " + doc
mf = net.AddLayer4D(nm+"MF", axon.InputLayer, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
mf.AddClass("MFIn")
mf.Doc = "MF mossy fiber input, transient population code. " + doc
mf.PlaceBehind(in, space)
thal = net.AddLayer4D(nm+"Thal", axon.InputLayer, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
thal.AddClass("ThalIn")
thal.Doc = "Thalamic input, integrated population code. " + doc
thal.PlaceBehind(mf, space)
return
}
rotAct, rotActMF, rotActThal := addInput("Rotate", "Full body horizontal rotation action, population coded left to right with gaussian tuning curves for a range of degrees for each unit (X axis) and redundant units for population code in the Y axis.")
// rotActPrev, rotActPrevPop := addInput("ActRotatePrev", "Previous trial's version of ActRotate. This should be implicitly maintained but currently is not.")
// _ = rotActPrevPop
addInputPulv := func(nm string, doc string) (in, thal, thalP *axon.Layer) {
in = net.AddLayer4D(nm, axon.InputLayer, 1, 2, ev.Params.UnitsPer, 1)
in.AddClass("RateIn")
in.Doc = "Rate code version. " + doc
thal, thalP = net.AddInputPulv4D(nm+"Thal", ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits, space)
thal.AddClass("ThalIn")
thalP.AddClass("ThalIn")
thal.Doc = "Thalamic input, integrated population code. " + doc
thal.PlaceBehind(in, space)
return
}
vsRotVel, vsRotVelThal, vsRotVelThalP := addInputPulv("VSRotHVel", "Vestibular horizontal rotation velocity, computed from the physics model over time. Population coded left to right with gaussian tuning curves for a range of degrees for each unit (X axis) and redundant units for population code in the Y axis.")
_ = vsRotVelThalP
vmRotVel, vmRotVelThal, vmRotVelThalP := addInputPulv("VMRotHVel", "Full-field visual motion computed from the eye using retinal motion filter (see Env tab for visual environment). Population coded left to right with gaussian tuning curves for a range of velocities for each unit (X axis) and redundant units for population code in the Y axis.")
s1, s1ct := net.AddSuperCT2D("S1", "", 10, 10, space, one2one) // one2one learn > full
s1.Doc = "Neocortical integrated vestibular and full-field visual motion processing. Does predictive learning on both input signals, more like S2 (secondary), but just using one for simplicity."
// net.ConnectCTSelf(s1ct, full, "") // self definitely doesn't make sense -- no need for 2-back ct
// net.LateralConnectLayer(s1ct, full).AddClass("CTSelfMaint") // no diff
net.ConnectToPulv(s1, s1ct, vsRotVelThalP, full, full, "")
net.ConnectLayers(rotActThal, s1, full, axon.ForwardPath).AddClass("FFToHid", "FromAct")
net.ConnectLayers(vsRotVelThal, s1, full, axon.ForwardPath).AddClass("FFToHid")
// visHid, visHidct := net.AddSuperCT2D("VisHid", "", 10, 10, space, one2one) // one2one learn > full
// net.ConnectToPulv(visHid, visHidct, vmRotVelp, full, full, "")
// net.ConnectLayers(rotAct, visHid, full, axon.ForwardPath).AddClass("FFToHid", "FromAct")
// net.ConnectLayers(vmRotVel, visHid, full, axon.ForwardPath).AddClass("FFToHid")
net.ConnectToPulv(s1, s1ct, vmRotVelThalP, full, full, "")
net.ConnectLayers(vmRotVelThal, s1, full, axon.ForwardPath).AddClass("FFToHid")
// net.ConnectLayers(vsRotVelThal, visHid, full, axon.ForwardPath).AddClass("FFToHid")
if ev.Params.LeftEye {
// net.ConnectToPulv(visHidThal, visHidct, eyeLInp, full, full, "")
// net.ConnectLayers(eyeLInThal, visHid, full, axon.ForwardPath).AddClass("FFToHid")
}
// cerebellum:
// cycles-20 is sufficient to allow time for motor to engage
ioUp, cniIOUp, cniUp, cneUp := net.AddNuclearCNUp(vsRotVel, rotAct, cycles-20, space)
_, _ = ioUp, cneUp
pt := net.ConnectLayers(vsRotVel, cneUp, p1to1, axon.ForwardPath).AddClass("SenseToCNeUp")
pt.AddDefaultParams(func(pt *axon.PathParams) { pt.SetFixedWts() })
// net.ConnectLayers(rotActPrev, cniIOUp, p1to1, axon.CNIOPath).AddClass("MFUp", "MFToCNiIOUp")
// net.ConnectLayers(s1ct, cniIOUp, p1to1, axon.CNIOPath).AddClass("MFUp", "MFToCNiIOUp")
net.ConnectLayers(rotActMF, cniIOUp, full, axon.CNIOPath).AddClass("MFUp", "MFToCNiIOUp")
// net.ConnectLayers(rotActPrev, cniUp, p1to1, axon.CNIOPath).AddClass("MFUp", "MFToCNiUp")
// net.ConnectLayers(s1ct, cniUp, p1to1, axon.CNIOPath).AddClass("MFUp", "MFToCNiUp")
net.ConnectLayers(rotActMF, cniUp, full, axon.CNIOPath).AddClass("MFUp", "MFToCNiUp")
// position
// rotActPrev.PlaceBehind(rotActThal, space)
vsRotVel.PlaceRightOf(rotAct, float32(ev.Params.PopCodeUnits))
vmRotVel.PlaceRightOf(vsRotVel, float32(ev.Params.PopCodeUnits))
// if ev.LeftEye {
// eyeLIn.PlaceRightOf(vmRotVel, space)
// }
s1.PlaceAbove(rotAct)
cniIOUp.PlaceRightOf(s1, space*3)
// visHid.PlaceRightOf(s1, space)
// if ss.Config.Params.Hid2 {
// hid2.PlaceBehind(hdHidct, 2*space)
// }
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
ss.UpdateEnvGUI(Train)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
return &ss.TrainUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
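// note: the Trial counter increments by NData per step (AddLevelIncr above),
// which is why trials is rounded up to an even multiple of NData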
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.TakeNextActions(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
for mode, st := range ls.Stacks {
st.Loops[Cycle].OnStart.Add("ApplyInputs", func() { ss.ApplyInputs(mode.(Modes)) })
plusPhase := st.Loops[Cycle].EventByName("MinusPhase:End")
plusPhase.OnEvent.InsertBefore("PlusPhase:Start", "NextAction", func() bool {
// note: critical to have this happen *after* MinusPhase:End and *before* PlusPhase:Start
// because minus phase end has gated info, and plus phase start applies action input
ss.NextAction(mode.(Modes))
return false
})
}
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Loop(Train, Trial).OnEnd.Add("UpdateEnvGUI", func() {
ss.UpdateEnvGUI(Train)
})
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := ss.Net.Context()
ndata := int(ctx.NData)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*emery.EmeryEnv)
cyc := ss.Loops.Loop(mode, Cycle).Counter.Cur
render := cyc%ev.Params.TimeBinCycles == 0
ev.RenderStates = render
ev.Step()
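// the env steps its physics every cycle, but network inputs are only
// re-applied at time-bin boundaries (every TimeBinCycles cycles)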
if !render {
return
}
net.InitExt()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
pats := ev.State(lnm)
if !reflectx.IsNil(reflect.ValueOf(pats)) {
ly.ApplyExtAll(ctx, pats)
} else {
// fmt.Println("nil pats:", lnm)
}
}
ss.Net.ApplyExts()
if cyc == 0 {
for di := uint32(0); di < ctx.NData; di++ {
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), int(di))
}
}
}
// NextAction sets next actions.
// Called at end of minus phase.
func (ss *Sim) NextAction(mode Modes) {
net := ss.Net
ctx := net.Context()
ndata := int(ctx.NData)
ev := ss.Envs.ByMode(mode).(*emery.EmeryEnv)
for di := 0; di < ndata; di++ {
ang := 2.0 * (ev.Rand.Float32() - 0.5) * ev.Params.MaxRotate
ev.NextAction(di, emery.Rotate, ang)
}
}
// TakeNextActions starts executing actions specified in NextAction.
// This is called at start of trial.
func (ss *Sim) TakeNextActions(mode Modes) {
ev := ss.Envs.ByMode(mode).(*emery.EmeryEnv)
ev.TakeNextActions()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" { // this is just for testing -- not usually needed
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial { // < not <=
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
// if level == Cycle {
// return
// }
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if level > Cycle {
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
if level == Trial {
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train Cycle Plot")
}
}
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
// if level == Cycle {
// continue
// }
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Cycle))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
plays := net.LayersByType(axon.PulvinarLayer)
corSimFunc := axon.StatCorSim(ss.Stats, ss.Current, net, Trial, Run, plays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
corSimFunc(mode, level, phase == Start)
})
prevCorFunc := axon.StatPrevCorSim(ss.Stats, ss.Current, net, Trial, Run, plays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
prevCorFunc(mode, level, phase == Start)
})
ss.ConfigStatAdaptFilt()
ss.ConfigStatVis()
ss.ConfigStatNuclear()
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer, axon.InputLayer, axon.PulvinarLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Depth", "DepthP", "HeadDir", "HeadDirP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
func (ss *Sim) ConfigStatVis() {
statNames := []string{"VisVestibCor", "EmeryAng"}
statDescs := map[string]string{
"VisVestibCor": "Correlation between the visual motion and vestibular rotation velocity signals, indicating quality of visual motion filters",
"EmeryAng": "Emery's current body angle",
}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
for _, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
if phase == Start {
tsr.SetNumRows(0)
// plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.On = true
// })
metadata.SetDoc(tsr, statDescs[name])
continue
}
switch level {
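// aggregation: Trial appends one value per data-parallel item; Run takes
// the final value from the level below; other levels take the mean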
case Trial:
ev := ss.Envs.ByMode(mode).(*emery.EmeryEnv)
for di := range ndata {
var stat float64
switch name {
case "VisVestibCor":
stat = ev.VisVestibCorrelCycle(di)
case "EmeryAng":
stat = float64(ev.SenseValue(di, emery.VSRotHDir, false)) // current
}
curModeDir.Float64(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
case Run:
stat := stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default:
stat := stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
}
func (ss *Sim) ConfigStatNuclear() {
net := ss.Net
prefix := "VSRotHVel"
pool := 1 // pool index 0 is the whole-layer pool, so 1 is the first sub-pool
layerNames := []string{"IO", "CNiIO", "CNiUp", "CNeUp"}
layers := make([]*axon.Layer, len(layerNames))
pools := make([]uint32, len(layerNames))
for li, lnm := range layerNames {
layers[li] = net.LayerByName(prefix + lnm)
pools[li] = layers[li].Params.PoolIndex(1) // 4D
}
statNames := []string{"IOenv", "IOe", "IOi", "IOioff", "IOerr", "IOspike", "CNiIO", "CNiUp", "CNeUp", "CNeUpGe", "CNeUpGi"}
statDescs := map[string]string{
"IOenv": "IO envelope initiated by action input to IO neurons",
"IOe": "Integrated excitatory input to IO",
"IOi": "Integrated inhibitory input to IO at the current time",
"IOioff": "Integrated inhibitory input to IO offset from TimeOff, which is compared against IOe",
"IOerr": "IOe - IOi (positive only): the error signal that drives IO spiking, if above threshold",
"IOspike": "IO spike, either from IOerr or at end of the IOenv for the baseline spiking",
"CNiIO": "integrated activity (CaP) of CNiIO predictive inhibitory input to IO, generates IOi at a temporal offset 'in the future'",
"CNiUp": "inhibitory interneuron that projects to CNeUp, learns to inhibit CNeUp just prior to its activation",
"CNeUp": "excitatory output, driven directly by excitatory sensory input, which should be cancelled by CNiUp inputs",
"CNeUpGe": "excitatory conductance into CNeUp, from sensory input",
"CNeUpGi": "inhibitory conductance into CNeUp, from CNiUp",
}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Cycle {
return
}
di := 0
diu := uint32(di)
for _, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Float64(name)
ndata := 1
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if name != "CNeUpGe" && name != "CNeUpGi" {
s.On = true
}
})
metadata.SetDoc(tsr, statDescs[name])
continue
}
var stat float32
switch name {
case "IOenv":
stat = layers[0].AvgMaxVarByPool("TimeCycle", pool, di).Avg
if stat > 0 {
stat = 1
}
case "IOe":
stat = layers[0].AvgMaxVarByPool("GaP", pool, di).Avg
case "IOi":
stat = layers[0].AvgMaxVarByPool("GaM", pool, di).Avg
case "IOioff":
stat = layers[0].AvgMaxVarByPool("GaD", pool, di).Avg
case "IOerr":
stat = layers[0].AvgMaxVarByPool("TimeDiff", pool, di).Avg
case "IOspike":
stat = layers[0].AvgMaxVarByPool("Spike", pool, di).Avg
case "CNiIO":
stat = axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pools[1], diu)
case "CNiUp":
stat = axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pools[2], diu)
case "CNeUp":
stat = axon.PoolAvgMax(axon.AMCaP, axon.AMCycle, axon.Avg, pools[3], diu)
case "CNeUpGe":
stat = layers[3].AvgMaxVarByPool("Ge", pool, di).Avg
case "CNeUpGi":
stat = layers[3].AvgMaxVarByPool("Gi", pool, di).Avg
}
curModeDir.Float64(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
})
}
func (ss *Sim) ConfigStatAdaptFilt() {
net := ss.Net
prefix := "VSRotHVel"
cnely := net.LayerByName(prefix + "CNeUp")
cnepi := cnely.Params.PoolIndex(0)
ioly := net.LayerByName(prefix + "IO")
// iopi := ioly.Params.PoolIndex(0)
statNames := []string{"CNeUpMax", "IOErrs"}
statDescs := map[string]string{
"CNeUpMax": "Maximum activity across the trial for CNeUp Adaptive Filtering layer. Should be around .5 in general",
"IOErrs": "Average number of IO error spikes across trials",
}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
for _, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
})
metadata.SetDoc(tsr, statDescs[name])
continue
}
switch level {
case Trial:
for di := range ndata {
var stat float32
switch name {
case "CNeUpMax":
stat = axon.PoolAvgMax(axon.AMCaPMax, axon.AMCycle, axon.Max, cnepi, uint32(di))
case "IOErrs":
stat = ioly.AvgMaxVarByPool("TimePeak", 0, di).Avg
}
curModeDir.Float64(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
case Run:
stat := stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default:
stat := stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"DepthP_CorSim", "HeadDirP_CorSim"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
// nv.ViewDefaults()
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.1, 2.0)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
}
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.CycleUpdateInterval = 10
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.03
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Cycle, ss.StatCounters) // Theta
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
ss.ConfigNetView(nv)
evtab, _ := ss.GUI.Tabs.NewTab("Env")
ev := ss.Envs.ByMode(etime.Train).(*emery.EmeryEnv)
ss.EnvGUI = &emery.GUI{}
ss.EnvGUI.ConfigGUI(ev, evtab)
ss.StatsInit()
ss.GUI.Tabs.SelectTabIndex(0)
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) UpdateEnvGUI(mode Modes) {
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil || ss.EnvGUI == nil {
return
}
ss.EnvGUI.Update()
}
func (ss *Sim) RunNoGUI() {
// profile.Profiling = true
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
// profile.Report(time.Millisecond)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/deepspace"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[deepspace.Sim, deepspace.Config]() }
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
// Actions are motor actions as abstracted coordinated plans
// that unfold over time, at a level above individual muscles.
// They are recorded in data continuously, with 0 meaning no
// action being taken, and non-zero indicating strength of action.
type Actions int32 //enums:enum
const (
Rotate Actions = iota
Forward
)
// ActionMaxValues are the expected maximum action values, used for normalizing.
var ActionMaxValues = [ActionsN]float32{3, 3}
// NextAction specifies the next value for given action, for given data parallel agent.
// This simulates the sequence of planning a new action followed by that action
// actually being executed. The planning state is critical for predictive learning.
// Multiple calls can be made per step, for as many actions as need updating.
// Call RenderNextActions when done specifying NextAction's, so they are presented
// to the sim.
func (ev *EmeryEnv) NextAction(di int, act Actions, val float32) {
es := ev.EmeryState(di)
es.NextActions[act] = val
}
// TakeNextActions actually starts performing in the physics model the
// actions specified by prior NextAction calls, copying NextActions
// to CurActions and activating them in the model.
// This calls RenderCurAction so the current action is shown to the sim.
func (ev *EmeryEnv) TakeNextActions() {
for di := range ev.NData {
es := ev.EmeryState(di)
for act := range ActionsN {
val := es.NextActions[act]
es.CurActions[act] = val
ev.WriteData(ev.ActionData, di, act.String(), val)
}
}
ev.RenderCurActions()
}
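// exampleActionSequence is an illustrative sketch of the plan-then-execute
// protocol described above (the rotation value is an arbitrary example):
// NextAction specifies the planned action for a data-parallel item, and
// TakeNextActions then copies NextActions to CurActions and executes them
// in the physics model.
func exampleActionSequence(ev *EmeryEnv, di int) {
ev.NextAction(di, Rotate, 0.5) // plan a small rotation
ev.TakeNextActions()           // execute all planned actions
}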
// TakeActions applies current actions to physics.
func (ev *EmeryEnv) TakeActions() {
for di := range ev.NData {
for act := range ActionsN {
val := ev.ReadData(ev.ActionData, di, act.String(), 10) // 0 = last written
ev.TakeAction(di, act, val)
}
}
}
// ZeroActions zeroes the action values after WriteIndex has been incremented.
func (ev *EmeryEnv) ZeroActions() {
for di := range ev.NData {
for act := range ActionsN {
ev.WriteData(ev.ActionData, di, act.String(), 0)
}
}
}
//////// Rendering
// RenderNextActions renders the action values specified in NextAction calls.
func (ev *EmeryEnv) RenderNextActions() {
ev.renderActions(false)
}
// RenderCurActions renders the current action values, from TakeNextActions.
func (ev *EmeryEnv) RenderCurActions() {
ev.renderActions(true)
}
// renderActions renders the action values, using CurActions if cur is true,
// otherwise NextActions.
func (ev *EmeryEnv) renderActions(cur bool) {
for act := range Forward { // only render below Forward for now
for di := range ev.NData {
es := ev.EmeryState(di)
val := es.NextActions[act]
if cur {
val = es.CurActions[act]
}
val /= ActionMaxValues[act]
ev.RenderValue(di, act.String(), val)
}
}
}
// Copyright (c) 2019, Cogent Core. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
import (
"image"
"cogentcore.org/core/math32"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
)
// EmeryBodies are indexes for the physics body elements of Emery.
type EmeryBodies int32 //enums:enum
const (
EmeryBody EmeryBodies = iota
EmeryHead
EmeryEyeL
EmeryEyeR
)
// Emery encapsulates all the emery agent config and physics.
type Emery struct {
// full length of emery
Length float32
// emery object
Obj *builder.Object `display:"-"`
// PlaneXZ joint for controlling 2D position.
XZ *builder.Joint
// joint for the neck.
Neck *builder.Joint
// Right eye of emery
EyeR *builder.Body `display:"-"`
}
func (em *Emery) Defaults() {
em.Length = 1
}
// Make constructs a new Emery virtual hamster Object in given World.
func (em *Emery) Make(wl *builder.World, sc *phyxyz.Scene, ev *EmeryEnv) {
name := "emery"
mass := float32(0.5) // kg -- typical for adult rat
hl := em.Length / 2
hh := hl / 2
hw := hh
headsz := hh * 0.75
eyesz := headsz * .2
rot := math32.NewQuatIdentity()
obj := wl.NewObject()
em.Obj = obj
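// bodies are added in EmeryBodies order (body, head, left eye, right eye),
// so the sensor below can index them via obj.Body(int(EmeryHead)) etc.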
emr := obj.NewDynamicSkin(sc, name+"_body", physics.Box, "purple", mass, math32.Vec3(hw, hh, hl), math32.Vec3(0, hh, 0), rot)
// esk := emr.Skin
// esk.InitSkin = func(sld *xyz.Solid) {
// esk.BoxInit(sld)
// sld.Updater(func() {
// esk.Color = vw.StateColor()
// esk.UpdateColor(esk.Color, sld)
// })
// }
em.XZ = obj.NewJointPlaneXZ(nil, emr, math32.Vec3(0, 0, 0), math32.Vec3(0, -hh, 0))
headPos := math32.Vec3(0, hh, -(hl + headsz))
head := obj.NewDynamicSkin(sc, name+"_head", physics.Box, "tan", mass*.1, math32.Vec3(headsz, headsz, headsz), headPos, rot)
em.Neck = obj.NewJointFixed(emr, head, math32.Vec3(0, 0, -hl), math32.Vec3(0, 0, headsz))
em.Neck.ParentFixed = true
em.Neck.NoLinearRotation = true
obj.NewSensor(func(obj *builder.Object) {
hd := obj.Body(int(EmeryHead))
world := obj.WorldIndex - 1
params := physics.GetParams(0)
av := physics.AngularVelocityAt(hd.DynamicIndex, math32.Vec3(headsz, 0, 0), math32.Vec3(0, 1, 0))
ev.SetSenseValue(world, VSRotHVel, -av.Z)
bd := obj.Body(int(EmeryBody))
av = physics.DynamicQuat(bd.DynamicIndex, params.Next).ToEuler()
ev.SetSenseValue(world, VSRotHDir, math32.RadToDeg(av.Y))
av = physics.AngularAccelAt(hd.DynamicIndex, math32.Vec3(headsz, 0, 0), math32.Vec3(0, 1, 0))
ev.SetSenseValue(world, VSRotHAccel, av.Z)
av = physics.DynamicVel(hd.DynamicIndex, params.Next)
ev.SetSenseValue(world, VSLinearVel, av.Length())
av = physics.DynamicAcc(hd.DynamicIndex, params.Next)
ev.SetSenseValue(world, VSLinearAccel, av.Length())
})
eyeoff := math32.Vec3(-headsz*.6, headsz*.1, -(headsz + eyesz*.3))
bd := obj.NewDynamicSkin(sc, name+"_eye-l", physics.Box, "green", mass*.01, math32.Vec3(eyesz, eyesz*.5, eyesz*.2), headPos.Add(eyeoff), rot)
ej := obj.NewJointFixed(head, bd, eyeoff, math32.Vec3(0, 0, -eyesz*.3))
ej.ParentFixed = true
eyeoff.X = headsz * .6
em.EyeR = obj.NewDynamicSkin(sc, name+"_eye-r", physics.Box, "green", mass*.01, math32.Vec3(eyesz, eyesz*.5, eyesz*.2), headPos.Add(eyeoff), rot)
ej = obj.NewJointFixed(head, em.EyeR, eyeoff, math32.Vec3(0, 0, -eyesz*.3))
ej.ParentFixed = true
// emr.Updater(func() {
// ev := vw.Env
// x, y := vw.Geom.Pos(ev.Arm, ev.Pos)
// emr.Rel.Pos.Set(x, 0, y)
// })
}
// EmeryState has all the state info for each Emery instance.
type EmeryState struct {
// SenseValues has the current sensory values from physics model,
// stored here by the Sensor function for subsequent recording.
SenseValues [SensesN]float32
// SenseAverages has the average delayed sensory values over
// SensoryWindow, which goes into SenseNormed for rendering.
SenseAverages [SensesN]float32
// SenseNormed has the normalized versions of SenseAverages,
// which is what is actually rendered.
SenseNormed [SensesN]float32
// current captured images
EyeRImage, EyeLImage image.Image
// NextActions are the next action values set by sim, and rendered
// depending on RenderNextAction value.
NextActions [ActionsN]float32
// CurActions are the current action values, updated by TakeNextAction,
// and rendered depending on RenderNextAction value.
CurActions [ActionsN]float32
}
// SetSenseValue sets the current sense value from the physics sensor.
func (ev *EmeryEnv) SetSenseValue(di int, sense Senses, val float32) {
es := ev.EmeryState(di)
es.SenseValues[sense] = val
}
// TakeAction performs given action in Emery.
func (ev *EmeryEnv) TakeAction(di int, act Actions, val float32) {
// fmt.Println("Action:", di, act, val)
jd := ev.Physics.Builder.ReplicaJoint(ev.Emery.XZ, di)
switch act {
case Rotate:
jd.AddTargetAngle(2, val, ev.Params.ActionStiff)
case Forward:
ang := math32.Pi*.5 - jd.DoF(2).Current.Pos
jd.AddPlaneXZPos(ang, val, ev.Params.ActionStiff)
}
}
// SetEmeryInitConfig sets the initial configuration of emery per di.
func (ev *EmeryEnv) SetEmeryInitConfig(di int) {
// ang := -5 + 10*ev.Rand.Float32()
// ang := float32(di) * 20
ang := float32(0)
obj := ev.Physics.Builder.ReplicaObject(ev.Emery.Obj, di)
obj.RotateOnAxisBody(0, 0, 1, 0, ang)
obj.PoseToPhysics()
}
// Code generated by "core generate -add-types -add-funcs -setters -gosl"; DO NOT EDIT.
package emery
import (
"cogentcore.org/core/enums"
)
var _ActionsValues = []Actions{0, 1}
// ActionsN is the highest valid value for type Actions, plus one.
//
//gosl:start
const ActionsN Actions = 2
//gosl:end
var _ActionsValueMap = map[string]Actions{`Rotate`: 0, `Forward`: 1}
var _ActionsDescMap = map[Actions]string{0: ``, 1: ``}
var _ActionsMap = map[Actions]string{0: `Rotate`, 1: `Forward`}
// String returns the string representation of this Actions value.
func (i Actions) String() string { return enums.String(i, _ActionsMap) }
// SetString sets the Actions value from its string representation,
// and returns an error if the string is invalid.
func (i *Actions) SetString(s string) error {
return enums.SetString(i, s, _ActionsValueMap, "Actions")
}
// Int64 returns the Actions value as an int64.
func (i Actions) Int64() int64 { return int64(i) }
// SetInt64 sets the Actions value from an int64.
func (i *Actions) SetInt64(in int64) { *i = Actions(in) }
// Desc returns the description of the Actions value.
func (i Actions) Desc() string { return enums.Desc(i, _ActionsDescMap) }
// ActionsValues returns all possible values for the type Actions.
func ActionsValues() []Actions { return _ActionsValues }
// Values returns all possible values for the type Actions.
func (i Actions) Values() []enums.Enum { return enums.Values(_ActionsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Actions) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Actions) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Actions") }
var _EmeryBodiesValues = []EmeryBodies{0, 1, 2, 3}
// EmeryBodiesN is the highest valid value for type EmeryBodies, plus one.
//
//gosl:start
const EmeryBodiesN EmeryBodies = 4
//gosl:end
var _EmeryBodiesValueMap = map[string]EmeryBodies{`EmeryBody`: 0, `EmeryHead`: 1, `EmeryEyeL`: 2, `EmeryEyeR`: 3}
var _EmeryBodiesDescMap = map[EmeryBodies]string{0: ``, 1: ``, 2: ``, 3: ``}
var _EmeryBodiesMap = map[EmeryBodies]string{0: `EmeryBody`, 1: `EmeryHead`, 2: `EmeryEyeL`, 3: `EmeryEyeR`}
// String returns the string representation of this EmeryBodies value.
func (i EmeryBodies) String() string { return enums.String(i, _EmeryBodiesMap) }
// SetString sets the EmeryBodies value from its string representation,
// and returns an error if the string is invalid.
func (i *EmeryBodies) SetString(s string) error {
return enums.SetString(i, s, _EmeryBodiesValueMap, "EmeryBodies")
}
// Int64 returns the EmeryBodies value as an int64.
func (i EmeryBodies) Int64() int64 { return int64(i) }
// SetInt64 sets the EmeryBodies value from an int64.
func (i *EmeryBodies) SetInt64(in int64) { *i = EmeryBodies(in) }
// Desc returns the description of the EmeryBodies value.
func (i EmeryBodies) Desc() string { return enums.Desc(i, _EmeryBodiesDescMap) }
// EmeryBodiesValues returns all possible values for the type EmeryBodies.
func EmeryBodiesValues() []EmeryBodies { return _EmeryBodiesValues }
// Values returns all possible values for the type EmeryBodies.
func (i EmeryBodies) Values() []enums.Enum { return enums.Values(_EmeryBodiesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i EmeryBodies) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *EmeryBodies) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "EmeryBodies")
}
var _SensesValues = []Senses{0, 1, 2, 3, 4, 5}
// SensesN is the highest valid value for type Senses, plus one.
//
//gosl:start
const SensesN Senses = 6
//gosl:end
var _SensesValueMap = map[string]Senses{`VSRotHVel`: 0, `VMRotHVel`: 1, `VSRotHDir`: 2, `VSRotHAccel`: 3, `VSLinearVel`: 4, `VSLinearAccel`: 5}
var _SensesDescMap = map[Senses]string{0: `VSRotHVel is vestibular rotational head velocity (horiz plane).`, 1: `VMRotHVel is full-field visual-motion rotation (horiz plane).`, 2: `VSRotHDir is the ground-truth actual head direction (horiz plane).`, 3: `VSRotHAccel is vestibular rotational head acceleration (horiz plane).`, 4: `VSLinearVel is vestibular linear velocity. This is not actually something that can be sensed directly by the vestibular system: only linear accel.`, 5: `VSLinearAccel is vestibular linear acceleration.`}
var _SensesMap = map[Senses]string{0: `VSRotHVel`, 1: `VMRotHVel`, 2: `VSRotHDir`, 3: `VSRotHAccel`, 4: `VSLinearVel`, 5: `VSLinearAccel`}
// String returns the string representation of this Senses value.
func (i Senses) String() string { return enums.String(i, _SensesMap) }
// SetString sets the Senses value from its string representation,
// and returns an error if the string is invalid.
func (i *Senses) SetString(s string) error { return enums.SetString(i, s, _SensesValueMap, "Senses") }
// Int64 returns the Senses value as an int64.
func (i Senses) Int64() int64 { return int64(i) }
// SetInt64 sets the Senses value from an int64.
func (i *Senses) SetInt64(in int64) { *i = Senses(in) }
// Desc returns the description of the Senses value.
func (i Senses) Desc() string { return enums.Desc(i, _SensesDescMap) }
// SensesValues returns all possible values for the type Senses.
func SensesValues() []Senses { return _SensesValues }
// Values returns all possible values for the type Senses.
func (i Senses) Values() []enums.Enum { return enums.Values(_SensesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Senses) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Senses) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Senses") }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
//go:generate core generate -add-types -add-funcs -setters -gosl
import (
"fmt"
"image"
"cogentcore.org/core/colors"
"cogentcore.org/core/gpu"
"cogentcore.org/core/xyz"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/popcode"
"github.com/emer/v1vision/v1std"
"github.com/emer/v1vision/v1vision"
)
// EmeryEnv is the emery rat environment.
type EmeryEnv struct {
// name of this environment: Train or Test
Name string
// NData is number of data-parallel Emery's to run.
NData int
// Params has all the parameters for the environment.
Params Params
// RenderStates should be updated by the sim prior to running Step.
// It tells Step to render the States input for the model.
// Otherwise, physics is updated and sensory state is recorded, but
// nothing is rendered. Rendered states average over Params.AvgWindow.
RenderStates bool
// Visual motion processing
Motion v1std.MotionDoG
// Image processing for Motion.
MotionImage v1std.Image
// World specifies the physical world parameters.
World World
// Emery has the parameters for (the first) Emery.
Emery Emery
// The core physics elements: Model, Builder, Scene
Physics builder.Physics
// Camera has offscreen render camera settings
Camera phyxyz.Camera
// CurrentTime is the current timestep in msec. Counts up every Step,
// 1 per msec (cycle).
CurrentTime int
// SenseData records the sensory data for each emery agent.
SenseData *tensorfs.Node
// ActionData records the motor action data for each emery agent.
ActionData *tensorfs.Node
// WriteIndex is the current write index in tensorfs Cycle-level
// sensory and motor data. WriteIncr post-increments this index after each write.
WriteIndex int `edit:"-"`
// AvgWriteIndex is the current write index for averages data,
// which is less frequently updated.
AvgWriteIndex int `edit:"-"`
// SensoryDelays are the actual delays for each sense: from [SensoryDelays]
// params.
SensoryDelays [SensesN]int
// SenseNorms are the normalization factors for each sense (1/typical max).
SenseNorms [SensesN]float32
// Emerys has the state values for each NData emery.
Emerys []EmeryState
// States is the current rendered state tensors.
States map[string]*tensor.Float32
// Rand is the random number generator for the env.
// All random calls must use this.
// Set seed here for weight initialization values.
Rand randx.SysRand `display:"-"`
// Cycle tracks cycles, for interval-based updates etc.
Cycle env.Counter
// random seed
RandSeed int64 `edit:"-"`
}
func (ev *EmeryEnv) Label() string { return ev.Name }
func (ev *EmeryEnv) EmeryState(di int) *EmeryState { return &ev.Emerys[di] }
func (ev *EmeryEnv) Defaults() {
ev.Params.Defaults()
ev.Emery.Defaults()
ev.World.Defaults()
ev.Camera.Defaults()
ev.Camera.FOV = 100
ev.Camera.Size = image.Point{64, 64}
ev.Motion.Defaults()
ev.Motion.SetSize(8, 2)
ev.MotionImage.Size = ev.Camera.Size
for s := range SensesN {
ev.SenseNorms[s] = 1.0 / SenseMaxValues[s]
}
}
// Config configures the environment
func (ev *EmeryEnv) Config(ndata, ncycles int, dataNode *tensorfs.Node, netGPU *gpu.GPU) {
ev.NData = ndata
ev.Cycle.Max = ncycles
ev.Params.TimeBins = ncycles / ev.Params.TimeBinCycles
v1vision.ComputeGPU = netGPU
ev.Motion.Config(ndata, ev.MotionImage.Size)
ev.Emerys = make([]EmeryState, ndata)
ev.SenseData = dataNode.Dir("Senses")
ev.ActionData = dataNode.Dir("Actions")
ev.ConfigSensoryDelays()
ev.States = make(map[string]*tensor.Float32)
// No suffix = rate code; MF and Thal suffixes = population-code versions for cortex.
// The rate code has positive and negative columns, with redundant discounted copies across units.
for s := range VSRotHDir { // only render senses below VSRotHDir (ground truth)
ev.States[s.String()] = tensor.NewFloat32(ndata, ev.Params.UnitsPer, 2)
ev.States[s.String()+"MF"] = tensor.NewFloat32(ndata, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
ev.States[s.String()+"Thal"] = tensor.NewFloat32(ndata, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
}
for a := range Forward { // only rotate now
ev.States[a.String()] = tensor.NewFloat32(ndata, ev.Params.UnitsPer, 2)
ev.States[a.String()+"MF"] = tensor.NewFloat32(ndata, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
ev.States[a.String()+"Thal"] = tensor.NewFloat32(ndata, ev.Params.TimeBins, 1, 1, ev.Params.PopCodeUnits)
}
gp := netGPU
var dev *gpu.Device
var err error
if gp == nil {
gp, dev, err = gpu.NoDisplayGPU()
} else {
dev, err = gpu.NewDevice(netGPU)
}
if err != nil {
panic(err)
}
sc := phyxyz.NoDisplayScene(gp, dev)
ev.ConfigPhysics(sc)
}
func (ev *EmeryEnv) Init(run int) {
ev.RandSeed = int64(73 + run)
if ev.Rand.Rand == nil {
ev.Rand.NewRand(ev.RandSeed)
} else {
ev.Rand.Seed(ev.RandSeed)
}
ev.CurrentTime = 0
ev.WriteIndex = 0
ev.Motion.Init()
ev.Cycle.Init()
ev.Cycle.Cur = -1
if ev.Physics.Model != nil {
ev.Physics.InitState()
for di := range ev.NData {
ev.SetEmeryInitConfig(di)
}
physics.ToGPU(physics.DynamicsVar)
}
}
func (ev *EmeryEnv) ConfigPhysics(sc *xyz.Scene) {
ev.Physics.Model = physics.NewModel()
ev.Physics.Builder = builder.NewBuilder()
ev.Physics.Model.GPU = false // todo: true, set GPU
params := physics.GetParams(0)
// params.Gravity.Y = 0
params.ControlDt = 0.1
params.SubSteps = 1
params.Dt = 0.001
ev.ConfigXYZScene(sc)
ev.Physics.Scene = phyxyz.NewScene(sc)
wl := ev.Physics.Builder.NewGlobalWorld()
ev.World.Make(wl, ev.Physics.Scene, ev)
ew := ev.Physics.Builder.NewWorld()
ev.Emery.Make(ew, ev.Physics.Scene, ev)
ev.Physics.Builder.ReplicateWorld(nil, 1, 1, ev.NData)
// note: critical not to include the scene, so skins are created only for the first body
ev.Physics.Build()
}
func (ev *EmeryEnv) ConfigXYZScene(sc *xyz.Scene) {
sc.Background = colors.Scheme.Select.Container
xyz.NewAmbient(sc, "ambient", 0.3, xyz.DirectSun)
dir := xyz.NewDirectional(sc, "dir", 1, xyz.DirectSun)
dir.Pos.Set(0, 2, 1) // default: 0,1,1 = above and behind us (we are at 0,0,X)
}
func (ev *EmeryEnv) StepPhysics() {
ev.Physics.StepQuiet(1)
}
// WriteIncr increments the WriteIndex, after writing current row.
// Wraps around at BufferSize.
func (ev *EmeryEnv) WriteIncr() {
ev.WriteIndex++
if ev.WriteIndex >= ev.Params.BufferSize {
ev.WriteIndex = 0
}
}
// AvgWriteIncr increments the AvgWriteIndex, after writing current row.
// Wraps around at BufferSize.
func (ev *EmeryEnv) AvgWriteIncr() {
ev.AvgWriteIndex++
if ev.AvgWriteIndex >= ev.Params.BufferSize/10 {
ev.AvgWriteIndex = 0
}
}
// PriorIndex returns index into tensorfs data relative to the
// current WriteIndex, for n steps (ms) prior states,
// where 0 = last-added data, and e.g., 40 = 40 msec (steps) prior.
// Does the necessary wrapping.
func (ev *EmeryEnv) PriorIndex(nPrior int) int {
ix := ev.WriteIndex - nPrior
for ix < 0 {
ix += ev.Params.BufferSize
}
return ix
}
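// priorIndexSketch is an illustrative, hypothetical sketch (not part of the
// environment API) of the ring-buffer indexing used by WriteIncr and
// PriorIndex above: the write index advances with wraparound at bufferSize,
// and looking back nPrior steps wraps in the other direction. For example,
// with bufferSize = 4000 and writeIndex = 5, nPrior = 10 yields index 3995.
func priorIndexSketch(writeIndex, nPrior, bufferSize int) int {
	ix := writeIndex - nPrior
	for ix < 0 {
		ix += bufferSize // wrap back to the end of the buffer
	}
	return ix
}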
// diName is the string rep for given data parallel index, for tensorfs.
func (ev *EmeryEnv) diName(di int) string {
return fmt.Sprintf("%02d", di)
}
// WriteData writes sensory / action data to given tensorfs dir, for given
// di data parallel index, state name, and value. Writes to WriteIndex.
func (ev *EmeryEnv) WriteData(dir *tensorfs.Node, di int, name string, val float32) {
dd := dir.Dir(ev.diName(di))
dd.Float32(name, ev.Params.BufferSize).SetFloat1D(float64(val), ev.WriteIndex)
}
// ReadData reads sensory / action data from given tensorfs dir, for given
// di data parallel index, state name, and time prior offset (PriorIndex).
func (ev *EmeryEnv) ReadData(dir *tensorfs.Node, di int, name string, nPrior int) float32 {
dd := dir.Dir(ev.diName(di))
pidx := ev.PriorIndex(nPrior)
val := float32(dd.Float32(name, ev.Params.BufferSize).Float1D(pidx))
return val
}
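// writeReadSketch is an illustrative, hypothetical helper (not part of the
// environment API) showing the intended WriteData / WriteIncr / ReadData
// pairing: a value is recorded at the current WriteIndex, the ring buffer is
// advanced, and reading with nPrior = 1 then retrieves the value recorded one
// step earlier. The sense and value used here are arbitrary examples.
func writeReadSketch(ev *EmeryEnv, di int) float32 {
	dir := ev.SenseData.Dir("Cycle")
	ev.WriteData(dir, di, VSRotHVel.String(), 0.05) // record at WriteIndex
	ev.WriteIncr()                                  // advance with wraparound
	return ev.ReadData(dir, di, VSRotHVel.String(), 1) // value from 1 step back
}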
func (ev *EmeryEnv) State(element string) tensor.Values {
return ev.States[element]
}
// String returns the current state as a string
func (ev *EmeryEnv) String() string {
// return fmt.Sprintf("Pos_%g_%g_Ang_%g_Act_%s", ps.Pos.X, ps.Pos.Y, ang, ev.LastAct.String())
return "todo"
}
// Step is called to advance the environment state at every cycle.
// Actions set after the prior step are taken first.
func (ev *EmeryEnv) Step() bool {
ev.Cycle.Incr()
ev.TakeActions()
ev.StepPhysics()
ev.RecordSenses()
if ev.RenderStates {
ev.RenderSenses()
ev.RenderCurActions()
}
ev.CurrentTime++
ev.WriteIncr()
ev.ZeroActions()
return true
}
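// stepLoopSketch is an illustrative, hypothetical driver (not part of the
// environment API): after Config and Init, a sim would typically enable
// RenderStates when model input is needed and call Step once per cycle (ms);
// ncycles here is just an example argument.
func stepLoopSketch(ev *EmeryEnv, ncycles int) {
	ev.RenderStates = true // ask Step to render the States input tensors
	for range ncycles {
		ev.Step() // actions, physics, sensing, and rendering for one cycle
	}
}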
// RenderValue renders rate code and population-code state,
// as normalized 0-1 value.
func (ev *EmeryEnv) RenderValue(di int, snm string, val float32) {
ev.RenderRate(di, snm, val)
bin := max(ev.Cycle.Cur/ev.Params.TimeBinCycles, 0)
ev.RenderPop(di, bin, true, snm+"MF", val)
ev.RenderPop(di, bin, (bin == 0), snm+"Thal", val)
}
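// timeBinSketch is an illustrative sketch (not part of the environment API)
// of the cycle-to-bin mapping used in RenderValue above: with the default
// TimeBinCycles of 10, cycle 37 falls in bin 3, and negative cycle counts
// (e.g., the initial -1) are clamped to bin 0.
func timeBinSketch(cycle, timeBinCycles int) int {
	return max(cycle/timeBinCycles, 0)
}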
// RenderRate renders the rate code state for a normalized 0-1 value:
// negative and positive magnitudes go into separate columns (0 and 1
// on the last axis), repeated across UnitsPer units with a small
// discount so values differ across units.
func (ev *EmeryEnv) RenderRate(di int, snm string, val float32) {
minVal := float32(0.1)
minScale := 1.0 - minVal
var nv, pv float32
if val < 0 {
nv = -val
} else {
pv = val
}
df := float32(0.9)
vs := ev.States[snm]
for i := range ev.Params.UnitsPer {
vs.Set(minVal+minScale*nv, di, i, 0)
vs.Set(minVal+minScale*pv, di, i, 1)
nv *= df // discount so values are different across units
pv *= df
}
}
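// rateCodeSketch is a standalone, illustrative sketch (not part of the
// environment API) of the value layout produced by RenderRate above: a
// normalized value in [-1, 1] fills either the negative (column 0) or
// positive (column 1) slot, offset by a 0.1 floor and discounted by 0.9
// per unit so the UnitsPer copies differ slightly.
func rateCodeSketch(val float32, unitsPer int) [][2]float32 {
	minVal := float32(0.1)
	minScale := 1 - minVal
	var nv, pv float32
	if val < 0 {
		nv = -val
	} else {
		pv = val
	}
	out := make([][2]float32, unitsPer)
	for i := range out {
		out[i][0] = minVal + minScale*nv
		out[i][1] = minVal + minScale*pv
		nv *= 0.9 // discount so values differ across units
		pv *= 0.9
	}
	return out
}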
// RenderPop renders the population code state into the given time bin.
// If clear is true, all time bins are zeroed before rendering.
func (ev *EmeryEnv) RenderPop(di, bin int, clear bool, snm string, val float32) {
vs := ev.States[snm]
if clear {
for i := range ev.Params.TimeBins {
sv := vs.SubSpace(di, i, 0).(*tensor.Float32)
tensor.SetAllFloat64(sv, 0)
}
}
sv := vs.SubSpace(di, bin, 0).(*tensor.Float32)
ev.Params.PopCode.Encode(&sv.Values, val, ev.Params.PopCodeUnits, popcode.Set)
}
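// popCodeSketch is an illustrative sketch (not part of the environment API)
// of how a scalar becomes a population-coded pattern, using the same popcode
// calls as Params.Defaults and RenderPop; the range, sigma, and 12 units here
// mirror the defaults but are assumptions for illustration only.
func popCodeSketch(val float32) []float32 {
	var pc popcode.OneD
	pc.Defaults()
	pc.SetRange(-1.2, 1.2, 0.2) // normalized range and tuning sigma
	vals := make([]float32, 12) // 12 units, as in the PopCodeUnits default
	pc.Encode(&vals, val, 12, popcode.Set)
	return vals
}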
// Compile-time check that EmeryEnv implements the env.Env interface.
var _ env.Env = (*EmeryEnv)(nil)
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
import (
"image"
"cogentcore.org/core/core"
"cogentcore.org/core/events"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/styles"
"cogentcore.org/core/styles/units"
"cogentcore.org/core/tree"
"cogentcore.org/core/xyz/xyzcore"
"cogentcore.org/lab/physics/phyxyz"
)
// GUI provides a GUI view onto the EmeryEnv
type GUI struct {
// Env is the environment we're viewing
Env *EmeryEnv
// Di is the data parallel item to view.
Di int
// GUI version of scene
Scene *phyxyz.Scene
// 3D visualization of the Scene
SceneEditor *xyzcore.SceneEditor
// first-person right-eye full field view
EyeRImageDisp *core.Image `display:"-"`
// first-person left-eye fovea view
EyeLImageDisp *core.Image `display:"-"`
}
func (ge *GUI) ConfigGUI(ev *EmeryEnv, b core.Widget) {
ge.Env = ev
core.NewToolbar(b).Maker(ge.MakeToolbar)
fr := core.NewFrame(b)
fr.Styler(func(s *styles.Style) {
s.Direction = styles.Column
s.Grow.Set(1, 1)
})
imfr := core.NewFrame(fr)
imfr.Styler(func(s *styles.Style) {
s.Display = styles.Grid
s.Columns = 2
s.Grow.Set(0, 0)
})
core.NewText(imfr).SetText("Eye-View, Left:")
core.NewText(imfr).SetText("Right:")
ge.EyeLImageDisp = core.NewImage(imfr)
ge.EyeLImageDisp.Styler(func(s *styles.Style) {
s.Min.Set(units.Dot(128))
})
ge.EyeLImageDisp.Name = "eye-l-image"
ge.EyeLImageDisp.Image = image.NewRGBA(image.Rectangle{Max: ev.Camera.Size})
ge.EyeRImageDisp = core.NewImage(imfr)
ge.EyeRImageDisp.Styler(func(s *styles.Style) {
s.Min.Set(units.Dot(128))
})
ge.EyeRImageDisp.Name = "eye-r-image"
ge.EyeRImageDisp.Image = image.NewRGBA(image.Rectangle{Max: ev.Camera.Size})
// re-use existing scene!
ge.SceneEditor = xyzcore.NewSceneEditor(fr)
ge.SceneEditor.UpdateWidget()
sc := ge.SceneEditor.SceneXYZ()
sc.Camera.Pose.Pos = math32.Vec3(0, 29, -4)
sc.Camera.LookAt(math32.Vec3(0, 4, -5), math32.Vec3(0, 1, 0))
sc.SaveCamera("2")
sc.Camera.Pose.Pos = math32.Vec3(0, 24, 32)
sc.Camera.LookAt(math32.Vec3(0, 3.6, 0), math32.Vec3(0, 1, 0))
sc.SaveCamera("1")
sc.SaveCamera("default")
ge.Env.ConfigXYZScene(sc)
ge.Scene = phyxyz.NewScene(sc)
ge.Env.Physics.Builder.CloneSkins(ge.Scene)
ge.Scene.Init(ge.Env.Physics.Model)
}
func (ge *GUI) Update() {
if ge.SceneEditor == nil || !ge.SceneEditor.IsVisible() { // || !em.Disp {
return
}
ev := ge.Env
es := ev.EmeryState(ge.Di)
if es.EyeRImage != nil {
ge.EyeRImageDisp.SetImage(es.EyeRImage)
ge.EyeRImageDisp.NeedsRender()
}
if es.EyeLImage != nil {
ge.EyeLImageDisp.SetImage(es.EyeLImage)
ge.EyeLImageDisp.NeedsRender()
}
ge.Scene.Update()
ge.SceneEditor.NeedsRender()
}
func (ge *GUI) MakeToolbar(p *tree.Plan) {
tree.Add(p, func(w *core.Button) {
w.SetText("Init").SetIcon(icons.ClearAll).
SetTooltip("Init env").
OnClick(func(e events.Event) {
ge.Env.Init(0)
})
})
tt := "Data Parallel (di) world to view"
tree.Add(p, func(w *core.Text) { w.SetText("Di:").SetTooltip(tt) })
tree.Add(p, func(w *core.Spinner) {
core.Bind(&ge.Di, w)
w.SetMin(0).SetTooltip(tt)
w.Styler(func(s *styles.Style) {
replN := ge.Env.NData
w.SetMax(float32(replN - 1))
s.SetEnabled(replN > 1)
})
w.OnChange(func(e events.Event) {
sc := ge.Scene
sc.ReplicasIndex = ge.Di
sc.Update()
ge.Update()
})
})
}
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
import "github.com/emer/emergent/v2/popcode"
// SensoryDelays are delays from motor actions for different sensory modalities.
type SensoryDelays struct {
Vestibular int `default:"40"`
Visual int `default:"50"`
}
func (sd *SensoryDelays) Defaults() {
sd.Vestibular = 40
sd.Visual = 50
}
// Params are miscellaneous parameters for the environment.
type Params struct {
// MaxRotate is maximum rotation angle magnitude per action, in degrees.
MaxRotate float32
// VisMotionInterval is interval between vis motion computation in cycles.
// This is a very expensive computation in general, so it is spaced out.
// todo: revisit once mac metal timer bug is fixed in wgpu.
VisMotionInterval int
// TimeBinCycles is the number of cycles per time bin, which also determines
// how frequently the inputs are applied to the network, which affects performance
// and learning there.
TimeBinCycles int `default:"10"`
// TimeBins is the total number of time bins per trial, for MF and Thal reps:
TimeBins int
// UnitsPer is the number of units per localist value.
UnitsPer int
// PopCodeUnits is the number of units to use for population code.
PopCodeUnits int
// AvgWindow is the time window in Cycles (ms) over which the sensory
// state is averaged, for the purposes of rendering state.
AvgWindow int
// ActionStiff is the stiffness for performing actions.
ActionStiff float32
// population code, for linear values, -1..1, in normalized units
PopCode popcode.OneD
// LeftEye determines whether to process left eye image or not.
LeftEye bool
// BufferSize is the number of time steps (ms) to retain in the tensorfs
// sensory and motor state buffers.
BufferSize int `default:"4000" edit:"-"`
// Delays are sensory delays
Delays SensoryDelays `display:"inline"`
}
func (pr *Params) Defaults() {
pr.Delays.Defaults()
pr.LeftEye = false
pr.MaxRotate = 5
pr.VisMotionInterval = 5
pr.TimeBinCycles = 10
pr.TimeBins = 20 // updated with actual per cycles
pr.AvgWindow = 20
pr.UnitsPer = 4
pr.PopCodeUnits = 12 // 12 > 16 for both
pr.ActionStiff = 1000
pr.BufferSize = 4000
popSigma := float32(0.2) // .15 > .2 for vnc, but opposite for eye
pr.PopCode.Defaults()
pr.PopCode.SetRange(-1.2, 1.2, popSigma) // 1.2 > 1.1 for eye
}
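// exampleParamsSketch is an illustrative sketch (not part of the environment
// API): typical construction calls Defaults and then uses the generated
// setters to override specific values; the overrides shown are arbitrary
// examples, not recommended settings.
func exampleParamsSketch() *Params {
	pr := &Params{}
	pr.Defaults()
	pr.SetUnitsPer(6).SetPopCodeUnits(16) // hypothetical overrides
	return pr
}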
// Copyright (c) 2025, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
import (
"cogentcore.org/core/math32"
"cogentcore.org/lab/stats/metric"
"cogentcore.org/lab/tensor"
)
// Senses are sensory inputs that unfold over time.
// Can also use to store abstracted sensory state.
type Senses int32 //enums:enum
const (
// VSRotHVel is vestibular rotational head velocity (horiz plane).
VSRotHVel Senses = iota
// VMRotHVel is full-field visual-motion rotation (horiz plane).
VMRotHVel
// note: VSRotHDir and the values listed below it are not rendered; they are for reference only.
// VSRotHDir is the ground-truth actual head direction (horiz plane).
VSRotHDir
// VSRotHAccel is vestibular rotational head acceleration (horiz plane).
VSRotHAccel
// VSLinearVel is vestibular linear velocity. This is not actually something
// that can be sensed directly by the vestibular system: only linear accel.
VSLinearVel
// VSLinearAccel is vestibular linear acceleration.
VSLinearAccel
)
// IsVestibular returns true if given sense is vestibular, else visual
func (s Senses) IsVestibular() bool {
if s == VMRotHVel {
return false
}
return true
}
// SenseMaxValues are the expected maximum sensory values, used for normalizing.
var SenseMaxValues = [SensesN]float32{.2, .2, 180, 10, 1, 10}
// ConfigSensoryDelays sets the sensory delays for each sense.
func (ev *EmeryEnv) ConfigSensoryDelays() {
for s := range SensesN {
if s.IsVestibular() {
ev.SensoryDelays[s] = ev.Params.Delays.Vestibular
} else {
ev.SensoryDelays[s] = ev.Params.Delays.Visual
}
}
}
// RecordSenses records senses, every step.
func (ev *EmeryEnv) RecordSenses() {
ev.Physics.Builder.RunSensors()
if ev.Cycle.Cur%ev.Params.VisMotionInterval == 0 {
// note: due to https://github.com/gfx-rs/wgpu/issues/8119
// this is very slow in reading back images from the GPU
// so until that is fixed, we need a reasonably large interval
// which is generally fine once the time-averaging is taken into account.
ev.VisMotion()
}
dir := ev.SenseData.Dir("Cycle")
for di := range ev.NData {
es := ev.EmeryState(di)
for sense := range SensesN {
snm := sense.String()
val := es.SenseValues[sense]
if sense.IsVestibular() {
for t := range ev.Params.VisMotionInterval {
val += ev.ReadData(dir, di, snm, t)
}
val /= float32(1 + float32(ev.Params.VisMotionInterval))
}
ev.WriteData(dir, di, snm, val)
}
}
}
// VisMotion computes the full-field visual motion value from the currently rendered eye images (reflecting the last action).
func (ev *EmeryEnv) VisMotion() {
eyesk := ev.Emery.EyeR.Skin
imgs := ev.Physics.Scene.RenderFrom(eyesk, &ev.Camera)
ev.Motion.RunImages(&ev.MotionImage, imgs...)
full := ev.Motion.FullField
for di := range ev.NData {
es := ev.EmeryState(di)
es.EyeRImage = imgs[di]
eyelv := full.Value(di, 0, 1) - full.Value(di, 0, 0)
ev.SetSenseValue(di, VMRotHVel, eyelv)
}
}
// AverageSenses computes time-lagged sensory averages over Params.AvgWindow.
// These are the values that are actually rendered for input to the model.
func (ev *EmeryEnv) AverageSenses() {
dir := ev.SenseData.Dir("Cycle")
avgDir := ev.SenseData.Dir("Avg")
avgBufSz := ev.Params.BufferSize / 10
for s := range SensesN {
del := ev.SensoryDelays[s]
for di := range ev.NData {
es := ev.EmeryState(di)
diName := ev.diName(di)
ts := dir.Dir(diName).Float32(s.String(), ev.Params.BufferSize)
avg := float64(0)
for t := range ev.Params.AvgWindow {
pidx := ev.PriorIndex(t + del)
avg += ts.Float1D(pidx)
}
avg /= float64(ev.Params.AvgWindow)
es.SenseAverages[s] = float32(avg)
nrm := float32(avg) * ev.SenseNorms[s]
if math32.Abs(nrm) > 1 {
nrm = math32.Sign(nrm)
}
es.SenseNormed[s] = nrm
avgDir.Dir(diName).Float32(s.String(), avgBufSz).SetFloat1D(float64(nrm), ev.AvgWriteIndex)
}
}
ev.AvgWriteIncr()
// es := ev.EmeryState(0)
// fmt.Println("avgs: ", es.SenseAverages)
// fmt.Println("norms:", es.SenseNormed)
}
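// normalizeSketch is an illustrative sketch (not part of the environment API)
// of the normalization step in AverageSenses above: the time-averaged value
// is scaled by 1/SenseMaxValues[s] (the same factor stored in SenseNorms) and
// clamped to the [-1, 1] range expected by the population code.
func normalizeSketch(avg float32, s Senses) float32 {
	nrm := avg * (1.0 / SenseMaxValues[s])
	if math32.Abs(nrm) > 1 {
		nrm = math32.Sign(nrm)
	}
	return nrm
}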
// RenderSenses renders sensory states for current sensory values.
func (ev *EmeryEnv) RenderSenses() {
ev.AverageSenses()
for s := range VSRotHDir { // only render senses below VSRotHDir (ground truth)
for di := range ev.NData {
es := ev.EmeryState(di)
val := es.SenseNormed[s]
ev.RenderValue(di, s.String(), val)
}
}
}
// VisVestibCorrelCycle returns the correlation between the visual (VMRotHVel)
// and vestibular (VSRotHVel) signals at the cycle level,
// for tuning the visual motion params (VSRotHVel is ground truth).
func (ev *EmeryEnv) VisVestibCorrelCycle(di int) float64 {
dd := ev.SenseData.Dir("Cycle").Dir(ev.diName(di))
vm := dd.Float32(VMRotHVel.String(), ev.Params.BufferSize)
madj := tensor.NewFloat32FromValues(vm.Values[ev.Params.VisMotionInterval:]...)
vs := dd.Float32(VSRotHVel.String(), ev.Params.BufferSize)
sadj := tensor.NewFloat32FromValues(vs.Values[:ev.Params.BufferSize-ev.Params.VisMotionInterval]...)
cor := metric.Correlation(madj, sadj).Float1D(0)
return cor
}
// VisVestibCorrelAvg returns the correlation between the visual (VMRotHVel)
// and vestibular (VSRotHVel) signals at the averaged and normalized level,
// for tuning the visual motion params (VSRotHVel is ground truth).
func (ev *EmeryEnv) VisVestibCorrelAvg(di int) float64 {
avgBufSz := ev.Params.BufferSize / 10
dd := ev.SenseData.Dir("Avg").Dir(ev.diName(di))
vm := dd.Float32(VMRotHVel.String(), avgBufSz)
vs := dd.Float32(VSRotHVel.String(), avgBufSz)
cor := metric.Correlation(vm, vs).Float1D(0)
return cor
}
// SenseValue returns the given sensory value, either current or
// delayed.
func (ev *EmeryEnv) SenseValue(di int, sense Senses, delayed bool) float32 {
nPrior := 0
if delayed {
nPrior = ev.SensoryDelays[sense]
}
val := ev.ReadData(ev.SenseData.Dir("Cycle"), di, sense.String(), nPrior)
return val
}
// Code generated by "core generate -add-types -add-funcs -setters -gosl"; DO NOT EDIT.
package emery
import (
"image"
"cogentcore.org/core/core"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/core/types"
"cogentcore.org/core/xyz/xyzcore"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/popcode"
"github.com/emer/v1vision/v1std"
)
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.Actions", IDName: "actions", Doc: "Actions are motor actions as abstracted coordinated plans\nthat unfold over time, at a level above individual muscles.\nThey are recorded in data continuously, with 0 meaning no\naction being taken, and non-zero indicating strength of action."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.EmeryBodies", IDName: "emery-bodies", Doc: "EmeryBodies are indexes for the physics body elements of Emery."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.Emery", IDName: "emery", Doc: "Emery encapsulates all the emery agent config and physics.", Fields: []types.Field{{Name: "Length", Doc: "full length of emery"}, {Name: "Obj", Doc: "emery object"}, {Name: "XZ", Doc: "PlaneXZ joint for controlling 2D position."}, {Name: "Neck", Doc: "joint for the neck."}, {Name: "EyeR", Doc: "Right eye of emery"}}})
// SetLength sets the [Emery.Length]:
// full length of emery
func (t *Emery) SetLength(v float32) *Emery { t.Length = v; return t }
// SetObj sets the [Emery.Obj]:
// emery object
func (t *Emery) SetObj(v *builder.Object) *Emery { t.Obj = v; return t }
// SetXZ sets the [Emery.XZ]:
// PlaneXZ joint for controlling 2D position.
func (t *Emery) SetXZ(v *builder.Joint) *Emery { t.XZ = v; return t }
// SetNeck sets the [Emery.Neck]:
// joint for the neck.
func (t *Emery) SetNeck(v *builder.Joint) *Emery { t.Neck = v; return t }
// SetEyeR sets the [Emery.EyeR]:
// Right eye of emery
func (t *Emery) SetEyeR(v *builder.Body) *Emery { t.EyeR = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.EmeryState", IDName: "emery-state", Doc: "EmeryState has all the state info for each Emery instance.", Fields: []types.Field{{Name: "SenseValues", Doc: "SenseValues has the current sensory values from physics model,\nstored here by the Sensor function for subsequent recording."}, {Name: "SenseAverages", Doc: "SenseAverages has the average delayed sensory values over\nSensoryWindow, which goes into SenseNormed for rendering."}, {Name: "SenseNormed", Doc: "SenseNormed has the normalized versions of SenseAverages,\nwhich is what is actually rendered."}, {Name: "EyeRImage", Doc: "current captured images"}, {Name: "EyeLImage", Doc: "current captured images"}, {Name: "NextActions", Doc: "NextActions are the next action values set by sim, and rendered\ndepending on RenderNextAction value."}, {Name: "CurActions", Doc: "CurActions are the current action values, updated by TakeNextAction,\nand rendered depending on RenderNextAction value."}}})
// SetSenseValues sets the [EmeryState.SenseValues]:
// SenseValues has the current sensory values from physics model,
// stored here by the Sensor function for subsequent recording.
func (t *EmeryState) SetSenseValues(v [SensesN]float32) *EmeryState { t.SenseValues = v; return t }
// SetSenseAverages sets the [EmeryState.SenseAverages]:
// SenseAverages has the average delayed sensory values over
// SensoryWindow, which goes into SenseNormed for rendering.
func (t *EmeryState) SetSenseAverages(v [SensesN]float32) *EmeryState { t.SenseAverages = v; return t }
// SetSenseNormed sets the [EmeryState.SenseNormed]:
// SenseNormed has the normalized versions of SenseAverages,
// which is what is actually rendered.
func (t *EmeryState) SetSenseNormed(v [SensesN]float32) *EmeryState { t.SenseNormed = v; return t }
// SetEyeRImage sets the [EmeryState.EyeRImage]:
// current captured images
func (t *EmeryState) SetEyeRImage(v image.Image) *EmeryState { t.EyeRImage = v; return t }
// SetEyeLImage sets the [EmeryState.EyeLImage]:
// current captured images
func (t *EmeryState) SetEyeLImage(v image.Image) *EmeryState { t.EyeLImage = v; return t }
// SetNextActions sets the [EmeryState.NextActions]:
// NextActions are the next action values set by sim, and rendered
// depending on RenderNextAction value.
func (t *EmeryState) SetNextActions(v [ActionsN]float32) *EmeryState { t.NextActions = v; return t }
// SetCurActions sets the [EmeryState.CurActions]:
// CurActions are the current action values, updated by TakeNextAction,
// and rendered depending on RenderNextAction value.
func (t *EmeryState) SetCurActions(v [ActionsN]float32) *EmeryState { t.CurActions = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.EmeryEnv", IDName: "emery-env", Doc: "EmeryEnv is the emery rat environment.", Fields: []types.Field{{Name: "Name", Doc: "name of this environment: Train or Test"}, {Name: "NData", Doc: "NData is number of data-parallel Emery's to run."}, {Name: "Params", Doc: "Params has all the parameters for the environment."}, {Name: "RenderStates", Doc: "RenderStates should be updated by sim prior to running Step.\nIt tells Step to render States input for the model.\nOtherwise, physics is updated and sensory state is recorded, but\nno rendering. Rendered states average over SensoryWindow."}, {Name: "Motion", Doc: "Visual motion processing"}, {Name: "MotionImage", Doc: "Image processing for Motion."}, {Name: "World", Doc: "World specifies the physical world parameters."}, {Name: "Emery", Doc: "Emery has the parameters for (the first) Emery."}, {Name: "Physics", Doc: "The core physics elements: Model, Builder, Scene"}, {Name: "Camera", Doc: "Camera has offscreen render camera settings"}, {Name: "CurrentTime", Doc: "CurrentTime is the current timestep in msec. Counts up every Step,\n1 per msec (cycle)."}, {Name: "SenseData", Doc: "SenseData records the sensory data for each emery agent."}, {Name: "ActionData", Doc: "ActionData records the motor action data for each emery agent."}, {Name: "WriteIndex", Doc: "WriteIndex is the current write index in tensorfs Cycle-level\nsensory and motor data. Add post-increments."}, {Name: "AvgWriteIndex", Doc: "AvgWriteIndex is the current write index for averages data,\nwhich is less frequently updated."}, {Name: "SensoryDelays", Doc: "SensoryDelays are the actual delays for each sense: from [SensoryDelays]\nparams."}, {Name: "SenseNorms", Doc: "SenseNorms are the normalization factors for each sense (1/typical max)."}, {Name: "Emerys", Doc: "Emerys has the state values for each NData emery."}, {Name: "States", Doc: "States is the current rendered state tensors."}, {Name: "Rand", Doc: "Rand is the random number generator for the env.\nAll random calls must use this.\nSet seed here for weight initialization values."}, {Name: "Cycle", Doc: "Cycle tracks cycles, for interval-based updates etc."}, {Name: "RandSeed", Doc: "random seed"}}})
// SetName sets the [EmeryEnv.Name]:
// name of this environment: Train or Test
func (t *EmeryEnv) SetName(v string) *EmeryEnv { t.Name = v; return t }
// SetNData sets the [EmeryEnv.NData]:
// NData is number of data-parallel Emery's to run.
func (t *EmeryEnv) SetNData(v int) *EmeryEnv { t.NData = v; return t }
// SetParams sets the [EmeryEnv.Params]:
// Params has all the parameters for the environment.
func (t *EmeryEnv) SetParams(v Params) *EmeryEnv { t.Params = v; return t }
// SetRenderStates sets the [EmeryEnv.RenderStates]:
// RenderStates should be updated by sim prior to running Step.
// It tells Step to render States input for the model.
// Otherwise, physics is updated and sensory state is recorded, but
// no rendering. Rendered states average over SensoryWindow.
func (t *EmeryEnv) SetRenderStates(v bool) *EmeryEnv { t.RenderStates = v; return t }
// SetMotion sets the [EmeryEnv.Motion]:
// Visual motion processing
func (t *EmeryEnv) SetMotion(v v1std.MotionDoG) *EmeryEnv { t.Motion = v; return t }
// SetMotionImage sets the [EmeryEnv.MotionImage]:
// Image processing for Motion.
func (t *EmeryEnv) SetMotionImage(v v1std.Image) *EmeryEnv { t.MotionImage = v; return t }
// SetWorld sets the [EmeryEnv.World]:
// World specifies the physical world parameters.
func (t *EmeryEnv) SetWorld(v World) *EmeryEnv { t.World = v; return t }
// SetEmery sets the [EmeryEnv.Emery]:
// Emery has the parameters for (the first) Emery.
func (t *EmeryEnv) SetEmery(v Emery) *EmeryEnv { t.Emery = v; return t }
// SetPhysics sets the [EmeryEnv.Physics]:
// The core physics elements: Model, Builder, Scene
func (t *EmeryEnv) SetPhysics(v builder.Physics) *EmeryEnv { t.Physics = v; return t }
// SetCamera sets the [EmeryEnv.Camera]:
// Camera has offscreen render camera settings
func (t *EmeryEnv) SetCamera(v phyxyz.Camera) *EmeryEnv { t.Camera = v; return t }
// SetCurrentTime sets the [EmeryEnv.CurrentTime]:
// CurrentTime is the current timestep in msec. Counts up every Step,
// 1 per msec (cycle).
func (t *EmeryEnv) SetCurrentTime(v int) *EmeryEnv { t.CurrentTime = v; return t }
// SetSenseData sets the [EmeryEnv.SenseData]:
// SenseData records the sensory data for each emery agent.
func (t *EmeryEnv) SetSenseData(v *tensorfs.Node) *EmeryEnv { t.SenseData = v; return t }
// SetActionData sets the [EmeryEnv.ActionData]:
// ActionData records the motor action data for each emery agent.
func (t *EmeryEnv) SetActionData(v *tensorfs.Node) *EmeryEnv { t.ActionData = v; return t }
// SetWriteIndex sets the [EmeryEnv.WriteIndex]:
// WriteIndex is the current write index in tensorfs Cycle-level
// sensory and motor data. Add post-increments.
func (t *EmeryEnv) SetWriteIndex(v int) *EmeryEnv { t.WriteIndex = v; return t }
// SetAvgWriteIndex sets the [EmeryEnv.AvgWriteIndex]:
// AvgWriteIndex is the current write index for averages data,
// which is less frequently updated.
func (t *EmeryEnv) SetAvgWriteIndex(v int) *EmeryEnv { t.AvgWriteIndex = v; return t }
// SetSensoryDelays sets the [EmeryEnv.SensoryDelays]:
// SensoryDelays are the actual delays for each sense: from [SensoryDelays]
// params.
func (t *EmeryEnv) SetSensoryDelays(v [SensesN]int) *EmeryEnv { t.SensoryDelays = v; return t }
// SetSenseNorms sets the [EmeryEnv.SenseNorms]:
// SenseNorms are the normalization factors for each sense (1/typical max).
func (t *EmeryEnv) SetSenseNorms(v [SensesN]float32) *EmeryEnv { t.SenseNorms = v; return t }
// SetEmerys sets the [EmeryEnv.Emerys]:
// Emerys has the state values for each NData emery.
func (t *EmeryEnv) SetEmerys(v ...EmeryState) *EmeryEnv { t.Emerys = v; return t }
// SetStates sets the [EmeryEnv.States]:
// States is the current rendered state tensors.
func (t *EmeryEnv) SetStates(v map[string]*tensor.Float32) *EmeryEnv { t.States = v; return t }
// SetRand sets the [EmeryEnv.Rand]:
// Rand is the random number generator for the env.
// All random calls must use this.
// Set seed here for weight initialization values.
func (t *EmeryEnv) SetRand(v randx.SysRand) *EmeryEnv { t.Rand = v; return t }
// SetCycle sets the [EmeryEnv.Cycle]:
// Cycle tracks cycles, for interval-based updates etc.
func (t *EmeryEnv) SetCycle(v env.Counter) *EmeryEnv { t.Cycle = v; return t }
// SetRandSeed sets the [EmeryEnv.RandSeed]:
// random seed
func (t *EmeryEnv) SetRandSeed(v int64) *EmeryEnv { t.RandSeed = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.GUI", IDName: "gui", Doc: "GUI provides a GUI view onto the EmeryEnv", Fields: []types.Field{{Name: "Env", Doc: "Env is the environment we're viewing"}, {Name: "Di", Doc: "Di is the data parallel item to view."}, {Name: "Scene", Doc: "GUI version of scene"}, {Name: "SceneEditor", Doc: "3D visualization of the Scene"}, {Name: "EyeRImageDisp", Doc: "first-person right-eye full field view"}, {Name: "EyeLImageDisp", Doc: "first-person left-eye fovea view"}}})
// SetEnv sets the [GUI.Env]:
// Env is the environment we're viewing
func (t *GUI) SetEnv(v *EmeryEnv) *GUI { t.Env = v; return t }
// SetDi sets the [GUI.Di]:
// Di is the data parallel item to view.
func (t *GUI) SetDi(v int) *GUI { t.Di = v; return t }
// SetScene sets the [GUI.Scene]:
// GUI version of scene
func (t *GUI) SetScene(v *phyxyz.Scene) *GUI { t.Scene = v; return t }
// SetSceneEditor sets the [GUI.SceneEditor]:
// 3D visualization of the Scene
func (t *GUI) SetSceneEditor(v *xyzcore.SceneEditor) *GUI { t.SceneEditor = v; return t }
// SetEyeRImageDisp sets the [GUI.EyeRImageDisp]:
// first-person right-eye full field view
func (t *GUI) SetEyeRImageDisp(v *core.Image) *GUI { t.EyeRImageDisp = v; return t }
// SetEyeLImageDisp sets the [GUI.EyeLImageDisp]:
// first-person left-eye fovea view
func (t *GUI) SetEyeLImageDisp(v *core.Image) *GUI { t.EyeLImageDisp = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.SensoryDelays", IDName: "sensory-delays", Doc: "SensoryDelays are delays from motor actions for different sensory modalities.", Fields: []types.Field{{Name: "Vestibular"}, {Name: "Visual"}}})
// SetVestibular sets the [SensoryDelays.Vestibular]
func (t *SensoryDelays) SetVestibular(v int) *SensoryDelays { t.Vestibular = v; return t }
// SetVisual sets the [SensoryDelays.Visual]
func (t *SensoryDelays) SetVisual(v int) *SensoryDelays { t.Visual = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.Params", IDName: "params", Doc: "Params are misc parameters the environment.", Fields: []types.Field{{Name: "MaxRotate", Doc: "MaxRotate is maximum rotation angle magnitude per action, in degrees."}, {Name: "VisMotionInterval", Doc: "VisMotionInterval is interval between vis motion computation in cycles.\nThis is a very expensive computation in general so spacing it out.\ntodo: revisit once mac metal timer bug is fixed in wgpu."}, {Name: "TimeBinCycles", Doc: "TimeBinCycles is the number of cycles per time bin, which also determines\nhow frequently the inputs are applied to the network, which affects performance\nand learning there."}, {Name: "TimeBins", Doc: "TimeBins is the total number of time bins per trial, for MF and Thal reps:"}, {Name: "UnitsPer", Doc: "UnitsPer is the number of units per localist value."}, {Name: "PopCodeUnits", Doc: "PopCodeUnits is the number of units to use for population code."}, {Name: "AvgWindow", Doc: "AvgWindow is the time window in Cycles (ms) over which the sensory\nstate is averaged, for the purposes of rendering state."}, {Name: "ActionStiff", Doc: "ActionStiff is the stiffness for performing actions."}, {Name: "PopCode", Doc: "population code, for linear values, -1..1, in normalized units"}, {Name: "LeftEye", Doc: "LeftEye determines whether to process left eye image or not."}, {Name: "BufferSize", Doc: "BufferSize is the number of time steps (ms) to retain in the tensorfs\nsensory and motor state buffers."}, {Name: "Delays", Doc: "Delays are sensory delays"}}})
// SetMaxRotate sets the [Params.MaxRotate]:
// MaxRotate is maximum rotation angle magnitude per action, in degrees.
func (t *Params) SetMaxRotate(v float32) *Params { t.MaxRotate = v; return t }
// SetVisMotionInterval sets the [Params.VisMotionInterval]:
// VisMotionInterval is interval between vis motion computation in cycles.
// This is a very expensive computation in general so spacing it out.
// todo: revisit once mac metal timer bug is fixed in wgpu.
func (t *Params) SetVisMotionInterval(v int) *Params { t.VisMotionInterval = v; return t }
// SetTimeBinCycles sets the [Params.TimeBinCycles]:
// TimeBinCycles is the number of cycles per time bin, which also determines
// how frequently the inputs are applied to the network, which affects performance
// and learning there.
func (t *Params) SetTimeBinCycles(v int) *Params { t.TimeBinCycles = v; return t }
// SetTimeBins sets the [Params.TimeBins]:
// TimeBins is the total number of time bins per trial, for MF and Thal reps:
func (t *Params) SetTimeBins(v int) *Params { t.TimeBins = v; return t }
// SetUnitsPer sets the [Params.UnitsPer]:
// UnitsPer is the number of units per localist value.
func (t *Params) SetUnitsPer(v int) *Params { t.UnitsPer = v; return t }
// SetPopCodeUnits sets the [Params.PopCodeUnits]:
// PopCodeUnits is the number of units to use for population code.
func (t *Params) SetPopCodeUnits(v int) *Params { t.PopCodeUnits = v; return t }
// SetAvgWindow sets the [Params.AvgWindow]:
// AvgWindow is the time window in Cycles (ms) over which the sensory
// state is averaged, for the purposes of rendering state.
func (t *Params) SetAvgWindow(v int) *Params { t.AvgWindow = v; return t }
// SetActionStiff sets the [Params.ActionStiff]:
// ActionStiff is the stiffness for performing actions.
func (t *Params) SetActionStiff(v float32) *Params { t.ActionStiff = v; return t }
// SetPopCode sets the [Params.PopCode]:
// population code, for linear values, -1..1, in normalized units
func (t *Params) SetPopCode(v popcode.OneD) *Params { t.PopCode = v; return t }
// SetLeftEye sets the [Params.LeftEye]:
// LeftEye determines whether to process left eye image or not.
func (t *Params) SetLeftEye(v bool) *Params { t.LeftEye = v; return t }
// SetBufferSize sets the [Params.BufferSize]:
// BufferSize is the number of time steps (ms) to retain in the tensorfs
// sensory and motor state buffers.
func (t *Params) SetBufferSize(v int) *Params { t.BufferSize = v; return t }
// SetDelays sets the [Params.Delays]:
// Delays are sensory delays
func (t *Params) SetDelays(v SensoryDelays) *Params { t.Delays = v; return t }
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.Senses", IDName: "senses", Doc: "Senses are sensory inputs that unfold over time.\nCan also use to store abstracted sensory state."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/sims/deepspace/emery.World", IDName: "world", Doc: "World describes the physics world parameters.", Fields: []types.Field{{Name: "Depth", Doc: "computed total depth, starts at 0 goes deep"}, {Name: "Width", Doc: "computed total width"}, {Name: "Thick", Doc: "thickness of walls"}, {Name: "HalfWidth", Doc: "half width for centering on 0 X"}, {Name: "ObjWidth", Doc: "ObjWidth is the range in width of objects (landmarks)."}, {Name: "ObjHeight", Doc: "ObjHeight is the range in height of objects (landmarks)."}, {Name: "ObjSpace", Doc: "ObjSpace is the range in space between objects (landmarks) in degrees."}}})
// SetDepth sets the [World.Depth]:
// computed total depth, starts at 0 goes deep
func (t *World) SetDepth(v float32) *World { t.Depth = v; return t }
// SetWidth sets the [World.Width]:
// computed total width
func (t *World) SetWidth(v float32) *World { t.Width = v; return t }
// SetThick sets the [World.Thick]:
// thickness of walls
func (t *World) SetThick(v float32) *World { t.Thick = v; return t }
// SetHalfWidth sets the [World.HalfWidth]:
// half width for centering on 0 X
func (t *World) SetHalfWidth(v float32) *World { t.HalfWidth = v; return t }
// SetObjWidth sets the [World.ObjWidth]:
// ObjWidth is the range in width of objects (landmarks).
func (t *World) SetObjWidth(v minmax.F32) *World { t.ObjWidth = v; return t }
// SetObjHeight sets the [World.ObjHeight]:
// ObjHeight is the range in height of objects (landmarks).
func (t *World) SetObjHeight(v minmax.F32) *World { t.ObjHeight = v; return t }
// SetObjSpace sets the [World.ObjSpace]:
// ObjSpace is the range in space between objects (landmarks) in degrees.
func (t *World) SetObjSpace(v minmax.F32) *World { t.ObjSpace = v; return t }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package emery
import (
"strconv"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/lab/physics"
"cogentcore.org/lab/physics/builder"
"cogentcore.org/lab/physics/phyxyz"
)
// World describes the physics world parameters.
type World struct {
// computed total depth, starts at 0 goes deep
Depth float32 `edit:"-"`
// computed total width
Width float32 `edit:"-"`
// thickness of walls
Thick float32 `default:"0.1"`
// half width for centering on 0 X
HalfWidth float32 `edit:"-"`
// ObjWidth is the range in width of objects (landmarks).
ObjWidth minmax.F32
// ObjHeight is the range in height of objects (landmarks).
ObjHeight minmax.F32
// ObjSpace is the range in space between objects (landmarks) in degrees.
ObjSpace minmax.F32
}
func (ew *World) Defaults() {
ew.Depth = 5
ew.Width = 10
ew.Thick = 0.1
ew.HalfWidth = ew.Width / 2
ew.ObjWidth.Set(1, 2)
ew.ObjHeight.Set(5, 10)
ew.ObjSpace.Set(20, 35)
}
// Make makes the World
func (ew *World) Make(wl *builder.World, sc *phyxyz.Scene, ev *EmeryEnv) {
rot := math32.NewQuatIdentity()
obj := wl.NewObject()
obj.NewBodySkin(sc, "floor", physics.Plane, "grey", math32.Vec3(ew.Width/2, 0, ew.Depth/2), math32.Vec3(0, 0, 0), rot)
ew.MakeLandmarks(wl, sc, ev)
}
func (ew *World) MakeLandmarks(wl *builder.World, sc *phyxyz.Scene, ev *EmeryEnv) {
rot := math32.NewQuatIdentity()
radius := 1.2 * max(ew.Width, ew.Depth)
sp := func() float32 { return ew.ObjSpace.ProjValue(ev.Rand.Float32()) }
wd := func() float32 { return ew.ObjWidth.ProjValue(ev.Rand.Float32()) }
ht := func() float32 { return ew.ObjHeight.ProjValue(ev.Rand.Float32()) }
var pos math32.Vector2
colors := []string{"red", "green", "blue", "yellow", "orange", "violet"}
deg := float32(0)
idx := 0
for {
deg += sp()
if deg > 360 {
break
}
mydeg := deg
myidx := idx
dnm := "lmark_" + strconv.Itoa(idx)
idx++
cw := wd() / 2
ch := ht() / 2
pos.Y = radius * math32.Sin(math32.DegToRad(mydeg))
pos.X = radius * math32.Cos(math32.DegToRad(mydeg))
// fmt.Println(dnm, pos)
clr := colors[myidx%len(colors)]
obj := wl.NewObject()
obj.NewBodySkin(sc, dnm, physics.Box, clr, math32.Vec3(cw, ch, cw), math32.Vec3(pos.X, ch, pos.Y), rot)
}
}
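// landmarkPosSketch is an illustrative sketch (not part of the World API) of
// the polar placement used in MakeLandmarks above: an angle in degrees is
// mapped to an XZ position on a circle of the given radius around the origin.
func landmarkPosSketch(radius, deg float32) (x, z float32) {
	x = radius * math32.Cos(math32.DegToRad(deg))
	z = radius * math32.Sin(math32.DegToRad(deg))
	return x, z
}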
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package deepspace
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepspace
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic layer params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.05 needed to get hidden2 high to .1, 0.1 keeps it too low!
}},
{Sel: ".CNiLayer", Doc: "all cerebellar nucleus inhibitory neurons",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // 0.05 needed to get hidden2 high to .1, 0.1 keeps it too low!
ly.Acts.Noise.On.SetBool(true) // true >= false (minor)
ly.Acts.Noise.Ge = 0.05 // 0.05 >= 0.1?
ly.Acts.Noise.Gi = 0.05
}},
{Sel: ".SuperLayer", Doc: "super layer params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
}},
{Sel: ".ThalIn", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.2 // 0.2 > 0.13, 0.2 accurate
ly.Inhib.Layer.Gi = 0.8 //
}},
{Sel: ".MFIn", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05 // 0.2 > 0.13, 0.2 accurate
ly.Inhib.Layer.Gi = 0.8 //
}},
{Sel: ".RateIn", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.LinearDefaults()
ly.Acts.Clamp.Ge = 0.5
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.ActAvg.Nominal = 0.2
}},
{Sel: ".CTLayer", Doc: "CT NMDA gbar factor is key",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.12 // CT in general more active
ly.Inhib.Layer.Gi = 1.8 // 1.8 == 1.6 > 2.0
ly.CT.GeGain = 1.0 // 1 == 1.5 > 0.5 except depth
ly.CT.DecayTau = 0 // decay is very bad
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0
ly.Acts.GabaB.Gk = 0.015 // 0.015 standard gaba
ly.Acts.NMDA.Ge = 0.006
ly.Acts.NMDA.Tau = 100
ly.Acts.MaintNMDA.Ge = 0.006 // not relevant -- no CTSelf
ly.Acts.MaintNMDA.Tau = 100
}},
{Sel: ".PulvinarLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.15 // 0.15 accurate
ly.Inhib.Layer.Gi = 0.8 // 0.8 good -- was 0.9
ly.Pulvinar.DriveScale = 0.12 // 0.12 ~= .1
ly.Pulvinar.FullDriveAct = 0.6 // 0.6 def
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Acts.Decay.AHP = 0.0 // clear long
ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > .05
}},
{Sel: ".IOLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.IO.ErrThr = 0.02
ly.IO.TimeOff = 50
ly.IO.EfferentOff = 30
ly.IO.GTau = 20
}},
{Sel: ".CNiIOLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Nuclear.Decay = 0.5
ly.Nuclear.SendTimeOff = 40
ly.Nuclear.SendTimeWindow = 30
}},
{Sel: ".CNiUpLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Nuclear.Decay = 0.5
ly.Nuclear.SendTimeOff = 10
ly.Nuclear.SendTimeWindow = 30
}},
{Sel: ".CNeLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Init.GeBase = 0.2
ly.Nuclear.GeBaseLRate = 0.0001
ly.Acts.GabaB.Gk = 0 // 0 > 0.005 > 0.010 > 0.015 def
ly.Acts.NMDA.Ge = 0 // 0 > 0.006 def
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "std",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 // 0.01 >= 0.02
pt.Learn.DWt.SubMean = 0 // 0 > 1 even with CTCtxt = 0
pt.SWts.Adapt.LRate = 0.01 // 0.01 == 0.0001 but 0.001 not as good..
pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok
pt.Learn.DWt.SynTraceTau = 2 // 4 == 2 > 1 still 0.2.59
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
}},
{Sel: ".CTToPulv", Doc: "all CT to pulvinar",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1.2 for vnc
}},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.002 // has almost no effect in 1to1
pt.Learn.DWt.SubMean = 0 //
pt.Learn.DWt.SynTraceTau = 2 // 2 > 1 still 0.2.28
}},
{Sel: ".CTFromSuper", Doc: "1to1 > full",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5 > 1
pt.Learn.Learn.SetBool(true) // learning > fixed 1to1
pt.SWts.Init.Mean = 0.5 // if fixed, 0.8 > 0.5, var = 0
pt.SWts.Init.Var = 0.25
}},
{Sel: ".FromPulv", Doc: "defaults to .Back but generally weaker is better",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 == 0.15 > 0.05
}},
{Sel: ".FromAct", Doc: "strong from act",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 2
}},
{Sel: ".FFToHid", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: ".CNiIOToIO", Doc: "inhibition to IO",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5
}},
{Sel: ".SenseToCNeUp", Doc: "excitation to CNeUp",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1
}},
{Sel: ".CNeUpPath", Doc: "initial weights",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5
pt.Learn.LRate.Base = 0.02 //
pt.SWts.Init.Mean = 0.5 // std initial
pt.SWts.Init.Var = 0
pt.SWts.Init.SPct = 0
pt.SWts.Adapt.On.SetBool(false)
}},
{Sel: ".MFToCNiIOUp", Doc: "initial weights",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 //
pt.SWts.Init.Mean = 0.05 // weak initial
pt.SWts.Init.Var = 0
pt.SWts.Init.SPct = 0
pt.SWts.Adapt.On.SetBool(false)
}},
{Sel: ".MFToCNiUp", Doc: "initial weights",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01 //
pt.SWts.Init.Mean = 0.05 // ?
pt.SWts.Init.Var = 0
pt.SWts.Init.SPct = 0
pt.SWts.Adapt.On.SetBool(false)
}},
/* not used
{Sel: ".CTSelfCtxt", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5 // 0.5 > 0.2 > 0.8
pt.Com.PFail = 0.0 // never useful for random gen
pt.SWts.Init.Sym = true // true > false
}},
{Sel: ".CTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 >= 0.05 > 0.2
pt.Com.PFail = 0.0
pt.SWts.Init.Sym = true // no effect? not sure why
}},
*/
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepvision
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
type EnvConfig struct { //types:add
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
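// For example, a .toml config file might include (illustrative values only):
//   [Env.Env]
//   Path = "images/train"
// which sets the corresponding field on Obj3DSacEnv via reflectx.SetFieldsFromMap in ConfigEnv.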
Env map[string]any
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// V2Plus includes V2 and higher layers
V2Plus bool
// V3Plus includes V3 and higher layers
V3Plus bool
// DP adds the DP layer, on top of V3
DP bool
// V4Plus includes V4 and higher layers
V4Plus bool
// TEOPlus includes TEO and higher layers
TEOPlus bool
// TE includes TE layer
TE bool
// GPUDevice selects the gpu device to use.
GPUDevice int
// MPI uses MPI message passing interface for data parallel computation
// between nodes running identical copies of the same sim, sharing DWt changes.
MPI bool
// GPUSameNodeMPI, if true and both MPI and GPU are being used, selects
// a different GPU for each MPI proc rank, assuming a multi-GPU node.
// Set to false if running MPI across multiple GPU nodes.
GPUSameNodeMPI bool
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an
// effective mini-batch for learning.
NData int `default:"8" min:"1"`
// SlowInterval is the interval between slow adaptive processes.
// This generally needs to be longer than the default of 100 in larger models.
SlowInterval int `default:"400"` // 400 best > 800 >> 100
// AdaptGiInterval is the interval between adapting inhibition steps.
AdaptGiInterval int `default:"400"` // ?
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"1000"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData. Was 512 in Leabra model.
Trials int `default:"512"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"160"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"60"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the
// test patterns. Use 0 or -1 for no testing.
TestInterval int `default:"20"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// RSAInterval is how often to run RSA analyses over epochs.
RSAInterval int `default:"10"`
// ConfusionEpc is the epoch to start recording confusion matrix.
ConfusionEpc int `default:"500"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
// StartEpoch is the epoch counter to set when loading start weights.
StartEpoch int
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
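// For example, with the defaults above (ISICycles=0, MinusCycles=160, PlusCycles=60),
// this is 0 + 160 + 60 = 220 cycles per trial.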
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// SaveWeightsAt is a list of epoch counters at which to save weights.
SaveWeightsAt []int `default:"[400, 800]"`
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `default:"['Epoch']" nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// environment configuration options
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "DeepVision"
cfg.Title = "Deep Vision"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/deepvision/README.md"
cfg.Doc = "This simulation does deep predictive learning on 3D objects tumbling through space, using a visual system with both where (dorsal, LIP) and what (ventral, IT) pathways learning based purely on predicting the next frame of the image."
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// deepvision does deep predictive learning on 3D objects tumbling through space,
// using a visual system with both where (dorsal, LIP) and what (ventral, IT)
// pathways, learning based purely on predicting the next frame of the image.
package deepvision
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"math"
"os"
"reflect"
"slices"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/metric"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorcore"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/decoder"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
NovelTrain
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Paths are all the specialized pathways for the network.
Paths Paths `new-window:"+" display:"no-inline"`
// Decoder is used as a comparison vs. the Output layer.
Decoder decoder.SoftMax
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Paths.Defaults()
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
ss.RSAInit()
}
func (ss *Sim) ConfigEnv() {
ndata := ss.Config.Run.NData
// Can be called multiple times -- don't re-create
var trn, tst *Obj3DSacEnv
if len(ss.Envs) == 0 {
trn = &Obj3DSacEnv{}
tst = &Obj3DSacEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*Obj3DSacEnv)
tst = ss.Envs.ByMode(Test).(*Obj3DSacEnv)
}
trn.Name = Train.String()
trn.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(ndata, axon.ComputeGPU)
trn.OpenTable()
tst.Name = Test.String()
tst.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(ndata, axon.ComputeGPU)
tst.Table = trn.Table
// if ss.Config.Run.MPI {
// if ss.Config.Debug {
// mpi.Printf("Did Env MPIAlloc\n")
// }
// trn.MPIAlloc()
// tst.MPIAlloc()
// }
trn.Init(0)
trn.Step() // needs an image
trn.Init(0)
tst.Init(0)
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).
SetSlowInterval(int32(ss.Config.Run.SlowInterval)).
SetAdaptGiInterval(int32(ss.Config.Run.AdaptGiInterval)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
space := float32(4)
// one2one := paths.NewOneToOne()
full := paths.NewFull()
pool1to1 := paths.NewPoolOneToOne()
pts := &ss.Paths
rndcut := paths.NewUniformRand()
rndcut.PCon = 0.1
_ = rndcut
trn := ss.Envs.ByMode(Train).(*Obj3DSacEnv)
v1nrows := trn.V1c.Out4Rows()
sample2 := func(ly *axon.Layer) {
ly.SetSampleShape(emer.CenterPoolIndexes(ly, 2), emer.CenterPoolShape(ly, 2))
}
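// sample2 restricts the sample shape to the center 2x2 pools of a 4D layer;
// the sample-based stats (e.g., UnitValuesSampleTensor in StatPrevCorSim)
// then operate on this reduced subset, presumably to keep stats costs
// manageable on these large layers.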
// LIP network
v1m := net.AddLayer4D("V1m", axon.InputLayer, 8, 8, v1nrows, 4).AddClass("V1m")
v1h := net.AddLayer4D("V1h", axon.InputLayer, 16, 16, v1nrows, 4).AddClass("V1h")
sample2(v1m)
sample2(v1h)
eyepos := net.AddLayer2D("EyePos", axon.InputLayer, 21, 21).AddClass("PopCode")
sacplan := net.AddLayer2D("SacPlan", axon.InputLayer, 11, 11).AddClass("PopCode")
sac := net.AddLayer2D("Saccade", axon.InputLayer, 11, 11).AddClass("PopCode")
objvel := net.AddLayer2D("ObjVel", axon.InputLayer, 11, 11).AddClass("PopCode")
// 2,2 for mt > 1,1
mtpos := net.AddLayer4D("MTpos", axon.SuperLayer, 8, 8, 2, 2).AddClass("MTpos")
mtposP := net.AddPulvForLayer(mtpos, space).AddClass("MTpos")
lip, lipCT := net.AddSuperCT4D("LIP", "LIPCtxt", 8, 8, 4, 4, space, pts.PT3x3Skp1) // 4x4 == 5x5
// net.ConnectCTSelf(lipCT, full, "LIPSelf") // maint + ctself: bad
// net.ConnectLayers(lipCT, lipCT, pts.PT3x3Skp1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
sample2(lip)
sample2(lipCT)
net.ConnectLayers(v1m, mtpos, pool1to1, axon.ForwardPath).AddClass("Fixed")
net.ConnectLayers(mtpos, lip, pool1to1, axon.ForwardPath).AddClass("Fixed")
net.ConnectToPulv(lip, lipCT, mtposP, full, pool1to1, "") // full >> pts.PT3x3Skp1
// these are important for good performance on pure LIP version:
net.ConnectLayers(eyepos, lip, full, axon.ForwardPath)
net.ConnectLayers(sacplan, lip, full, axon.ForwardPath)
net.ConnectLayers(objvel, lip, full, axon.ForwardPath)
net.ConnectLayers(eyepos, lipCT, full, axon.ForwardPath)
net.ConnectLayers(sac, lipCT, full, axon.ForwardPath)
net.ConnectLayers(objvel, lipCT, full, axon.ForwardPath)
net.ConnectLayers(sac, lip, full, axon.ForwardPath)
// Positioning
v1h.PlaceRightOf(v1m, space)
mtpos.PlaceAbove(v1m)
mtposP.PlaceRightOf(mtpos, space)
lip.PlaceBehind(mtpos, space*2)
lipCT.PlaceBehind(lip, space)
eyepos.PlaceRightOf(mtposP, space)
sacplan.PlaceBehind(eyepos, space)
sac.PlaceBehind(sacplan, space)
objvel.PlaceBehind(sac, space)
var v1mP, v2, v2CT, v3, v3CT, dp, dpCT, v3P, v4, v4CT, teo, teoCT, v4P, te, teCT, teoP *axon.Layer
//////// V2
if !ss.Config.Run.V2Plus {
goto build
}
v1mP = net.AddPulvForLayer(v1m, space).AddClass("V1m")
v2, v2CT = net.AddSuperCT4D("V2", "", 8, 8, 10, 10, space, pts.PT3x3Skp1) // 3x3 >> p1to1
sample2(v2)
sample2(v2CT)
net.ConnectToPulv(v2, v2CT, v1mP, pts.PT3x3Skp1, pts.PT3x3Skp1, "FromV1mP") // 3x3 >> p1to1
// old has v2selfct 3x3s1, not good here:
// net.ConnectLayers(v2CT, v2CT, pts.PT3x3Skp1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
net.ConnectLayers(v1m, v2, pts.PT3x3Skp1, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1h, v2, pts.PT4x4Skp2, axon.ForwardPath).AddClass("V1V2")
// net.ConnectLayers(v2CT, lipCT, pool1to1, axon.ForwardPath).AddClass("FwdWeak") // harmful
net.ConnectLayers(lipCT, v2CT, pool1to1, axon.BackPath) // critical!
net.ConnectLayers(v2, lip, pool1to1, axon.ForwardPath).AddClass("FwdWeak") // good later
net.ConnectLayers(lip, v2, pool1to1, axon.BackPath) // helpful
v2.PlaceAbove(v1m)
mtpos.PlaceAbove(v2)
//////// V3
if !ss.Config.Run.V3Plus {
goto build
}
v3, v3CT = net.AddSuperCT4D("V3", "", 4, 4, 10, 10, space, pts.PT3x3Skp1)
sample2(v3)
sample2(v3CT)
// orig 4x4
net.ConnectToPulv(v3, v3CT, v1mP, pts.PT4x4Skp2Recip, pts.PT4x4Skp2, "FromV1mP")
// old has v3selfct 3x3s1: not bad up to .2, but no benefit
// net.ConnectLayers(v3CT, v3CT, pts.PT3x3Skp1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
// orig 4x4skp2
net.ConnectLayers(v2, v3, pts.PT4x4Skp2, axon.ForwardPath)
net.ConnectLayers(v3, v2, pts.PT4x4Skp2Recip, axon.BackPath)
// net.ConnectLayers(v3CT, lipCT, pts.PT2x2Skp2Recip, axon.ForwardPath).AddClass("FwdWeak") // bad for lip
// net.ConnectLayers(lipCT, v3CT, pts.PT2x2Skp2, axon.BackPath) // bad; 2x2 orig
// missing in orig, slower at start but needed for later:
net.ConnectLayers(v2CT, v3CT, pts.PT4x4Skp2, axon.ForwardPath).AddClass("FwdWeak")
// todo: strong .5 in orig
// net.ConnectLayers(v3CT, v2CT, pts.PT4x4Skp2Recip, axon.BackPath) // yes top-down CT
// orig has a "leak" from super -> CT here: (2x2 == 4x4) --
// good: mostly prevents "sag" at end
net.ConnectLayers(v3, v2CT, pts.PT2x2Skp2Recip, axon.BackPath)
// orig 2x2:
net.ConnectLayers(v3, lip, pts.PT2x2Skp2Recip, axon.ForwardPath).AddClass("FwdWeak")
net.ConnectLayers(lip, v3, pts.PT2x2Skp2, axon.BackPath)
net.ConnectLayers(v1m, v3, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut!
net.ConnectLayers(v1m, v3CT, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut! // CT def good
v3.PlaceRightOf(v2, space)
//////// DP
if ss.Config.Run.DP { // now a significant benefit in V1mP performance!
dp, dpCT = net.AddSuperCT4D("DP", "", 4, 4, 10, 10, space, pts.PT3x3Skp1)
sample2(dp)
sample2(dpCT)
net.ConnectToPulv(dp, dpCT, v1mP, pts.PT4x4Skp2Recip, pts.PT4x4Skp2, "FromV1mP")
// todo test:
// net.ConnectLayers(dpCT, dpCT, pts.PT3x3Skp1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
// maint is maybe better:
// net.ConnectCTSelf(dpCT, pts.PT3x3Skp1, "DPCTSelf")
// net.ConnectLayers(v2, dp, pts.PT4x4Skp2, axon.ForwardPath) // better without
// net.ConnectLayers(dp, v2, pts.PT4x4Skp2Recip, axon.BackPath)
net.ConnectLayers(v3, dp, pts.PT3x3Skp1, axon.ForwardPath) // v3 > v2 connectivity
net.ConnectLayers(dp, v3, pts.PT3x3Skp1, axon.BackPath)
v3P = net.AddPulvForLayer(v3, space).AddClass("V3")
net.ConnectToPulv(dp, dpCT, v3P, pts.PT3x3Skp1, pts.PT3x3Skp1, "FromV3P")
net.ConnectLayers(v2CT, v3P, pts.PT4x4Skp2, axon.ForwardPath) // fwd CT, but not recip!
// no DP <-> LIP?
// no FF CT -> CT?
// net.ConnectLayers(v2CT, dpCT, pts.PT4x4Skp2, axon.ForwardPath).AddClass("FwdWeak")
// net.ConnectLayers(v3, dp, pts.PT3x3Skp1, axon.ForwardPath).AddClass("FwdWeak")
// net.ConnectLayers(dpCT, v2CT, pts.PT4x4Skp2Recip, axon.BackPath) // tiny bit worse
// leak from super to CT:
net.ConnectLayers(dp, v2CT, pts.PT2x2Skp2Recip, axon.BackPath) // strong .5 in orig
net.ConnectLayers(dp, v3CT, pts.PT3x3Skp1, axon.BackPath)
// net.ConnectLayers(dpCT, v3CT, full, axon.BackPath)
net.ConnectLayers(v1m, dp, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut!
net.ConnectLayers(v1m, dpCT, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut! // CT good
v3P.PlaceBehind(v3CT, space)
dp.PlaceBehind(v3P, space)
}
//////// V4
if !ss.Config.Run.V4Plus {
goto build
}
v4, v4CT = net.AddSuperCT4D("V4", "", 4, 4, 10, 10, space, pts.PT3x3Skp1) // 3x3 >> p1to1?? orig 1to1
sample2(v4)
sample2(v4CT)
net.ConnectToPulv(v4, v4CT, v1mP, pts.PT4x4Skp2Recip, pts.PT4x4Skp2, "FromV1mP") // 3x3 >> p1to1??
// no V4 -> v1mP: sig worse overall
// net.ConnectLayers(v1mP, v4, pts.PT4x4Skp2, axon.BackPath).AddClass("FromPulv", "FromV1mP")
// net.ConnectLayers(v1mP, v4CT, pts.PT4x4Skp2, axon.BackPath).AddClass("FromPulv", "FromV1mP")
// orig has v4selfct 3x3s1
// net.ConnectLayers(v4CT, v4CT, pts.PT3x3Skp1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
// maint is maybe better:
net.ConnectCTSelf(v4CT, pts.PT3x3Skp1, "V4CTSelf")
net.ConnectLayers(v2, v4, pts.PT4x4Skp2, axon.ForwardPath)
net.ConnectLayers(v4, v2, pts.PT4x4Skp2Recip, axon.BackPath) // actually beneficial on v2, v1 pred
// not useful:
// net.ConnectLayers(v4, v3, pts.PT3x3Skp1, axon.BackPath) // v4 -> v3 but not v3 -> v4
// no V4 <-> LIP
// no FF CT -> CT?
// net.ConnectLayers(v2CT, v4CT, pts.PT4x4Skp2, axon.ForwardPath).AddClass("FwdWeak")
// net.ConnectLayers(v3, v4, pts.PT3x3Skp1, axon.ForwardPath).AddClass("FwdWeak")
// net.ConnectLayers(v4CT, v2CT, pts.PT4x4Skp2Recip, axon.BackPath) // tiny bit worse
// leak from super to CT:
net.ConnectLayers(v4, v2CT, pts.PT2x2Skp2Recip, axon.BackPath) // fails to run without!?
net.ConnectLayers(v1m, v4, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut, not IT
net.ConnectLayers(v1m, v4CT, rndcut, axon.ForwardPath).AddClass("V1SC") // shortcut, not IT
v4.PlaceRightOf(v3, space)
//////// TEO
if !ss.Config.Run.TEOPlus {
goto build
}
teo, teoCT = net.AddSuperCT4D("TEO", "", 4, 4, 10, 10, space, pool1to1)
sample2(teo)
sample2(teoCT)
net.LateralConnectLayer(teo, pool1to1).AddClass("TEOSelfMaint")
// orig has teoselfct 3x3s1 -- todo try, also one with maint
// net.ConnectLayers(teoCT, teoCT, pool1to1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
net.ConnectCTSelf(teoCT, pool1to1, "TEOCTSelf") // 1to1? blows up later
net.ConnectLayers(v4, teo, pts.PT3x3Skp1, axon.ForwardPath)
net.ConnectLayers(teo, v4, pts.PT3x3Skp1, axon.BackPath)
net.ConnectLayers(teo, v3, pts.PT3x3Skp1, axon.BackPath) // teo -> v3 but not v3 -> teo
// net.ConnectLayers(v4CT, teoCT, pts.PT3x3Skp1, axon.ForwardPath).AddClass("FwdWeak")
// net.ConnectLayers(teoCT, v2CT, pts.PT4x4Skp2Recip, axon.BackPath) // not needed..
net.ConnectLayers(teoCT, v4CT, pts.PT4x4Skp2Recip, axon.BackPath)
v4P = net.AddPulvForLayer(v4, space).AddClass("V4")
net.ConnectToPulv(teo, teoCT, v4P, pts.PT4x4Skp2Recip, pts.PT4x4Skp2, "FromV4P")
// orig has a "leak" from super -> CT here, helps stabilize reps
// net.ConnectLayers(teo, v2CT, pts.PT4x4Skp2Recip, axon.BackPath) // maybe not
// net.ConnectLayers(teo, v3CT, pts.PT3x3Skp1, axon.BackPath)
// net.ConnectLayers(teo, v4CT, pts.PT3x3Skp1, axon.BackPath) // no diff really
net.ConnectLayers(v1m, teo, rndcut, axon.ForwardPath).AddClass("V1SCIT") // shortcut!
net.ConnectLayers(v1m, teoCT, rndcut, axon.ForwardPath).AddClass("V1SCIT") // shortcut!
v4P.PlaceBehind(v4CT, space)
teo.PlaceRightOf(eyepos, space)
//////// TE
if !ss.Config.Run.TE {
goto build
}
te, teCT = net.AddSuperCT4D("TE", "", 2, 2, 10, 10, space, pool1to1)
sample2(te)
sample2(teCT)
net.LateralConnectLayer(te, pool1to1).AddClass("TESelfMaint")
// net.ConnectLayers(teCT, teCT, pool1to1, axon.CTCtxtPath).AddClass("CTSelfCtxt")
net.ConnectCTSelf(teCT, pool1to1, "TECTSelf") // maint plus does better
net.ConnectLayers(teo, te, full, axon.ForwardPath)
net.ConnectLayers(te, teo, full, axon.BackPath)
net.ConnectLayers(te, v4, full, axon.BackPath) // te -> v3 but not v3 -> te
net.ConnectLayers(teCT, v4CT, full, axon.BackPath)
net.ConnectLayers(teCT, teoCT, full, axon.BackPath)
teoP = net.AddPulvForLayer(teo, space).AddClass("TEO")
net.ConnectToPulv(te, teCT, teoP, full, full, "FromTEOP")
net.ConnectLayers(v1m, te, rndcut, axon.ForwardPath).AddClass("V1SCIT") // shortcut!
net.ConnectLayers(v1m, teCT, rndcut, axon.ForwardPath).AddClass("V1SCIT") // shortcut!
teoP.PlaceBehind(teoCT, space)
te.PlaceRightOf(teo, space)
build:
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
ss.InitWeights(net)
mpi.Println(net.SizeReport(false))
// adding each additional layer type improves decoding..
// layers := []emer.Layer{v4f16, v4f8, teo16, teo8, out}
// layers := []emer.Layer{teo16, teo8, out}
// layers := []emer.Layer{teo16, teo8}
// layers := []emer.Layer{out}
// todo: decoder
// ss.Decoder.InitLayer(len(trn.Images.Cats), layers)
// ss.Decoder.Lrate = 0.05 // 0.05 > 0.1 > 0.2 for larger number of objs!
// if ss.Config.Run.MPI {
// ss.Decoder.Comm = ss.Comm
// }
}
func (ss *Sim) SetTopoScales(net *axon.Network, send, recv string, pooltile *paths.PoolTile) {
return // TODO:
// slay := net.LayerByName(send)
// rlay := net.LayerByName(recv)
// pt, _ := rlay.RecvPathBySendName(send)
// scales := &tensor.Float32{}
// pooltile.TopoWeights(&slay.Shape, &rlay.Shape, scales)
// TODO: this function does not exist:
// pt.SetScalesRPool(scales)
}
func (ss *Sim) InitWeights(net *axon.Network) {
// net.InitTopoScales() // sets all wt scales
pts := &ss.Paths
// these are not set automatically because the pathway type is Full, not PoolTile
ss.SetTopoScales(net, "EyePos", "LIP", pts.PTGaussTopo)
ss.SetTopoScales(net, "SacPlan", "LIP", pts.PTSigTopo)
ss.SetTopoScales(net, "ObjVel", "LIP", pts.PTSigTopo)
ss.SetTopoScales(net, "LIP", "LIPCT", pts.PT3x3Skp1)
ss.SetTopoScales(net, "EyePos", "LIPCT", pts.PTGaussTopo)
ss.SetTopoScales(net, "Saccade", "LIPCT", pts.PTSigTopo)
ss.SetTopoScales(net, "ObjVel", "LIPCT", pts.PTSigTopo)
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
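// IntMultipleGE rounds Trials up to an even multiple of NData (per the Trials doc),
// so with the defaults (Trials=512, NData=8) this remains 512 trials per epoch.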
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.OnStart.Add("SaveWeightsAt", func() {
epc := trainEpoch.Counter.Cur
for _, se := range ss.Config.Log.SaveWeightsAt {
if epc != se {
continue
}
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, epc)
axon.SaveWeights(ss.Net, ctrString, ss.RunName())
ss.RSASaveRActs("RSARActs_" + ss.RunName() + "_" + ctrString + ".tar.gz")
}
})
trainEpoch.OnStart.Add("TurnOnAdaptGi", func() {
epc := trainEpoch.Counter.Cur
if epc != 400 {
return
}
lays := ss.Net.LayersByType(axon.CTLayer)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
if ly == nil {
continue
}
ly.Params.Inhib.ActAvg.AdaptGi.SetBool(true)
}
axon.ToGPUParams()
fmt.Println("At epoch:", epc, "turned on AdaptGi in CT layers")
})
// trainEpoch.OnStart.Add("TestAtInterval", func() {
// if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
// ss.TestAll()
// }
// })
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
if ss.Config.Log.SaveWeights {
ss.RSASaveRActs("RSARActs_" + ss.RunName() + "_" + ctrString + ".tar.gz")
}
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := ss.Net.Context()
ndata := int(ctx.NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*Obj3DSacEnv)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
ev.Step()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExtAll(ctx, st)
}
}
for di := uint32(0); di < ctx.NData; di++ {
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.TrialName(int(di)), int(di))
}
net.ApplyExts()
ss.UpdateImage()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.ApplyParams() // must reapply due to changes @250
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by the Looper at the start of a given level, for each iteration.
// It calls RunStats with the Start phase at the next level down,
// e.g., the start of each Epoch resets the accumulated Trial-level stats.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
// ev := ss.Envs.ByMode(Train).(*Obj3DSacEnv)
// tbs.TensorGrid("Image", &ev.Vis.ImgTsr)
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
corSimFunc := ss.StatCorSim()
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
corSimFunc(mode, level, phase)
})
prevCorFunc := ss.StatPrevCorSim()
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
prevCorFunc(mode, level, phase)
})
if ss.Config.Run.V2Plus {
slays := net.LayersByType(axon.SuperLayer)
slays = slices.DeleteFunc(slays, func(s string) bool {
return s == "MTpos"
})
slays = append(slays, "V1m")
rsaFunc := ss.StatRSA(slays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
rsaFunc(mode, level, phase)
})
}
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.PulvinarLayer, axon.InputLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
giMultFunc := axon.StatLayerGiMult(ss.Stats, net, Train, Epoch, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
giMultFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
}
// StatCorSim returns a Stats function that records 1 - [LayerPhaseDiff] stats,
// i.e., correlation-based similarity, for all Pulvinar layers.
func (ss *Sim) StatCorSim() func(mode Modes, level Levels, phase StatsPhase) {
net := ss.Net
layers := net.LayersByType(axon.PulvinarLayer)
ticks := []string{"", "0", "Foc", "Sac"}
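// tick-filtered variants of the stat: "" = all ticks, "0" = only tick 0,
// "Foc" = only odd ticks, "Sac" = only nonzero even ticks (see switch below);
// the Foc / Sac names presumably reflect the alternation between fixation
// and saccade ticks in the Obj3DSac trajectories.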
return func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String())
ndata := int(net.Context().NData)
for _, lnm := range layers {
for _, t := range ticks {
ly := net.LayerByName(lnm)
li := ly.Params.Index
name := lnm + "_CorSim" + t
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
})
continue
}
switch level {
case Trial:
ev := ss.Envs.ByMode(mode).(*Obj3DSacEnv)
tick := ev.Tick.Cur
for di := range ndata {
nan := math.NaN()
stat := 1.0 - float64(axon.LayerStates.Value(int(li), int(di), int(axon.LayerPhaseDiff)))
switch t {
case "":
case "0":
if tick != 0 {
stat = nan
}
case "Foc":
if tick == 0 || tick%2 == 0 {
stat = nan
}
case "Sac":
if tick == 0 || tick%2 == 1 {
stat = nan
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(float64(stat))
}
case Run:
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
// StatPrevCorSim returns a Stats function that computes correlations
// between the previous trial's activity state and the current minus-phase
// and plus-phase states. This is important for predictive learning.
// Also the super layer stats track overall representation change over time.
func (ss *Sim) StatPrevCorSim() func(mode Modes, level Levels, phase StatsPhase) {
net := ss.Net
layers := net.LayersByType(axon.PulvinarLayer, axon.SuperLayer)
ticks := []string{"", "0", "Foc", "Sac"}
statNames := []string{"PrevToM", "PrevToP"}
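// PrevToM correlates the previous trial's CaD activity with the current
// minus-phase ActM; PrevToP correlates it with the current plus-phase ActP
// (see the switch on si below).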
return func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String())
ndata := int(net.Context().NData)
for _, lnm := range layers {
for _, t := range ticks {
for si, statName := range statNames {
ly := net.LayerByName(lnm)
name := lnm + "_" + statName + t
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
})
continue
}
switch level {
case Trial:
// note: current lnm + _var is standard reusable unit vals buffer
actM := curModeDir.Float64(lnm+"_ActM", ly.GetSampleShape().Sizes...)
actP := curModeDir.Float64(lnm+"_ActP", ly.GetSampleShape().Sizes...)
// note: CaD is sufficiently stable that it is fine to compare with ActM and ActP
prev := curModeDir.Float64(lnm+"_CaDPrev", ly.GetSampleShape().Sizes...)
ev := ss.Envs.ByMode(mode).(*Obj3DSacEnv)
tick := ev.Tick.Cur
for di := range ndata {
nan := math.NaN()
ly.UnitValuesSampleTensor(prev, "CaDPrev", di)
prev.SetShapeSizes(prev.Len()) // set to 1D -- inexpensive and faster for computation
var stat float64
switch si {
case 0:
ly.UnitValuesSampleTensor(actM, "ActM", di)
actM.SetShapeSizes(actM.Len())
cov := metric.Correlation(actM, prev)
stat = cov.Float1D(0)
case 1:
ly.UnitValuesSampleTensor(actP, "ActP", di)
actP.SetShapeSizes(actP.Len())
cov := metric.Correlation(actP, prev)
stat = cov.Float1D(0)
}
switch t {
case "":
case "0":
if tick != 0 {
stat = nan
}
case "Foc":
if tick == 0 || tick%2 == 0 {
stat = nan
}
case "Sac":
if tick == 0 || tick%2 == 1 {
stat = nan
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Run:
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
}
// StatCounters returns a counters string to show at the bottom of the netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
ev := ss.Envs.ByMode(mode).(*Obj3DSacEnv)
counters += fmt.Sprintf(" Tick: %d", ev.Tick.Cur)
if level == Cycle {
return counters
}
statNames := []string{"MTposP_CorSim", "V1mP_CorSim"}
for _, name := range statNames {
if curModeDir.Node(name) != nil {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.03
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.3, 2.15)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, -.1, .05), math32.Vec3(0, 1, 0))
ss.StatsInit()
trn := ss.Envs.ByMode(Train).(*Obj3DSacEnv)
img := trn.V1c.Image.Tsr.SubSpace(0).(*tensor.Float32)
tensorcore.AddGridStylerTo(img, func(s *tensorcore.GridStyle) {
s.Image = true
s.Range.SetMin(0)
})
ss.GUI.Tabs.TensorGrid("Image", img)
ss.RSAGUI()
ss.GUI.Tabs.SelectTabIndex(0)
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) UpdateImage() {
if !ss.Config.GUI {
return
}
ss.GUI.Tabs.TabUpdateRender("Image")
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "Open RAvgs",
Icon: icons.Open,
Tooltip: "Open running-average activation data from tar file, and run stats on data.",
Active: egui.ActiveAlways,
Func: func() {
core.CallFunc(ss.GUI.Body, ss.RSAOpenRActs)
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
if ss.Config.Run.StartWeights != "" {
ss.Loops.Loop(Train, Epoch).Counter.Cur = ss.Config.Run.StartEpoch
}
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/deepvision"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[deepvision.Sim, deepvision.Config]() }
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package deepvision
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1, 2}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 3
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1, `NovelTrain`: 2}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``, 2: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`, 2: `NovelTrain`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 4
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepvision
import (
"fmt"
"image"
"path/filepath"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/fsx"
"cogentcore.org/core/base/iox/imagex"
"cogentcore.org/core/base/iox/jsonx"
"cogentcore.org/core/base/slicesx"
"cogentcore.org/core/gpu"
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/popcode"
"github.com/emer/v1vision/v1std"
"github.com/emer/v1vision/v1vision"
)
// TrialState contains the state for a given trial.
// Trials are processed data-parallel per Step().
type TrialState struct {
// Cat is the current object category
Cat string
// Obj is the current object
Obj string
// Image is the rendered image as loaded
Image image.Image `display:"-"`
}
// Obj3DSacEnv provides the rendered results of the Obj3D + Saccade generator.
type Obj3DSacEnv struct {
// Name of this environment (Train, Test mode).
Name string
// NData is the number of steps to process in data-parallel.
NData int
// Path to data.tsv file as rendered, e.g., images/train.
Path string
// Trials has NData TrialState entries (one per data-parallel item) for the last Step().
Trials []TrialState
// Table of generated trial / tick data.
Table *table.Table
// EyePop is the 2D population code for gaussian bump rendering
// of eye position.
EyePop popcode.TwoD
// SacPop is the 2d population code for gaussian bump rendering
// of saccade plan / execution.
SacPop popcode.TwoD
// ObjVelPop is the 2d population code for gaussian bump rendering
// of object velocity.
ObjVelPop popcode.TwoD
// V1c has the full set of V1c complex and DoG color contrast filters.
V1c v1std.V1cMulti
// Objs is the list of objects, as cat/objfile.
Objs []string
// Cats is the list of categories.
Cats []string
// TrialCtr counts each object trajectory
TrialCtr env.Counter `display:"inline"`
// Tick counts each step along the trajectory
Tick env.Counter `display:"inline"`
// current rendered state tensors
CurStates map[string]*tensor.Float32
// Rand is the random number generator for the env.
// All random calls must use this.
// Set seed here for weight initialization values.
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
}
func (ev *Obj3DSacEnv) Label() string { return ev.Name }
func (ev *Obj3DSacEnv) Trial(di int) *TrialState {
return &ev.Trials[di]
}
func (ev *Obj3DSacEnv) Defaults() {
// images is a symlink, e.g., /Users/oreilly/ccn_images/CU3D_100_20obj8inst_8tick4sac
// https://drive.google.com/drive/folders/13Mi9aUlF1A3sx3JaofX-qzKlxGoViT86?usp=sharing
// CU3D_100_20obj8inst_8tick4sac.tar
ev.Path = "images/train"
ev.EyePop.Defaults()
ev.EyePop.Min.Set(-1.1, -1.1)
ev.EyePop.Max.Set(1.1, 1.1)
ev.EyePop.Sigma.Set(0.1, 0.1)
ev.SacPop.Defaults()
ev.SacPop.Min.Set(-0.45, -0.45)
ev.SacPop.Max.Set(0.45, 0.45)
ev.ObjVelPop.Defaults()
ev.ObjVelPop.Min.Set(-0.45, -0.45)
ev.ObjVelPop.Max.Set(0.45, 0.45)
ev.V1c.Defaults()
ev.V1c.SplitColor = false // false > true
ev.V1c.StdLowMed16DegNoDoG()
ev.Tick.Max = 8 // important: must be sync'd with actual data
}
func (ev *Obj3DSacEnv) Config(ndata int, netGPU *gpu.GPU) {
ev.NData = ndata
v1vision.ComputeGPU = netGPU
ev.Trials = slicesx.SetLength(ev.Trials, ndata)
ev.V1c.Config(ndata)
ev.CurStates = make(map[string]*tensor.Float32)
ev.CurStates["EyePos"] = tensor.NewFloat32(ndata, 21, 21)
ev.CurStates["SacPlan"] = tensor.NewFloat32(ndata, 11, 11)
ev.CurStates["Saccade"] = tensor.NewFloat32(ndata, 11, 11)
ev.CurStates["ObjVel"] = tensor.NewFloat32(ndata, 11, 11)
}
func (ev *Obj3DSacEnv) Init(run int) {
ev.RandSeed = int64(73 + run)
if ev.Rand.Rand == nil {
ev.Rand.NewRand(ev.RandSeed)
} else {
ev.Rand.Seed(ev.RandSeed)
}
ev.TrialCtr.Init()
ev.TrialCtr.Max = 0
ev.TrialCtr.Same()
ev.Tick.Init()
ev.Tick.Cur = -1
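// starting at -1 means the first Step() call increments Tick to 0
// (assuming env.Counter.Incr advances Cur before it is used for row indexing),
// so processing begins at the start of a trajectory.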
}
// OpenTable loads the data.tsv file at Path.
// Only do this for one env and copy the Table to the others.
func (ev *Obj3DSacEnv) OpenTable() {
if ev.Table == nil {
ev.Table = table.New("obj3dsac_data")
}
fnm := filepath.Join(ev.Path, "data.tsv")
errors.Log(ev.Table.OpenCSV(fsx.Filename(fnm), tensor.Tab))
errors.Log(jsonx.Open(&ev.Objs, filepath.Join(ev.Path, "objs.json")))
errors.Log(jsonx.Open(&ev.Cats, filepath.Join(ev.Path, "cats.json")))
}
// OpenImage opens current image.
func (ev *Obj3DSacEnv) OpenImage(row int) (image.Image, error) {
ifnm := ev.Table.Column("ImgFile").StringRow(row, 0)
fnm := filepath.Join(ev.Path, ifnm)
img, _, err := imagex.Open(fnm)
return img, errors.Log(err)
}
// EncodePops encodes population codes from current row data
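// Each pop code renders a 2D gaussian bump over its grid (21x21 for EyePos,
// 11x11 for SacPlan, Saccade, and ObjVel, matching the input layer sizes),
// centered at the 2D value read from the table row, within the Min / Max
// ranges configured in Defaults.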
func (ev *Obj3DSacEnv) EncodePops(row, di int) {
val := math32.Vector2{}
val.X = float32(ev.Table.Column("EyePos").FloatRow(row, 0))
val.Y = float32(ev.Table.Column("EyePos").FloatRow(row, 1))
ps := ev.CurStates["EyePos"].SubSpace(di).(*tensor.Float32)
ev.EyePop.Encode(ps, val, popcode.Set)
ps = ev.CurStates["SacPlan"].SubSpace(di).(*tensor.Float32)
val.X = float32(ev.Table.Column("SacPlan").FloatRow(row, 0))
val.Y = float32(ev.Table.Column("SacPlan").FloatRow(row, 1))
ev.SacPop.Encode(ps, val, popcode.Set)
ps = ev.CurStates["Saccade"].SubSpace(di).(*tensor.Float32)
val.X = float32(ev.Table.Column("Saccade").FloatRow(row, 0))
val.Y = float32(ev.Table.Column("Saccade").FloatRow(row, 1))
ev.SacPop.Encode(ps, val, popcode.Set)
ps = ev.CurStates["ObjVel"].SubSpace(di).(*tensor.Float32)
val.X = float32(ev.Table.Column("ObjVel").FloatRow(row, 0))
val.Y = float32(ev.Table.Column("ObjVel").FloatRow(row, 1))
ev.ObjVelPop.Encode(ps, val, popcode.Set)
}
// SetCtrs sets ctrs from current row data, returns the current row
func (ev *Obj3DSacEnv) SetCtrs(st *TrialState, di int) int {
row := (ev.TrialCtr.Cur+di)*ev.Tick.Max + ev.Tick.Cur
row = row % ev.Table.NumRows()
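// rows in data.tsv are laid out as trajectories of Tick.Max consecutive ticks:
// e.g., with Tick.Max=8, TrialCtr.Cur=2, di=1, Tick.Cur=3, row = (2+1)*8 + 3 = 27;
// the modulo wraps around the table after the first pass through the data.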
// trial := ev.Table.Column("Trial").IntRow(row, 0)
// if ev.TrialCtr.Cur+di != trial { // note: this is expected after first epoch!
// fmt.Println("error trial mismatch:", row, ev.TrialCtr.Cur+di, trial)
// }
tick := ev.Table.Column("Tick").IntRow(row, 0)
if ev.Tick.Cur != tick {
fmt.Println("error tick mismatch:", row, ev.Tick.Cur, tick)
}
st.Cat = ev.Table.Column("Cat").StringRow(row, 0)
st.Obj = ev.Table.Column("Obj").StringRow(row, 0)
return row
}
func (ev *Obj3DSacEnv) String() string {
return ev.TrialName(0)
}
// TrialName returns the string rep of the env state
func (ev *Obj3DSacEnv) TrialName(di int) string {
st := ev.Trial(di)
return fmt.Sprintf("%s:%s_%d", st.Cat, st.Obj, ev.Tick.Cur)
}
func (ev *Obj3DSacEnv) Step() bool {
if ev.Tick.Incr() {
ev.TrialCtr.Cur += ev.NData
}
imgs := make([]image.Image, ev.NData)
for di := range ev.NData {
st := ev.Trial(di)
row := ev.SetCtrs(st, di)
ev.EncodePops(row, di)
img, err := ev.OpenImage(row)
if err != nil {
continue
}
imgs[di] = img
}
ev.V1c.RunImages(imgs...)
return true
}
func (ev *Obj3DSacEnv) State(element string) tensor.Values {
switch element {
case "V1m": // todo: L and M actually
return &ev.V1c.V1cParams[0].Output
case "V1h":
return &ev.V1c.V1cParams[1].Output
default:
return ev.CurStates[element]
}
}
func (ev *Obj3DSacEnv) Action(element string, input tensor.Values) {
// nop
}
// Compile-time check that Obj3DSacEnv implements the env.Env interface.
var _ env.Env = (*Obj3DSacEnv)(nil)
// Copyright (c) 2021, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepvision
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
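// Extra sheet names selected via Config.Params.Sheet (passed to Params.Config
// in ConfigSim) are applied on top of Base.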
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "needs some special inhibition and learning params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 for most layers
ly.Inhib.ActAvg.Offset = 0.008 // good default
ly.Inhib.Layer.Gi = 1.1 // 1.1 def, 1.0 for lower layers is best
ly.Inhib.Pool.Gi = 1.1 // "
ly.Inhib.Layer.FB = 1 // setting for layers below
ly.Inhib.Pool.FB = 1
ly.Inhib.Layer.ClampExtMin = 0.0 // 0.05 default doesn't activate output!
ly.Inhib.Pool.ClampExtMin = 0.0
ly.Inhib.ActAvg.AdaptRate = 0.02 // 0.02 is slowest that tracks reasonably close
ly.Inhib.ActAvg.AdaptMax = 0.01 // 0.05 default; 0.01 has effect; lower not effective at preventing instability on its own.
ly.Inhib.ActAvg.LoTol = 0.8
ly.Inhib.ActAvg.HiTol = 0.0
ly.Acts.Dt.LongAvgTau = 100 // 100 >= 200
ly.Acts.Decay.Act = 0.0 // 0 == .2
ly.Acts.Decay.Glong = 0.3 // 0.3 > 0.2, 0.1, higher
ly.Acts.Dend.SSGi = 2 // 2 new default
ly.Acts.Dend.GExp = 0.2 // 0.2 > 0.1 > 0
ly.Acts.Dend.GR = 3 // 2 good for 0.2
ly.Acts.Dt.VmDendC = 500 // 500 def
ly.Acts.GabaB.Gk = 0.015 // 0.015 (def) > 0.012
ly.Acts.NMDA.Ge = 0.006 // 0.006 def > 0.005
ly.Acts.NMDA.MgC = 1.4 // mg1, voff0, gbarexp.2, gbarr3 = better
ly.Acts.NMDA.Voff = 0 // mg1, voff0 = mg1.4, voff5 w best params
ly.Acts.AK.Gk = 0.1
ly.Acts.VGCC.Ge = 0.02 // non nmda: 0.15 good, 0.3 blows up, nmda: .02 best
ly.Acts.VGCC.Ca = 25 // 25 / 10tau same as SpkVGCC
ly.Acts.Mahp.Gk = 0.05 // 0.05 def
ly.Acts.Sahp.Gk = 0.05 // 0.05 > 0.1
ly.Acts.Sahp.Off = 0.8 //
ly.Acts.Sahp.Slope = 0.02 //
ly.Acts.Sahp.CaTau = 5 // 5 ok -- not tested
ly.Acts.KNa.On.SetBool(true) // true, .05 > false
ly.Acts.KNa.Med.Gk = 0.1 // 0.1 > 0.05 -- 0.05 blows up in lvis
ly.Acts.KNa.Slow.Gk = 0.1
ly.Learn.CaLearn.Dt.MTau = 2 // 2 == 5?
ly.Learn.CaLearn.ETraceTau = 4
ly.Learn.CaLearn.ETraceScale = 0.1 // 0.05 similar to 0
ly.Learn.CaSpike.SpikeCaM = 12 // 12 > 8 -- dv too (lvis)
ly.Learn.CaSpike.SpikeCaSyn = 12 // 12 >> 8 -- "
ly.Learn.CaSpike.CaSynTau = 30 // 30 > 20, 40
ly.Learn.CaSpike.Dt.MTau = 5 // 5 > 10?
ly.Learn.LearnNMDA.Ge = 0.006 // 0.006 def
ly.Learn.LearnNMDA.MgC = 1.4 // 1.2 for unified Act params, else 1.4
ly.Learn.LearnNMDA.Voff = 0 // 0 for unified Act params, else 5
ly.Learn.LearnNMDA.Tau = 100 // 100 def
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // critical!
ly.Learn.TrgAvgAct.SubMean = 0 // 0 > 1 key throughout -- even .5 slows learning -- doesn't help slow pca
ly.Learn.TrgAvgAct.SynScaleRate = 0.002 // 0.002 >= 0.005 > 0.001 > 0.0005 too weak even with adapt gi
ly.Learn.TrgAvgAct.ErrLRate = 0.02 // 0.02 def
ly.Learn.RLRate.On.SetBool(true) // beneficial for trace
ly.Learn.RLRate.SigmoidMin = 0.05
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false >> true
ly.Learn.RLRate.Diff.SetBool(true)
ly.Learn.RLRate.DiffThr = 0.02 // 0.02 def - todo
ly.Learn.RLRate.SpikeThr = 0.1 // 0.1 def
ly.Learn.RLRate.Min = 0.001
ly.Learn.Timing.On.SetBool(false) // time > trial!
ly.Learn.Timing.Refractory.SetBool(true) // ref > not
// ly.Learn.Timing.LearnThr = 0.1
// ly.Learn.Timing.SynCaCycles = 160
// ly.Learn.Timing.Cycles = 170
// ly.Learn.Timing.TimeDiffTau = 4
}},
{Sel: ".InputLayer", Doc: "all V1 input layers",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.FB = 1 // keep normalized
ly.Inhib.Pool.FB = 1
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.9 // was 0.9
ly.Inhib.Pool.Gi = 0.9 // 0.9 >= 1.1 def -- more activity
ly.Inhib.ActAvg.Nominal = 0.05 // .06 for !SepColor actuals: V1m8: .04, V1m16: .03
ly.Acts.Clamp.Ge = 1.5 // was 1.0
ly.Acts.Decay.Act = 1 // these make no diff
ly.Acts.Decay.Glong = 1
}},
{Sel: ".PopCode", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.FB = 1 // keep normalized
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.Gi = 0.9 // 0.9
ly.Inhib.ActAvg.Nominal = 0.1
ly.Acts.Clamp.Ge = 1.5 // was 1.0
}},
{Sel: "#EyePos", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04
}},
{Sel: ".CTLayer", Doc: "CT NMDA gbar factor is key",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.12 // CT in general more active
ly.CT.GeGain = 1.0 // 1 > 1.5
ly.CT.DecayTau = 0 // 0 >> 100
ly.Acts.Dend.SSGi = 2 // 0 > higher -- kills nmda maint!
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // 0 > 0.1
ly.Acts.GabaB.Gk = 0.015 // 0.015 standard gaba
ly.Acts.NMDA.Ge = 0.006
ly.Acts.NMDA.Tau = 100
ly.Acts.MaintNMDA.Ge = 0.006
ly.Acts.MaintNMDA.Tau = 100
}},
{Sel: ".PulvinarLayer", Doc: "Pulvinar",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.8 // 0.8 good -- was 0.9
ly.Pulvinar.DriveScale = 0.1 // 0.1 > 0.15 -- does not work with 0.05
ly.Pulvinar.FullDriveAct = 0.6 // 0.6 def
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Acts.Decay.AHP = 0.0 // clear long
ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > .05
}},
//////// V1
{Sel: "#V1m", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
}},
{Sel: "#V1mP", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.0
ly.Inhib.Pool.Gi = 0.85 // .85 >= .8 > .9 > higher for later perf
}},
{Sel: "#V1h", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03
}},
//////// LIP
{Sel: ".LIP", Doc: "pool inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02 // ~0.02 actual
ly.Inhib.ActAvg.AdaptGi.SetBool(false) // adapt not needed
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
ly.Inhib.Layer.FB = 1 // 1
ly.Inhib.Layer.Gi = 1.2 // 1.2 > lower for sure
ly.Inhib.Pool.FB = 4 // 4 == 2 > 1
ly.Inhib.Pool.Gi = 1 // 0.95 and lower = higher actmax, but worse corsim
}},
{Sel: "#LIPCT", Doc: "pool inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.03 initial, goes up to .04..
// ly.Inhib.ActAvg.AdaptGi.SetBool(false) // not needed
// note: tried layer, pool Gi independent of LIP and same values are best here.
}},
{Sel: ".MTpos", Doc: "layer inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.1 // note: has no effect due to 1to1 cons! actual .15
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.FB = 1
ly.Inhib.Layer.Gi = 1 // 1 == 0.9 -- no advantage, 1 better matches P
}},
{Sel: "#MTposP", Doc: "layer inhib",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1 > higher, lower
}},
//////// V2
{Sel: ".V2", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1
ly.Inhib.Pool.Gi = 1.05 // 1.05 > others
}},
{Sel: "#V2CT", Doc: "more inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02
ly.Inhib.ActAvg.AdaptGi.SetBool(false) // adapt @250
ly.Inhib.Layer.Gi = 1.2 // 1.2 == 1.15 > lower
ly.Inhib.Pool.Gi = 1.2 // 1.2 == 1.15 > lower
}},
//////// V3
{Sel: ".V3", Doc: "pool inhib, denser activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1
ly.Inhib.Pool.Gi = 1.05 // 1.05 > 1
// ly.Acts.GabaB.Gk = 0.015 // 0.015 > 0.012 with shortcuts
// ly.Acts.Decay.Glong = 0.3 // 0.3 > 0.6
}},
{Sel: "#V3CT", Doc: "more activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04
ly.Inhib.Layer.Gi = 1.1 // 1.1 > 1.2 > 1
ly.Inhib.Pool.Gi = 1.1 // 1.1 > 1.2 > 1
}},
//////// DP
{Sel: ".DP", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1
ly.Inhib.Pool.Gi = 1.05 // 1.05 > 1
}},
{Sel: "#DPCT", Doc: "more activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 better but needs stronger V1mP output
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Layer.Gi = 1.1 // 1.1 > 1.2
ly.Inhib.Pool.Gi = 1.1 // 1.1 > 1.2
}},
//////// V4
{Sel: ".V4", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1
ly.Inhib.Pool.Gi = 1.05 // 1.05 > 1
}},
{Sel: "#V4CT", Doc: "more activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 better but needs stronger V1mP output
ly.Inhib.Layer.Gi = 1.1 // 1.1
ly.Inhib.Pool.Gi = 1.1 // 1.1
}},
//////// TEO
{Sel: ".TEO", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 > 0.03
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.1 // 1
ly.Inhib.Pool.Gi = 1.1 // 1.05
// ly.Acts.Decay.Glong = 0 // 0.3 def > 0
ly.Learn.Timing.Refractory.SetBool(false)
}},
{Sel: "#TEOCT", Doc: "more activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05 // 0.05 > 0.04 but needs stronger V4P output
ly.Inhib.ActAvg.AdaptGi.SetBool(true)
ly.Inhib.Layer.Gi = 1.25 // 1.25 > lower
ly.Inhib.Pool.Gi = 1.25 // 1.25 > lower
}},
//////// TE
{Sel: ".TE", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 > 0.03
ly.Inhib.ActAvg.AdaptGi.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.1 // 1
ly.Inhib.Pool.Gi = 1.1 // 1.05
// ly.Acts.Decay.Glong = 0 // 0.3 def > 0
ly.Learn.Timing.Refractory.SetBool(false)
}},
{Sel: "#TECT", Doc: "more activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05 // was .04
ly.Inhib.Layer.Gi = 1.25 // 1.25 > lower
ly.Inhib.Pool.Gi = 1.25 // 1.25 > lower
}},
},
}
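// Illustrative sketch (not part of the tuned params): additional sheets can be
// layered on top of "Base" by adding another named entry to the LayerParams map
// and selecting it by name. Within a sheet, selectors are applied in order, so
// later, more specific ones (e.g. "#V1m") override earlier, broader ones
// (e.g. "Layer"). The sheet name and values below are hypothetical examples only:
// "Hypothetical": {
// 	{Sel: ".V4", Doc: "example: denser V4 activity",
// 		Set: func(ly *axon.LayerParams) {
// 			ly.Inhib.ActAvg.Nominal = 0.04
// 		}},
// },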
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "exploring",
Set: func(pt *axon.PathParams) {
pt.SWts.Adapt.On.SetBool(true) // true > false, esp in cosdiff
pt.SWts.Adapt.LRate = 0.0002 // 0.0002 (lvis) == 0.001
pt.SWts.Adapt.SubMean = 1 // 1 > 0 -- definitely needed
pt.SWts.Adapt.HiMeanDecay = 0.0008 // 0.0008 best
pt.SWts.Adapt.HiMeanThr = 0.5 // 0.5, 0.0008 goes the distance
pt.SWts.Init.SPct = 1 // 1 > 0.5
pt.Learn.LRate.Base = 0.0005 // 0.001 > 0.005 > higher
pt.Learn.DWt.SubMean = 1 // 1 > 0 for trgavg weaker
pt.Learn.DWt.CaPScale = 1 // Env10: 1
pt.Learn.DWt.SynCa20.SetBool(false)
}},
{Sel: ".BackPath", Doc: "top-down back-projections MUST have lower relative weight scale, otherwise network hallucinates -- smaller as network gets bigger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
// pt.Learn.LRate.Base = 0
}},
{Sel: ".FwdWeak", Doc: "weak feedforward pathway",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
// pt.Learn.LRate.Base = 0
}},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.SubMean = 0 // 0 > 1
pt.Learn.DWt.SynTraceTau = 2 // 2 > 1: faster start with 1, but then fails later
}},
{Sel: ".CTSelfCtxt", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 //
pt.PathScale.Abs = 0.2 // 0.5 orig?
}},
{Sel: ".CTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.2
pt.Com.GType = axon.MaintG
}},
{Sel: ".FromPulv", Doc: "defaults to .Back",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2 // 0.2 == 0.1
}},
{Sel: ".Fixed", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0
}},
{Sel: ".V1SC", Doc: "v1 shortcut",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1 // 1 > .8 > .5 for predicting input
pt.SWts.Adapt.On.SetBool(false) // seems better
}},
{Sel: ".V1SCIT", Doc: "v1 shortcut to IT: TEO, TE",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5 // 0.5 = weaker allows better invariant reps to form; .3 too low?
pt.SWts.Adapt.On.SetBool(false) // seems better
}},
//////// LIP
{Sel: "#MTposToLIP", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1
pt.PathScale.Abs = 6 // 6 > 8
}},
{Sel: "#MTposPToLIP", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // higher not better?
}},
{Sel: "#LIPToLIPCT", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.25 // 0.3 == 0.2,.5 > 0.4
}},
{Sel: "#LIPCTToMTposP", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3.0 // 3 == 3.5, 4 > 2.5
}},
//////// V2
{Sel: ".V1V2", Doc: "special SWt params",
Set: func(pt *axon.PathParams) {
// todo: reinvestigate:
// pt.SWts.Init.Mean = 0.4 // .4 here is key!
// pt.SWts.Limit.Min = 0.1 // .1-.7
// pt.SWts.Limit.Max = 0.7 //
pt.PathScale.Abs = 1.0 // 1.4 in lvis
}},
{Sel: "#V2ToV2CT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.2 // 0.2
}},
{Sel: "#V2CTToV1mP", Doc: "more?",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // 1.0 == 1.2 > higher -- could try 1.2 again
}},
//////// V3
{Sel: "#V2ToV3", Doc: "ge is weakish",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2 // 1.2 > 1
}},
{Sel: "#V3ToV3CT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5
}},
{Sel: "#V3CTToV1mP", Doc: "less",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2 // 0.2 > 0.1 > 0.5+
pt.PathScale.Abs = 1 // 1 > 1.5
}},
// {Sel: "#V3ToLIP", Doc: "less?",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.2 // 0.2 == 0.1
// }},
//////// DP
// {Sel: "#V2ToDP", Doc: "ge is weakish",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 1.2 // 1.2 >= 1 > 1.5
// }},
{Sel: "#V3ToDP", Doc: "ge is weakish",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1
pt.PathScale.Abs = 1.0 // 1.0 > 1.2
}},
{Sel: "#DPToDPCT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5
}},
{Sel: "#DPCTToV1mP", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1
}},
//////// V4
{Sel: "#V4ToV4CT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5
}},
{Sel: "#V2ToV4", Doc: "ge is weakish",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2 // 1.2 > 1.0
}},
// {Sel: "#V4ToV2", Doc: "", // no benefit
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.2
// }},
{Sel: "#V4CTToV1mP", Doc: "expt",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1.0 // 1 > .5: improves V1mP sig
pt.PathScale.Abs = 1.5 // 1.5 > 1
}},
{Sel: ".V4CTSelf", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 > 0.2
pt.PathScale.Abs = 0.2 //
}},
//////// TEO
{Sel: "#TEOToTEOCT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5
}},
{Sel: "#V4ToTEO", Doc: "ge is weakish",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2 // 1.2 > 1.0
}},
{Sel: ".TEOSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.2 // 0.2 > 0.3 > 0.1 for v1mP with ok categ; TE most important
pt.Com.GType = axon.MaintG
}},
{Sel: ".TEOCTSelf", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 //
pt.PathScale.Abs = 0.2 // 0.2 == 0.1
}},
{Sel: "#TEOCTToV4P", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1
}},
//////// TE
{Sel: "#TEToTECT", Doc: "overactive",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.5 // 0.5
}},
{Sel: "#TEOToTE", Doc: "ge is weakish",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2 // 1.2 > 1.0
}},
{Sel: ".TESelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.3 // 0.3 for categ -- this is most important
pt.Com.GType = axon.MaintG
}},
{Sel: ".TECTSelf", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 //
pt.PathScale.Abs = 0.2 // 0.2
}},
{Sel: "#TECTToTEOP", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.5 // 1.5 > 1
}},
},
}
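// Illustrative sketch of how these sheets are typically wired into a Sim,
// following the same pattern as the hip sim's ConfigSim later in this codebase
// (assumed to apply to the deepvision Sim as well; the extra sheet name and tag
// come from the config):
// ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))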
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepvision
import "github.com/emer/emergent/v2/paths"
// Paths holds all the special projections.
type Paths struct {
// Standard feedforward topographic projection, recv = 1/2 send size
PT4x4Skp2 *paths.PoolTile
// Reciprocal
PT4x4Skp2Recip *paths.PoolTile
// sparser skip 2 -- no overlap
PT2x2Skp2 *paths.PoolTile
// Reciprocal
PT2x2Skp2Recip *paths.PoolTile
// Standard same-to-same size topographic projection
PT3x3Skp1 *paths.PoolTile
// Sigmoidal topographic projection used in LIP saccade remapping layers
PTSigTopo *paths.PoolTile
// Gaussian topographic projection used in LIP saccade remapping layers
PTGaussTopo *paths.PoolTile
}
func (pj *Paths) Defaults() {
pj.PT4x4Skp2 = paths.NewPoolTile()
pj.PT4x4Skp2.Size.Set(4, 4)
pj.PT4x4Skp2.Skip.Set(2, 2)
pj.PT4x4Skp2.Start.Set(-1, -1)
pj.PT4x4Skp2.TopoRange.Min = 0.8
// but using a symmetric scale range .8 - 1.2 seems like it might be good -- otherwise
// weights are systematically smaller.
// note: gauss defaults on
// pj.PT4x4Skp2.GaussFull.DefNoWrap()
// pj.PT4x4Skp2.GaussInPool.DefNoWrap()
pj.PT4x4Skp2Recip = paths.NewPoolTile()
pj.PT4x4Skp2Recip.Size.Set(4, 4)
pj.PT4x4Skp2Recip.Skip.Set(2, 2)
pj.PT4x4Skp2Recip.Start.Set(-1, -1)
pj.PT4x4Skp2Recip.TopoRange.Min = 0.8 // note: none of these make a very big diff
pj.PT4x4Skp2Recip.Recip = true
pj.PT2x2Skp2 = paths.NewPoolTile()
pj.PT2x2Skp2.Size.Set(2, 2)
pj.PT2x2Skp2.Skip.Set(2, 2)
pj.PT2x2Skp2.Start.Set(0, 0)
pj.PT2x2Skp2.TopoRange.Min = 0.8
pj.PT2x2Skp2Recip = paths.NewPoolTile()
pj.PT2x2Skp2Recip.Size.Set(2, 2)
pj.PT2x2Skp2Recip.Skip.Set(2, 2)
pj.PT2x2Skp2Recip.Start.Set(0, 0)
pj.PT2x2Skp2Recip.TopoRange.Min = 0.8
pj.PT2x2Skp2Recip.Recip = true
pj.PT3x3Skp1 = paths.NewPoolTile()
pj.PT3x3Skp1.Size.Set(3, 3)
pj.PT3x3Skp1.Skip.Set(1, 1)
pj.PT3x3Skp1.Start.Set(-1, -1)
pj.PT3x3Skp1.TopoRange.Min = 0.8 // note: none of these make a very big diff
pj.PTSigTopo = paths.NewPoolTile()
pj.PTSigTopo.GaussOff()
pj.PTSigTopo.Size.Set(1, 1)
pj.PTSigTopo.Skip.Set(0, 0)
pj.PTSigTopo.Start.Set(0, 0)
pj.PTSigTopo.TopoRange.Min = 0.6
pj.PTSigTopo.SigFull.On = true
pj.PTSigTopo.SigFull.Gain = 0.05
pj.PTSigTopo.SigFull.CtrMove = 0.5
pj.PTGaussTopo = paths.NewPoolTile()
pj.PTGaussTopo.Size.Set(1, 1)
pj.PTGaussTopo.Skip.Set(0, 0)
pj.PTGaussTopo.Start.Set(0, 0)
pj.PTGaussTopo.TopoRange.Min = 0.6
pj.PTGaussTopo.GaussInPool.On = false // Full only
pj.PTGaussTopo.GaussFull.Sigma = 0.6
pj.PTGaussTopo.GaussFull.Wrap = true
pj.PTGaussTopo.GaussFull.CtrMove = 1
}
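// Illustrative sketch (hypothetical layer variables, assuming the usual axon
// Network.ConnectLayers(send, recv, pattern, pathType) API): the forward and
// reciprocal PoolTile pairs are intended to be used together, e.g.:
// pj := &Paths{}
// pj.Defaults()
// net.ConnectLayers(v1m, v2, pj.PT4x4Skp2, axon.ForwardPath)
// net.ConnectLayers(v2, v1m, pj.PT4x4Skp2Recip, axon.BackPath)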
// Copyright (c) 2020, The CCNLab Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepvision
import (
"embed"
"os"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/fsx"
"cogentcore.org/core/core"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/metric"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensor/tmath"
"cogentcore.org/lab/tensorcore"
"cogentcore.org/lab/tensorfs"
)
//go:embed expt1_simat.csv
var embedfs embed.FS
type ObjCat struct {
Obj, Cat string
}
var (
Debug = false
// 20 Object categs: IMPORTANT: do not change the order of this list, as it is used
// in various places as the canonical ordering, e.g., for the Expt1 data.
Objs = []string{
"banana",
"layercake",
"trafficcone",
"sailboat",
"trex",
"person",
"guitar",
"tablelamp",
"doorknob",
"handgun",
"donut",
"chair",
"slrcamera",
"elephant",
"piano",
"fish",
"car",
"heavycannon",
"stapler",
"motorcycle",
}
// CanonicalCats is the best-fitting 5-category leabra ("Centroid") categorization.
CanonicalCats = []ObjCat{
{"banana", "1-pyramid"},
{"layercake", "1-pyramid"},
{"trafficcone", "1-pyramid"},
{"sailboat", "1-pyramid"},
{"trex", "1-pyramid"},
{"person", "2-vertical"},
{"guitar", "2-vertical"},
{"tablelamp", "2-vertical"},
{"doorknob", "3-round"},
{"donut", "3-round"},
{"handgun", "3-round"},
{"chair", "3-round"},
{"slrcamera", "4-box"},
{"elephant", "4-box"},
{"piano", "4-box"},
{"fish", "4-box"},
{"car", "5-horiz"},
{"heavycannon", "5-horiz"},
{"stapler", "5-horiz"},
{"motorcycle", "5-horiz"},
}
CanonicalGroups []string // CanonicalCats with repeats all blank, for grouped labels
// Alt1Cats are alternative category assignments.
Alt1Cats = []ObjCat{
{"layercake", "1-vertical"},
{"trafficcone", "1-vertical"},
{"sailboat", "1-vertical"},
{"person", "1-vertical"},
{"guitar", "1-vertical"}, // weaker
{"tablelamp", "1-vertical"}, // weaker
{"donut", "2-box"},
{"piano", "2-box"},
{"handgun", "2-box"},
{"elephant", "2-box"},
{"heavycannon", "3-wheels"},
{"trex", "3-wheels"},
{"motorcycle", "3-wheels"},
{"car", "3-wheels"},
{"slrcamera", "3-wheels"},
{"stapler", "4-horiz"},
{"banana", "4-horiz"},
{"fish", "4-horiz"},
{"chair", "6-chair"},
{"doorknob", "7-doorknob"},
}
rsaStatNames = []string{"RSAvsV1", "RSAvsTE", "RSAvsExpt", "MeanCentroid", "MeanAlt1"}
)
// StatRSA returns a Stats function that records RSA:
// representational similarity analysis stats.
func (ss *Sim) StatRSA(layers ...string) func(mode Modes, level Levels, phase StatsPhase) {
net := ss.Net
return func(mode Modes, level Levels, phase StatsPhase) {
if level < Trial {
return
}
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
interval := ss.Config.Run.RSAInterval
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String()).Dir("RSA")
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String())
ndata := int(net.Context().NData)
for li, lnm := range layers {
if level == Trial {
ev := ss.Envs.ByMode(mode).(*Obj3DSacEnv)
tick := ev.Tick.Cur
for di := range ndata {
if tick == 2 { // using tick 2 for all data
ss.rsaTrial(curModeDir, lnm, ev.Trial(di).Cat, di)
}
}
continue // no actual stats at trial level
}
for si, stnm := range rsaStatNames {
name := lnm + "_" + stnm
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
})
continue
}
switch level {
case Epoch:
hasNew := false
if interval > 0 && trnEpc%interval == 0 {
hasNew = true
if li == 0 && si == 0 {
ss.rsaEpoch(curModeDir, layers...) // puts results in curModeDir
}
}
var stat float64
nr := tsr.DimSize(0)
if nr > 0 {
stat = tsr.FloatRow(nr-1, 0)
}
if hasNew {
stat = curModeDir.Float64(name).Float1D(0)
}
tsr.AppendRowFloat(float64(stat))
case Run:
tsr.AppendRow(stats.StatFinal.Call(subDir.Value(name)))
default:
tsr.AppendRow(stats.StatMean.Call(subDir.Value(name)))
}
}
}
}
}
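// Note on the Epoch case in StatRSA above: RSA stats are only recomputed every
// RSAInterval epochs; on intervening epochs the previous row's value is carried
// forward, so the logged curves stay continuous between RSA computations.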
func (ss *Sim) RSAInit() {
curModeDir := ss.Current.Dir(Train.String()).Dir("RSA")
nc := len(Objs)
smat := curModeDir.Float64("Expt_Smat", nc, nc)
errors.Log(tensor.OpenFS(smat, embedfs, "expt1_simat.csv", tensor.Comma))
mx := stats.Max(tensor.As1D(smat))
tmath.DivOut(smat, mx, smat)
CanonicalGroups = ss.gridLabels(CanonicalCats)
}
func (ss *Sim) gridLabels(cats []ObjCat) []string {
nc := len(cats)
gl := make([]string, nc)
lstcat := ""
for i, oc := range cats {
if oc.Cat != lstcat {
gl[i] = oc.Cat
lstcat = oc.Cat
}
}
return gl
}
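// For example, gridLabels applied to CanonicalCats yields
// ["1-pyramid", "", "", "", "", "2-vertical", "", "", "3-round", ...]:
// only the first row of each category gets a label, giving grouped grid labels.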
var SimMatGridStyle = func(s *tensorcore.GridStyle) {
s.TopZero = true
s.Range.SetMin(0).SetMax(1)
s.ColorMap = core.ColorMapName("Viridis")
s.GridFill = 1
s.DimExtra = 0.15
}
func (ss *Sim) RSAGUI() {
ss.rsaSimMatGrid("Expt_Smat", CanonicalGroups)
}
func (ss *Sim) rsaSimMatGrid(nm string, labels []string) {
curModeDir := ss.Current.Dir(Train.String()).Dir("RSA")
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
smat := curModeDir.Float64(nm)
tensorcore.AddGridStylerTo(smat, SimMatGridStyle)
tg := tbs.TensorGrid(strings.TrimSuffix(nm, "_Smat"), smat)
tg.RowLabels = labels
tg.ColumnLabels = labels
tbs.SelectTabIndex(idx)
}
// RSASaveRActs saves running average activation data to tar file.
func (ss *Sim) RSASaveRActs(fname string) error {
f, err := os.Create(fname)
if errors.Log(err) != nil {
return err
}
defer f.Close()
curModeDir := ss.Current.Dir(Train.String()).Dir("RSA")
err = tensorfs.Tar(f, curModeDir.Dir("RAvgs"), true, nil) // gz
return errors.Log(err)
}
// RSAOpenRActs opens running average activation data from tar file.
func (ss *Sim) RSAOpenRActs(fname fsx.Filename) error { //types:add
f, err := os.Open(string(fname))
if errors.Log(err) != nil {
return err
}
defer f.Close()
curModeDir := ss.Current.Dir(Train.String()).Dir("RSA")
err = errors.Log(tensorfs.Untar(f, curModeDir.Dir("RAvgs"), true))
if err == nil {
ss.RSAStats()
}
return err
}
// rsaTrial accumulates running-average activations for the given layer and object,
// for data-parallel index di, into the RAvgs directory.
func (ss *Sim) rsaTrial(curModeDir *tensorfs.Node, lnm, obj string, di int) {
avgDt := 0.1
avgDtC := 1 - avgDt
ly := ss.Net.LayerByName(lnm)
atsr := curModeDir.Dir("RAvgs").Dir(lnm).Float64(obj, ly.Shape.Sizes...)
varName := "Act"
vtsr := curModeDir.Float32(lnm+"_"+varName, ly.Shape.Sizes...)
ly.UnitValuesTensor(vtsr, varName, di)
nn := int(ly.Shape.Len())
for lni := range nn {
act := vtsr.Float1D(lni)
avg := atsr.Float1D(lni)
avg = avgDtC*avg + avgDt*act
atsr.SetFloat1D(avg, lni)
}
}
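// Note on rsaTrial above: with avgDt = 0.1 this is an exponential running
// average with an effective time constant of roughly 1/avgDt = 10 trials, so
// the stored activations reflect approximately the last ~10 presentations of
// each object.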
// RSAStats runs stats on current data, displaying grids.
// This is run on saved data, in GUI.
func (ss *Sim) RSAStats() {
curModeDir := ss.Current.Dir(Train.String()).Dir("RSA")
ravgs := curModeDir.Dir("RAvgs")
var slays []string
ravgs.NodesFunc(func(nd *tensorfs.Node) bool {
slays = append(slays, nd.Name())
return false
})
// fmt.Println("slays:", slays)
ss.rsaEpoch(curModeDir, slays...)
for _, lnm := range slays {
ss.rsaSimMatGrid(lnm+"_Smat", CanonicalGroups)
}
altgps := ss.gridLabels(Alt1Cats)
for _, lnm := range slays {
ss.rsaSimMatGrid(lnm+"_Alt1_Smat", altgps)
}
}
// rsaEpoch computes all stats at epoch level
func (ss *Sim) rsaEpoch(curModeDir *tensorfs.Node, layers ...string) {
// first get everything per-layer
rsaSimMats(curModeDir, "", CanonicalCats, layers...)
rsaSimMats(curModeDir, "_Alt1", Alt1Cats, layers...)
nc := len(Objs)
v1Smat := curModeDir.Float64("V1m_Smat", nc, nc)
teSmat := curModeDir.Float64("TE_Smat")
exSmat := curModeDir.Float64("Expt_Smat")
for _, lnm := range layers {
smat := curModeDir.Float64(lnm+"_Smat", nc, nc)
v1snm := lnm + "_" + rsaStatNames[0]
v1sim := 1.0
if lnm != "V1m" {
v1sim = metric.Correlation(v1Smat, smat).Float1D(0)
}
curModeDir.Float64(v1snm, 1).SetFloat1D(v1sim, 0)
tesnm := lnm + "_" + rsaStatNames[1]
tesim := 1.0
if teSmat.Len() == nc*nc && lnm != "TE" {
tesim = metric.Correlation(teSmat, smat).Float1D(0)
}
curModeDir.Float64(tesnm, 1).SetFloat1D(tesim, 0)
exsnm := lnm + "_" + rsaStatNames[2]
exsim := metric.Correlation(exSmat, smat).Float1D(0)
curModeDir.Float64(exsnm, 1).SetFloat1D(exsim, 0)
mcnm := lnm + "_" + rsaStatNames[3]
acd := AvgContrastDist(smat, CanonicalCats)
curModeDir.Float64(mcnm, 1).SetFloat1D(acd, 0)
asmat := curModeDir.Float64(lnm+"_Alt1_Smat", nc, nc)
mcnm = lnm + "_" + rsaStatNames[4]
acd = AvgContrastDist(asmat, Alt1Cats)
curModeDir.Float64(mcnm, 1).SetFloat1D(acd, 0)
}
}
// rsaSimMats computes the similarity matrices from the running average acts.
func rsaSimMats(curModeDir *tensorfs.Node, typNm string, cats []ObjCat, layers ...string) {
nc := len(Objs)
for _, lnm := range layers {
// Canonical simat
smat := curModeDir.Float64(lnm+typNm+"_Smat", nc, nc)
adir := curModeDir.Dir("RAvgs").Dir(lnm)
for ci, oc := range cats {
atsr := tensor.As1D(adir.Float64(oc.Obj))
if atsr.Len() == 0 {
continue
}
for oci := ci + 1; oci < nc; oci++ {
oobj := cats[oci].Obj
otsr := tensor.As1D(adir.Float64(oobj))
sim := 0.0
if otsr.Len() > 0 {
sim = metric.InvCorrelation(atsr, otsr).Float1D(0)
}
smat.SetFloat(sim, ci, oci)
smat.SetFloat(sim, oci, ci)
}
}
}
}
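// Note on rsaSimMats above: InvCorrelation is used as the pairwise measure, so
// the matrix holds distances (presumably 1 - Pearson correlation), with larger
// values meaning less similar activation patterns; the diagonal is left at 0.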
// AvgContrastDist computes the average contrast distance over the given
// similarity (distance) matrix: for each row, the average between-category
// distance minus the average within-category distance, averaged across rows.
// cats gives the object and meta-category for each row / column of the matrix,
// which determines the within vs. between category status of each pair.
func AvgContrastDist(smat *tensor.Float64, cats []ObjCat) float64 {
nc := len(cats)
avgd := 0.0
for ri := range nc {
aid := 0.0
ain := 0
abd := 0.0
abn := 0
rc := cats[ri]
for ci := range nc {
if ri == ci {
continue
}
cc := cats[ci]
d := smat.Float(ri, ci)
if cc.Cat == rc.Cat {
aid += d
ain++
} else {
abd += d
abn++
}
}
if ain > 0 {
aid /= float64(ain)
}
if abn > 0 {
abd /= float64(abn)
}
avgd += abd - aid
}
avgd /= float64(nc)
return avgd
}
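// Worked example for AvgContrastDist (illustrative numbers only): with 2
// categories of 2 objects each, if within-category distances average 0.2 and
// between-category distances average 0.6, each row contributes 0.6 - 0.2 = 0.4
// and the returned average contrast distance is 0.4; larger values indicate
// tighter, better-separated categories.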
// // RSA handles representational similarity analysis
// type RSA struct {
// Cats []string `desc:"category names for each row of simmat / activation table -- call SetCats"`
// Sims map[string]*simat.SimMat `desc:"similarity matrices for each layer"`
// V1Sims []float64 `desc:"similarity for each layer relative to V1"`
// CatDists []float64 `desc:"AvgContrastDist for each layer under CanonicalCats centroid meta categories"`
// BasicDists []float64 `desc:"AvgBasicDist for each layer -- basic-level distances"`
// ExptDists []float64 `desc:"AvgExptDist for each layer -- distances from expt data"`
// Cat5Sims map[string]*simat.SimMat `desc:"similarity matrices for each layer, organized into CanonicalCats and sorted"`
// Cat5Objs map[string]*[]string `desc:"corresponding ordering of objects in sorted Cat5Sims lists"`
// PermNCats map[string]int `desc:"number of categories remaining after permutation from LbaCat"`
// PermDists map[string]float64 `desc:"avg contrast dist for permutation"`
// }
//
//
// // StatsFmActs computes RSA stats from given acts table, for given columns (layer names)
// func (rs *RSA) StatsFmActs(acts *etable.Table, lays []string) {
// tick := 2 // use this tick for analyses..
// tix := etable.NewIdxView(acts)
// tix.Filter(func(et *etable.Table, row int) bool {
// tck := int(et.CellFloat("Tick", row))
// return tck == tick
// })
//
// expt := rs.SimByName("Expt1")
//
// for i, cn := range lays {
// sm := rs.SimByName(cn)
// rs.SimMatFmActs(sm, tix, cn)
//
// dist := metric.CrossEntropy64(sm.Mat.(*tensor.Float64).Values, expt.Mat.(*tensor.Float64).Values)
// rs.ExptDists[i] = dist
// }
//
// v1sm := rs.Sims["V1m"]
// v1sm64 := v1sm.Mat.(*tensor.Float64)
// for i, cn := range lays {
// osm := rs.SimByName(cn)
//
// rs.CatDists[i] = -rs.AvgContrastDist(osm, rs.Cats, CanonicalCats)
// rs.BasicDists[i] = rs.AvgBasicDist(osm, rs.Cats)
//
// if v1sm == osm {
// rs.V1Sims[i] = 1
// continue
// }
// osm64 := osm.Mat.(*tensor.Float64)
// rs.V1Sims[i] = metric.Correlation64(osm64.Values, v1sm64.Values)
// }
// cat5s := []string{"TE"}
// for _, cn := range cat5s {
// rs.StatsSortPermuteCat5(cn)
// }
// }
//
// func (rs *RSA) StatsSortPermuteCat5(laynm string) {
// sm := rs.SimByName(laynm)
// if len(sm.Rows) == 0 {
// return
// }
// sm5 := rs.Cat5SimByName(laynm)
// obj := rs.CatSortSimMat(sm, sm5, rs.Cats, CanonicalCats, true, laynm+"_LbaCat")
// obj5 := rs.Cat5ObjByName(laynm)
// copy(*obj5, obj)
// pnm := laynm + "perm"
// pcats, ncat, pdist := rs.PermuteCatTest(sm, rs.Cats, CanonicalCats, pnm)
// sm5p := rs.Cat5SimByName(pnm)
// objp := rs.CatSortSimMat(sm, sm5p, rs.Cats, pcats, true, pnm)
// obj5p := rs.Cat5ObjByName(pnm)
// copy(*obj5p, objp)
// rs.PermNCats[laynm] = ncat
// rs.PermDists[laynm] = pdist
// }
//
// // CatSortSimMat takes an input sim matrix and categorizes the items according to given cats
// // and then sorts items within that according to their average within - between cat similarity.
// // contrast = use within - between metric, otherwise just within
// // returns the new ordering of objects (like nms but sorted according to new sort)
// func (rs *RSA) CatSortSimMat(insm *simat.SimMat, osm *simat.SimMat, nms []string, catmap map[string]string, contrast bool, name string) []string {
// no := len(insm.Rows)
// sch := etable.Schema{
// {"Cat", tensor.STRING, nil, nil},
// {"Dist", tensor.FLOAT64, nil, nil},
// {"Obj", tensor.STRING, nil, nil},
// }
// dt := &etable.Table{}
// dt.SetFromSchema(sch, no)
// cats := dt.Cols[0].(*tensor.String).Values
// dists := dt.Cols[1].(*tensor.Float64).Values
// objs := dt.Cols[2].(*tensor.String).Values
// for i, nm := range nms {
// cats[i] = catmap[nm]
// objs[i] = nm
// }
// smatv := insm.Mat.(*tensor.Float64).Values
// avgCtrstDist := 0.0
// for ri := 0; ri < no; ri++ {
// roff := ri * no
// aid := 0.0
// ain := 0
// abd := 0.0
// abn := 0
// rc := cats[ri]
// for ci := 0; ci < no; ci++ {
// if ri == ci {
// continue
// }
// cc := cats[ci]
// d := smatv[roff+ci]
// if cc == rc {
// aid += d
// ain++
// } else {
// abd += d
// abn++
// }
// }
// if ain > 0 {
// aid /= float64(ain)
// }
// if abn > 0 {
// abd /= float64(abn)
// }
// dval := aid
// if contrast {
// dval -= abd
// }
// dists[ri] = dval
// avgCtrstDist += (1 - aid) - (1 - abd)
// }
// avgCtrstDist /= float64(no)
// ix := etable.NewIdxView(dt)
// ix.SortColNames([]string{"Cat", "Dist"}, true) // ascending
// osm.Init()
// osm.Mat.CopyShapeFrom(insm.Mat)
// osm.Mat.CopyMetaData(insm.Mat)
// rs.ConfigSimMat(osm)
// omatv := osm.Mat.(*tensor.Float64).Values
// bcols := make([]string, no)
// last := ""
// for sri := 0; sri < no; sri++ {
// sroff := sri * no
// ri := ix.Idxs[sri]
// roff := ri * no
// cat := cats[ri]
// if cat != last {
// bcols[sri] = cat
// last = cat
// }
// // bcols[sri] = nms[ri] // uncomment this to see all the names
// for sci := 0; sci < no; sci++ {
// ci := ix.Idxs[sci]
// d := smatv[roff+ci]
// omatv[sroff+sci] = d
// }
// }
// osm.Rows = bcols
// osm.Cols = bcols
// if Debug {
// fmt.Printf("%v avg contrast dist: %.4f\n", name, avgCtrstDist)
// }
// sobjs := make([]string, no)
// for i := 0; i < no; i++ {
// nm := nms[ix.Idxs[i]]
// sobjs[i] = catmap[nm] + ": " + nm
// }
// return sobjs
// }
//
//
// // AvgBasicDist computes average distance within basic-level categories given by nms
// func (rs *RSA) AvgBasicDist(insm *simat.SimMat, nms []string) float64 {
// no := len(insm.Rows)
// smatv := insm.Mat.(*tensor.Float64).Values
// avgd := 0.0
// ain := 0
// for ri := 0; ri < no; ri++ {
// roff := ri * no
// rnm := nms[ri]
// for ci := 0; ci < ri; ci++ {
// cnm := nms[ci]
// d := smatv[roff+ci]
// if rnm == cnm {
// avgd += d
// ain++
// }
// }
// }
// if ain > 0 {
// avgd /= float64(ain)
// }
// return avgd
// }
//
// // PermuteCatTest takes an input sim matrix and tries all one-off permutations relative to given
// // initial set of categories, and computes the overall average contrast distance for each.
// // It selects the categories with the lowest distance and iterates until no better permutation can be found.
// // Returns the new map, the number of categories used in it, and the avg contrast distance for it.
// func (rs *RSA) PermuteCatTest(insm *simat.SimMat, nms []string, catmap map[string]string, desc string) (map[string]string, int, float64) {
// if Debug {
// fmt.Printf("\n#########\n%v\n", desc)
// }
// catm := map[string]int{} // list of categories and index into catnms
// catnms := []string{}
// for _, nm := range nms {
// cat := catmap[nm]
// if _, has := catm[cat]; !has {
// catm[cat] = len(catnms)
// catnms = append(catnms, cat)
// }
// }
// ncats := len(catnms)
//
// itrmap := make(map[string]string)
// for k, v := range catmap {
// itrmap[k] = v
// }
//
// std := rs.AvgContrastDist(insm, nms, catmap)
// if Debug {
// fmt.Printf("std: %.4f starting\n", std)
// }
//
// for itr := 0; itr < 100; itr++ {
// std = rs.AvgContrastDist(insm, nms, itrmap)
//
// effmap := make(map[string]string)
// mind := 100.0
// mindnm := ""
// mindcat := ""
// for _, nm := range nms { // go over each item
// cat := itrmap[nm]
// for oc := 0; oc < ncats; oc++ { // go over alternative categories
// ocat := catnms[oc]
// if ocat == cat {
// continue
// }
// for k, v := range itrmap {
// if k == nm {
// effmap[k] = ocat // switch
// } else {
// effmap[k] = v
// }
// }
// avgd := rs.AvgContrastDist(insm, nms, effmap)
// if avgd < mind {
// mind = avgd
// mindnm = nm
// mindcat = ocat
// }
// // if avgd < std {
// // fmt.Printf("Permute test better than std dist: %v min dist: %v for name: %v in cat: %v\n", std, avgd, nm, ocat)
// // }
// }
// }
// if mind >= std {
// break
// }
// if Debug {
// fmt.Printf("itr %v std: %.4f min: %.4f name: %v cat: %v\n", itr, std, mind, mindnm, mindcat)
// }
// itrmap[mindnm] = mindcat // make the switch
// }
// if Debug {
// fmt.Printf("std: %.4f final\n", std)
// }
//
// nCatUsed := 0
// for oc := 0; oc < ncats; oc++ {
// cat := catnms[oc]
// if Debug {
// fmt.Printf("%v\n", cat)
// }
// nin := 0
// for _, nm := range Objs {
// ct := itrmap[nm]
// if ct == cat {
// nin++
// if Debug {
// fmt.Printf("\t%v\n", nm)
// }
// }
// }
// if nin > 0 {
// nCatUsed++
// }
// }
// return itrmap, nCatUsed, -std
// }
//
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// equations provides an interactive exploration of the various equations
// underlying the axon models, largely from the chans collection of channels.
package main
//go:generate core generate -add-types -gosl
import (
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/chans/chanplots"
"github.com/emer/emergent/v2/egui"
)
func main() {
root, _ := tensorfs.NewDir("Root")
pl := &Plots{}
pl.GUI.MakeBody(nil, pl, root, "Equations", "Axon Equations", "Equations used in Axon")
pl.GUI.FinalizeGUI(false)
pl.Config(root)
pl.GUI.UpdateFiles()
pl.GUI.Body.RunMainWindow()
}
type Plots struct {
GUI egui.GUI `display:"-"`
// AK is an A-type K channel, which is voltage gated with maximal
// activation around -37 mV. It has two state variables, M (v-gated opening)
// and H (v-gated closing), which integrate with fast and slow time constants,
// respectively. H relatively quickly hits an asymptotic level of inactivation
// for sustained activity patterns.
// It is particularly important for counteracting the excitatory effects of
// voltage gated calcium channels which can otherwise drive runaway excitatory currents.
// See AKsParams for a much simpler version that works fine when full AP-like spikes are
// not simulated, as in our standard axon models.
AK chanplots.AKPlot `new-window:"+" display:"no-inline"`
// GABA-B is an inhibitory channel activated by the usual GABA inhibitory neurotransmitter,
// which is coupled to the GIRK G-protein coupled inwardly rectifying potassium (K) channel.
// It is ubiquitous in the brain, and critical for stability of spiking patterns over time in axon.
// The inward rectification is caused by a Mg+ ion block *from the inside* of the neuron,
// which means that these channels are most open when the neuron is hyperpolarized (inactive),
// and thus it serves to keep inactive neurons inactive. Based on Thomson & Destexhe (1999).
GABAB chanplots.GABABPlot `new-window:"+" display:"no-inline"`
// Kir is the kIR potassium inwardly rectifying current,
// based on the equations from Lindroos et al (2018).
// The conductance is highest at low membrane potentials.
Kir chanplots.KirPlot `new-window:"+" display:"no-inline"`
// Mahp implements an M-type medium afterhyperpolarizing (mAHP) channel,
// where m also stands for muscarinic due to the ACh inactivation of this channel.
// It has a slow activation and deactivation time constant, and opens at a lowish
// membrane potential.
// There is one gating variable n updated over time with a tau that is also voltage dependent.
// The infinite-time value of n is voltage dependent according to a logistic function
// of the membrane potential, centered at Voff with slope Vslope.
Mahp chanplots.MahpPlot `new-window:"+" display:"no-inline"`
// NMDA implements NMDA dynamics, based on Jahr & Stevens (1990) equations
// which are widely used in models, from Brunel & Wang (2001) to Sanders et al. (2013).
// The overall conductance is a function of a voltage-dependent postsynaptic factor based
// on Mg ion blockage, and presynaptic Glu-based opening, which in a simple model just
// increments with each presynaptic spike and decays over time.
NMDA chanplots.NMDAPlot `new-window:"+" display:"no-inline"`
// Sahp implements a slow afterhyperpolarizing (sAHP) channel.
// It has a slowly accumulating calcium value, aggregated at the
// theta cycle level, that then drives the logistic gating function,
// so that it only activates after a significant accumulation,
// after which point it decays.
// For the theta-cycle updating, the normal m-type tau is all within
// the scope of a single theta cycle, so we just omit the time integration
// of the n gating value, but tau is computed in any case.
Sahp chanplots.SahpPlot `new-window:"+" display:"no-inline"`
// SKCa describes the small-conductance calcium-activated potassium channel,
// activated by intracellular stores in a way that drives pauses in firing,
// and can require inactivity to recharge the Ca available for release.
// These intracellular stores can release quickly, have a slow decay once released,
// and the stores can take a while to rebuild, leading to rapidly triggered,
// long-lasting pauses that don't recur until stores have rebuilt, which is the
// observed pattern of firing of STNp pausing neurons.
// CaIn = intracellular stores available for release; CaR = released amount from stores;
// CaM = K channel conductance gating factor driven by CaR binding,
// computed using the Hill equations described in Fujita et al (2012), Gunay et al (2008)
// (also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded)
SKCa chanplots.SKCaPlot `new-window:"+" display:"no-inline"`
// VGCC plots the standard L-type voltage gated Ca channel.
// All functions based on Urakubo et al (2008).
VGCC chanplots.VGCCPlot `new-window:"+" display:"no-inline"`
// SynCa plots synaptic calcium according to the kinase calcium dynamics.
SynCa chanplots.SynCaPlot `new-window:"+" display:"no-inline"`
}
func (pl *Plots) Config(root *tensorfs.Node) {
pl.AK.Config(root, pl.GUI.Tabs)
pl.GABAB.Config(root, pl.GUI.Tabs)
pl.Kir.Config(root, pl.GUI.Tabs)
pl.Mahp.Config(root, pl.GUI.Tabs)
pl.NMDA.Config(root, pl.GUI.Tabs)
pl.Sahp.Config(root, pl.GUI.Tabs)
pl.SKCa.Config(root, pl.GUI.Tabs)
pl.VGCC.Config(root, pl.GUI.Tabs)
pl.SynCa.Config(root, pl.GUI.Tabs)
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hip
import (
"fmt"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/tensorcore"
)
// ConfigInputs generates the AB-AC input patterns
func (ss *Sim) ConfigInputs() {
abac := ss.Root.Dir("ABAC")
hp := &ss.Config.Hip
ecY := hp.EC3NPool.Y
ecX := hp.EC3NPool.X
plY := hp.EC3NNrn.Y // shorter local variables for frequently used values
plX := hp.EC3NNrn.X // make the code much more readable
trials := ss.Config.Run.Trials
pctAct := ss.Config.Env.ECPctAct
nOn := patterns.NFromPct(pctAct, plY*plX)
nDiff := patterns.NFromPct(ss.Config.Env.MinDiffPct, nOn)
ctxtFlip := patterns.NFromPct(ss.Config.Env.CtxtFlipPct, nOn)
voc := abac.Dir("Vocab")
empty := voc.Float32("empty", trials, plY, plX)
a := voc.Float32("A", trials, plY, plX)
b := voc.Float32("B", trials, plY, plX)
c := voc.Float32("C", trials, plY, plX)
la := voc.Float32("lA", trials, plY, plX)
lb := voc.Float32("lB", trials, plY, plX)
ctxt := voc.Float32("ctxt", 3, plY, plX)
// patterns.MinDiffPrintIterations = true
patterns.PermutedBinaryMinDiff(a, nOn, 1, 0, nDiff)
patterns.PermutedBinaryMinDiff(b, nOn, 1, 0, nDiff)
patterns.PermutedBinaryMinDiff(c, nOn, 1, 0, nDiff)
patterns.PermutedBinaryMinDiff(la, nOn, 1, 0, nDiff)
patterns.PermutedBinaryMinDiff(lb, nOn, 1, 0, nDiff)
patterns.PermutedBinaryMinDiff(ctxt, nOn, 1, 0, nDiff)
// 12 contexts total: (ecY-1)*ecX context pools per list (the first row of pools holds the stimulus pats), times 3 different ctxt bases (one per list)
for i := range (ecY - 1) * ecX * 3 {
list := i / ((ecY - 1) * ecX)
ctxtNm := fmt.Sprintf("ctxt%d", i)
tsr := voc.Float32(ctxtNm, 0, plY, plX)
patterns.ReplicateRows(tsr, ctxt.SubSpace(list), trials)
patterns.FlipBitsRows(tsr, ctxtFlip, ctxtFlip, 1, 0)
}
abName := voc.StringValue("ABName", trials)
acName := voc.StringValue("ACName", trials)
lureName := voc.StringValue("LureName", trials)
patterns.NameRows(abName, "AB_", 2)
patterns.NameRows(acName, "AC_", 2)
patterns.NameRows(lureName, "Lure_", 2)
abFull := voc.Float32("ABFull", trials, ecY, ecX, plY, plX)
patterns.Mix(abFull, trials, a, b, voc.Float32("ctxt0"), voc.Float32("ctxt1"), voc.Float32("ctxt2"), voc.Float32("ctxt3"))
abTest := voc.Float32("ABTest", trials, ecY, ecX, plY, plX)
patterns.Mix(abTest, trials, a, empty, voc.Float32("ctxt0"), voc.Float32("ctxt1"), voc.Float32("ctxt2"), voc.Float32("ctxt3"))
acFull := voc.Float32("ACFull", trials, ecY, ecX, plY, plX)
patterns.Mix(acFull, trials, a, c, voc.Float32("ctxt4"), voc.Float32("ctxt5"), voc.Float32("ctxt6"), voc.Float32("ctxt7")) // A items paired with C items for the AC list
acTest := voc.Float32("ACTest", trials, ecY, ecX, plY, plX)
patterns.Mix(acTest, trials, a, empty, voc.Float32("ctxt4"), voc.Float32("ctxt5"), voc.Float32("ctxt6"), voc.Float32("ctxt7"))
lureFull := voc.Float32("LureFull", trials, ecY, ecX, plY, plX)
patterns.Mix(lureFull, trials, la, lb, voc.Float32("ctxt8"), voc.Float32("ctxt9"), voc.Float32("ctxt10"), voc.Float32("ctxt11"))
lureTest := voc.Float32("LureTest", trials, ecY, ecX, plY, plX)
patterns.Mix(lureTest, trials, la, empty, voc.Float32("ctxt8"), voc.Float32("ctxt9"), voc.Float32("ctxt10"), voc.Float32("ctxt11"))
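// Presumed layout of each mixed pattern above (a sketch inferred from the Mix
// calls and the context loop): the first row of EC pools holds the stimulus
// pair (A and B / C, or A and empty at test), and the remaining (ecY-1)*ecX
// pools hold that list's context patterns (ctxt0-3 for AB, ctxt4-7 for AC,
// ctxt8-11 for the lures).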
//////// Inputs
inp := abac.Dir("Inputs")
ab := inp.Dir("TrainAB")
ab.Set("Name", abName.Clone())
ab.Set("Input", abFull.Clone())
ab.Set("EC5", abFull.Clone())
ac := inp.Dir("TrainAC")
ac.Set("Name", acName.Clone())
ac.Set("Input", acFull.Clone())
ac.Set("EC5", acFull.Clone())
test := inp.Dir("TestAll")
test.Set("Name", abName.Clone().AppendFrom(acName).AppendFrom(lureName))
test.Set("Input", abTest.Clone().AppendFrom(acTest).AppendFrom(lureTest))
test.Set("EC5", abFull.Clone().AppendFrom(acFull).AppendFrom(lureFull))
sty := func(s *tensorcore.GridStyle) {
s.Size.Min = 20
}
all := abac.ValuesFunc(nil)
for _, vl := range all {
tensorcore.AddGridStylerTo(vl, sty)
}
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hip
import (
"cogentcore.org/core/core"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// ECPctAct is percent activation in EC pool, used in patgen for input generation.
ECPctAct float64 `default:"0.2"`
// MinDiffPct is the minimum difference between item random patterns,
// as a proportion (0-1) of total active
MinDiffPct float64 `default:"0.5"`
// DriftCtxt means use drifting context representations,
// otherwise does bit flips from prototype.
DriftCtxt bool
// CtxtFlipPct is the proportion (0-1) of active bits to flip
// for each context pattern, relative to a prototype, for non-drifting.
CtxtFlipPct float64 `default:"0.25"`
// DriftPct is percentage of active bits that drift, per step, for drifting context.
DriftPct float64 `default:"0.1"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// InToEc2PCon is percent connectivity from Input to EC2.
InToEc2PCon float32 `default:"0.25"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Significantly faster for both CPU and GPU, and results in an effective
// mini-batch size for learning.
NData int `default:"10" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// MemThr is the threshold on the proportion of on / off errors below which an item counts as remembered.
MemThr float64 `default:"0.34"`
// StopMem is memory pct correct level (proportion) above which training
// on current list stops (switch from AB to AC or stop on AC).
StopMem float32 `default:"0.9"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"20"`
// Cycles is the total number of cycles per trial: at least 200.
Cycles int `default:"200"`
// PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
PlusCycles int `default:"50"`
// TestInterval is how often (in epochs) to run through all the test patterns,
// in terms of training epochs. Can use 0 or -1 for no testing.
TestInterval int `default:"5"`
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Hip has hippocampus sizing parameters.
Hip axon.HipConfig `display:"add-fields"`
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "Hip"
cfg.Title = "Axon hippocampus"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/hip/README.md"
cfg.Doc = "Simulates the hippocampus on basic AB-AC paired associates task."
cfg.Hip.Defaults()
}
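// Illustrative sketch of a config .toml overriding some of these defaults
// (table names follow the Config struct fields above; values are hypothetical):
// [Env]
// ECPctAct = 0.25
// CtxtFlipPct = 0.25
// [Run]
// NData = 8
// Epochs = 50
// Trials = 16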
// Code generated by "core generate -add-types -gosl -add-funcs"; DO NOT EDIT.
package hip
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 4
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
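// Illustrative usage of the generated enum methods (a sketch; Modes, Levels,
// and StatsPhase all work the same way):
// var m Modes
// if err := m.SetString("Test"); err == nil {
// 	fmt.Println(m.String(), m.Int64()) // prints: Test 1
// }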
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// hip runs a hippocampus model for testing parameters and new learning ideas
package hip
//go:generate core generate -add-types -gosl -add-funcs
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
//////// Inputs
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *env.FixedTable
if len(ss.Envs) == 0 {
trn = &env.FixedTable{}
tst = &env.FixedTable{}
} else {
trn = ss.Envs.ByMode(Train).(*env.FixedTable)
tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
trnAB := tensorfs.DirTable(ss.Root.Dir("ABAC/Inputs/TrainAB"), nil)
tstAll := tensorfs.DirTable(ss.Root.Dir("ABAC/Inputs/TestAll"), nil)
// this logic can be used to create train-test splits of a set of patterns:
// n := inputs.NumRows()
// order := rand.Perm(n)
// ntrn := int(0.85 * float64(n))
// trnEnv := table.NewView(inputs)
// tstEnv := table.NewView(inputs)
// trnEnv.Indexes = order[:ntrn]
// tstEnv.Indexes = order[ntrn:]
// note: names must be standard here!
trn.Name = Train.String()
trn.Config(table.NewView(trnAB))
trn.Validate()
tst.Name = Test.String()
tst.Config(table.NewView(tstAll))
tst.Sequential = true
tst.Validate()
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().ThetaCycles = int32(ss.Config.Run.Cycles)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
hip := &ss.Config.Hip
in := net.AddLayer4D("Input", axon.InputLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.EC3NNrn.Y, hip.EC3NNrn.X)
inToEc2 := paths.NewUniformRand()
inToEc2.PCon = ss.Config.Params.InToEc2PCon
onetoone := paths.NewOneToOne()
ec2, ec3, _, _, _, _ := net.AddHip(hip, 2)
net.ConnectLayers(in, ec2, inToEc2, axon.ForwardPath)
net.ConnectLayers(in, ec3, onetoone, axon.ForwardPath)
ec2.PlaceAbove(in)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
// net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
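// For reference, the Train stack configured above nests Run > Epoch > Trial > Cycle,
// with Trial advancing by NData per step. IntMultipleGE rounds the configured number
// of trials up to the next multiple of NData so every data-parallel slot is used on
// each pass; e.g., with hypothetical values Trials=10 and NData=4, 12 trials are run
// as 3 steps of 4. The Test stack runs a single Epoch over the same Trial / Cycle levels.
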
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
for di := range ndata {
ev.Step()
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
}
net.ApplyExts()
}
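// The loop above steps the environment once per data-parallel index (di in
// 0..NData-1), records the trial name, and applies each input / target layer's
// state at that index; the trailing net.ApplyExts() call then applies the
// staged external inputs to the network as a whole.
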
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
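// A minimal sketch of a stat function, just to illustrate the Start/Step phase
// convention used by the functions registered in ConfigStats below (the tensor
// name "MyStat" is hypothetical):
//
//	ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
//		tsr := ss.Stats.Dir(mode.String()).Dir(level.String()).Float64("MyStat")
//		if phase == Start {
//			tsr.SetNumRows(0) // reset accumulated rows at the start of the level
//			return
//		}
//		tsr.AppendRowFloat(0.0) // append the value computed for this step
//	})
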
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
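// For example, when StatsStart fires at the start of an Epoch, it calls
// ss.RunStats(mode, Trial, Start), which resets the Trial-level accumulators
// that the subsequent Trial Steps append to.
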
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
if level == Run {
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train RunAll Plot")
}
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(ss.Stats.Dir("Train/RunAll"))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// todo: update stats
statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("EC5") // todo: need to update stats
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
stat = 1.0
if uniterr == 0 {
stat = 0
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
// nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
// case "NZero":
// err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
// stat = curModeDir.Float64(name, 1).Float1D(0)
// if err == 0 {
// stat++
// } else {
// stat = 0
// }
// curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
// case "FirstZero":
// stat = curModeDir.Float64(name, 1).Float1D(0)
// if stat < 0 && nz == 1 {
// stat = curModeDir.Int("Epoch", 1).Float1D(0)
// }
// curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
// case "LastZero":
// stat = curModeDir.Float64(name, 1).Float1D(0)
// if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
// stat = curModeDir.Int("Epoch", 1).Float1D(0)
// }
// curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
switch name {
case "NZero", "FirstZero", "LastZero":
stat = subDir.Value(name).Float1D(-1)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
runAllFunc := axon.StatLevelAll(ss.Stats, Train, Run, func(s *plot.Style, cl tensor.Values) {
name := metadata.Name(cl)
switch name {
case "FirstZero", "LastZero":
s.On = true
s.Range.SetMin(0)
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runAllFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.CycleUpdateInterval = 10
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles
nv.Options.Raster.Max = ss.Config.Run.Cycles
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/hip"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[hip.Sim, hip.Config]() }
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hip
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: ".EC", Doc: "all EC layers: only pools, no layer-level -- now for EC3 and EC5",
Set: func(ly *axon.LayerParams) {
// ly.Inhib.ActAvg.Nominal = 0.2
// ly.Inhib.Layer.On = false
// ly.Inhib.Layer.Gi = 0.2 // weak just to keep it from blowing up
// ly.Inhib.Pool.Gi = 1.1
// ly.Inhib.Pool.On = true
// ly.Act.Gbar.L = 10
ly.Inhib.ActAvg.Nominal = 0.05
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 1.1
ly.Acts.Clamp.Ge = 1.4
// ly.Learn.TrgAvgAct.SubMean = 0
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
}},
{Sel: "#DG", Doc: "very sparse = high inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.01
ly.Inhib.Layer.Gi = 2.4
// ly.Learn.TrgAvgAct.SubMean = 0
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
// ly.Inhib.Layer.FB = 4
// ly.Learn.RLRate.SigmoidMin = 0.01
}},
{Sel: "#EC2", Doc: "very sparse = high inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02
ly.Inhib.Layer.Gi = 1.2
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
// ly.Inhib.Layer.FB = 4
// ly.Learn.RLRate.SigmoidMin = 0.01
}},
{Sel: "#CA3", Doc: "sparse = high inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.01
ly.Inhib.Layer.Gi = 1.2
// ly.Learn.TrgAvgAct.SubMean = 0
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
// ly.Inhib.Layer.FB = 4
// ly.Learn.RLRate.SigmoidMin = 0.01
}},
{Sel: "#CA1", Doc: "CA1 only Pools",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Pool.Gi = 1.1
// ly.Learn.TrgAvgAct.SubMean = 0
// ly.Learn.TrgAvgAct.On = false
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002
// ly.Inhib.Pool.FB = 4
// ly.Learn.RLRate.SigmoidMin = 0.01
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
// {Sel: "Path", Doc: "basic path params",
// Set: func(pt *axon.PathParams) {
// pt.Learn.LRate.Base = 0.4
// }},
{Sel: ".InhibLateral", Doc: "circle lateral inhibitory connection -- good params, longer time, more ABmem",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false) // ??? not sure
// pt.SWts.Init.Mean = 1 // 0.1 was the standard Grid model as of 02242023
pt.SWts.Init.Var = 0
pt.SWts.Init.Sym.SetBool(false)
pt.PathScale.Abs = 0.1 // lower is better for spiking model?
}},
// {Sel: ".EcCa1Path", Doc: "encoder pathways -- Abs only affecting ec3toca1 and ec5toca1, not ca1toec5",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 0.1 // as low as 0.3 helped hugely preventing CA1 fixation, even 0.1 works -- try each one of them separately
// pt.Learn.LRate.Base = 0.2
// }},
{Sel: ".HippoCHL", Doc: "hippo CHL pathways -- no norm, moment, but YES wtbal = sig better",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(true)
// pt.CHL.Hebb = 0.01 // .01 > .05? > .1?
pt.Learn.LRate.Base = 0.2 // .2
}},
{Sel: ".PPath", Doc: "performant path, new Dg error-driven EcCa1Path paths",
Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 0.8 // 0.8 helps preventing CA3 fixation
pt.Learn.Learn.SetBool(true)
pt.Learn.LRate.Base = 0.2 // err driven: .15 > .2 > .25 > .1
}},
{Sel: "#CA1ToEC5", Doc: "extra strong from CA1 to EC5",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3.0 // 4 > 6 > 2 (fails)
pt.Learn.LRate.Base = 0.4 // ABmem slightly impaired compared to 0.2 but faster
}},
{Sel: "#InputToEC2", Doc: "for CAN ec2",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 2.0 // 2 vs. 1: memory much better, FirstPerfect generally longer
pt.Learn.Learn.SetBool(false) // no learning better
}},
{Sel: "#InputToEC3", Doc: "one-to-one input to EC",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Init.Mean = 0.8
pt.SWts.Init.Var = 0.0
}},
{Sel: "#EC3ToEC2", Doc: "copied from InputToEC2",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false) // no learning better
//pt.Learn.LRate.Base = 0.01
//pt.SWts.Init.Mean = 0.8 // 0.8 is for one to one deterministic connections, not for learning!
//pt.SWts.Init.Var = 0
pt.PathScale.Abs = 0.5 // was 1, lower better
}},
{Sel: "#EC5ToEC3", Doc: "one-to-one out to in",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Init.Mean = 0.9
pt.SWts.Init.Var = 0.01
pt.PathScale.Rel = 0.5 // was 0.5
}},
{Sel: "#DGToCA3", Doc: "Mossy fibers: strong, non-learning",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false) // learning here definitely does NOT work!
// pt.SWts.Init.Mean = 0.9 // commenting this out prevents CA3 overactivation
pt.SWts.Init.Var = 0.01
pt.PathScale.Rel = 4 // err del 4: 4 > 6 > 8
pt.PathScale.Abs = 0.3
}},
// {Sel: "#EC2ToCA3", Doc: "EC2 Perforant Path",
// Set: func(pt *axon.PathParams) {
// // pt.PathScale.Abs = 2
// pt.Learn.LRate.Base = 0.4 // list150: 0.2 > 0.3 > 0.1 > 0.05 > 0.01
// }},
{Sel: "#CA3ToCA3", Doc: "CA3 recurrent cons: rel=2 still the best",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.3
pt.PathScale.Rel = 2 // 2 > 1 > .5 = .1
// pt.Learn.LRate.Base = 0.4 // .1 > .08 (close) > .15 > .2 > .04; large list size: 0.01>0.1~=0.04
}},
{Sel: "#EC2ToDG", Doc: "DG learning is surprisingly critical: maxed out fast, hebbian works best",
Set: func(pt *axon.PathParams) {
// pt.Hip.Hebb = 0.2
// pt.Hip.Err = 0.8
// pt.Hip.SAvgCor = 0.1
// pt.Hip.SNominal = 0.02 // !! need to keep it the same as actual layer Nominal
pt.Learn.Learn.SetBool(true) // absolutely essential to have on! learning slow if off. key for NoDGLearn
pt.PathScale.Abs = 0.7
pt.Learn.LRate.Base = 0.2
}},
{Sel: "#CA3ToCA1", Doc: "Schaffer collaterals -- slower, less hebb",
Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 1.5
// pt.Hip.Hebb = 0.01 // worked whole 300 epcs!
// pt.Hip.Err = 0.9
// pt.Hip.Hebb = 0
// pt.Hip.Err = 1
// pt.SWts.Adapt.SigGain = 1
// pt.SWts.Init.SPct = 0
// pt.Learn.DWt.SubMean = 1 // prediction: zero-sum at LWt level makes more fixation
// pt.PathScale.Abs = 0.1
// pt.Hip.SAvgCor = 0.4
// pt.Hip.SNominal = 0.03 // !! need to keep it the same as actual layer Nominal
pt.Learn.LRate.Base = 0.2 // CHL: .1 =~ .08 > .15 > .2, .05 (sig worse)
}},
// {Sel: "#EC3ToCA1", Doc: "EC3 Perforant Path",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 0.1
// // pt.SWts.Adapt.SigGain = 1 // if 1, Wt = LWt, weight more linear less extreme, if 6 (default), Wt = sigmoid(LWt)
// }},
{Sel: "#EC5ToCA1", Doc: "EC5 Perforant Path",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.3 // back projections should generally be very weak; we set this explicitly here because the others are already set
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package inhib
import (
"cogentcore.org/core/core"
"cogentcore.org/core/math32/vecint"
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// InputPct has the percent of active units in input layer
// (literally number of active units, because input has 100 units total).
InputPct float32 `default:"15" min:"5" max:"50" step:"1"`
// NLayers is the number of hidden layers to add.
NLayers int `default:"2" min:"1"`
// HiddenSize is the size of hidden layers.
HiddenSize vecint.Vector2i `default:"{'X':10,'Y':10}"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// Trials is the total number of trials of different random patterns to generate.
Trials int `default:"10"`
// Cycles is the total number of cycles per trial: at least 200.
Cycles int `default:"200"`
// PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
PlusCycles int `default:"50"`
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// Save has the list of levels to save log files for.
Save []string `default:"['Trial', 'Cycle']" nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "Inhib"
cfg.Title = "Axon inhibition test"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/inhib/README.md"
cfg.Doc = "This explores how inhibitory interneurons can dynamically control overall activity levels within the network, by providing both feedforward and feedback inhibition to excitatory pyramidal neurons, with different time scales provided by PV neurons (fast spiking) and SST neurons (slow spiking)."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package inhib
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Test`: 0, `Train`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Test`, 1: `Train`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 3
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// inhib: This simulation explores how inhibitory interneurons can dynamically
// control overall activity levels within the network, by providing both
// feedforward and feedback inhibition to excitatory pyramidal neurons,
// with different time scales provided by PV neurons (fast spiking)
// and SST neurons (slow spiking).
package inhib
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"strings"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/fsfffb"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Test Modes = iota
Train // not used, but needed for some things
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// FSFFFB turns on the FS-FFFB summary inhibition function instead of using
// the inhibitory interneurons directly.
FSFFFB bool
// Gi is overall inhibition gain, which is the main parameter to adjust
// to change overall activation levels, scaling both the FS and SS factors.
Gi float32 `min:"0" default:"1,1.1,0.75,0.9"`
// FB is a scaling factor for contribution of FB spikes to FSi value,
// where FF spikes always contribute with a factor of 1.
// For small networks, 0.5 or 1 works best; larger networks and
// more demanding inhibition require higher levels.
FB float32 `min:"0" default:"0.5,1,4"`
// FSTau is the fast spiking (PV+) integration time constant in cycles (msec).
// Tau is roughly 2/3 of the way to asymptotic value.
FSTau float32 `min:"0" default:"6"`
// SS is the multiplier on SS slow-spiking (SST+) in contributing to the
// overall Gi inhibition. FS contributes at a factor of 1.
SS float32 `min:"0" default:"30"`
// SSfTau is the slow-spiking (SST+) facilitation decay time constant
// in cycles (msec). Facilitation factor SSf determines the impact of FB spikes
// as a function of spike input.
// Tau is roughly 2/3 of the way to asymptotic value.
SSfTau float32 `min:"0" default:"20"`
// SSiTau is the slow-spiking (SST+) integration time constant in cycles (msec)
// cascaded on top of FSTau.
// Tau is roughly 2/3 of the way to asymptotic value.
SSiTau float32 `min:"0" default:"50"`
// InhibExcite is the scaling factor for inhibition to excitation pathways,
// which determines the strength of inhibition when not using FSFFFB function.
InhibExcite float32
// InhibInhib is the scaling factor for inhibition to inhibition pathways,
// which determines the strength of inhibition when not using FSFFFB function.
InhibInhib float32
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// NetUpdate has netview update parameters.
NetUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
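// The Tau parameters above follow the usual exponential-integration convention:
// each cycle the integrated value moves toward its driving input by a factor of
// 1/Tau, so after Tau cycles it has covered about 1 - 1/e ≈ 63% ("roughly 2/3")
// of the distance to its asymptote. A generic sketch of one such update, with
// hypothetical variable names (not the actual fsfffb implementation):
//
//	fsi += (ffSpikes + fb*fbSpikes - fsi) / fsTau
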
func Embed(b tree.Node) { egui.Embed[Sim, Config](b) }
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ShouldDisplay(field string) bool {
switch field {
case "Gi", "FB", "FSTau", "SS", "SSfTau", "SSiTau":
return ss.FSFFFB
case "InhibExcite", "InhibInhib":
return !ss.FSFFFB
default:
return true
}
}
func (ss *Sim) Defaults() {
ss.FSFFFB = true
ss.Gi = 1
ss.FB = 1
ss.FSTau = 6
ss.SS = 30
ss.SSfTau = 20
ss.SSiTau = 50
ss.InhibExcite = 0.8
ss.InhibInhib = 0.8
}
func (ss *Sim) ConfigSim() {
ss.Defaults()
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var tst *env.FixedTable
if len(ss.Envs) == 0 {
tst = &env.FixedTable{}
} else {
tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
inputs := tensorfs.DirTable(ss.Root.Dir("Inputs/Test"), nil)
tst.Name = Test.String()
tst.Config(table.NewView(inputs))
tst.Sequential = true
tst.Validate()
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(tst)
}
func (ss *Sim) ReConfigNet() {
ss.Net.DeleteAll()
ss.ConfigNet(ss.Net)
// ss.GUI.NetView.Config()
}
func LayNm(n int) string {
return fmt.Sprintf("Layer%d", n)
}
func InhNm(n int) string {
return fmt.Sprintf("Inhib%d", n)
}
func LayByNm(net *axon.Network, n int) *axon.Layer {
return net.LayerByName(LayNm(n))
}
func InhByNm(net *axon.Network, n int) *axon.Layer {
return net.LayerByName(InhNm(n))
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(1)
net.Context().ThetaCycles = int32(ss.Config.Run.Cycles)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
sz := ss.Config.Params.HiddenSize
inlay := net.AddLayer2D(LayNm(0), axon.InputLayer, sz.Y, sz.X)
_ = inlay
for hi := 1; hi <= ss.Config.Params.NLayers; hi++ {
net.AddLayer2D(LayNm(hi), axon.SuperLayer, sz.Y, sz.X)
net.AddLayer2D(InhNm(hi), axon.SuperLayer, sz.Y, 2).AddClass("InhibLay")
}
full := paths.NewFull()
rndcut := paths.NewUniformRand()
rndcut.PCon = 0.1
for hi := 1; hi <= ss.Config.Params.NLayers; hi++ {
ll := LayByNm(net, hi-1)
tl := LayByNm(net, hi)
il := InhByNm(net, hi)
net.ConnectLayers(ll, tl, full, axon.ForwardPath).AddClass("Excite")
net.ConnectLayers(ll, il, full, axon.ForwardPath).AddClass("ToInhib")
net.ConnectLayers(tl, il, full, axon.BackPath).AddClass("ToInhib")
net.ConnectLayers(il, tl, full, axon.InhibPath)
net.ConnectLayers(il, il, full, axon.InhibPath)
// if hi > 1 {
// net.ConnectLayers(inlay, tl, rndcut, axon.ForwardPath).AddClass("RandSc")
// }
tl.PlaceAbove(ll)
il.PlaceRightOf(tl, 1)
if hi < ss.Config.Params.NLayers {
nl := LayByNm(net, hi+1)
net.ConnectLayers(nl, il, full, axon.ForwardPath).AddClass("ToInhib")
net.ConnectLayers(tl, nl, full, axon.ForwardPath).AddClass("Excite")
net.ConnectLayers(nl, tl, full, axon.BackPath).AddClass("Excite")
}
}
net.Build()
net.Defaults()
ss.ApplyParams()
net.InitWeights()
}
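// To summarize the circuit built above for each hidden layer i: Layer(i-1) sends
// excitatory Full projections to Layer(i) (class Excite) and feedforward drive to
// Inhib(i) (class ToInhib); Layer(i) sends feedback drive to Inhib(i); and Inhib(i)
// sends inhibitory projections back onto Layer(i) and onto itself. When NLayers > 1,
// the next layer up also drives Inhib(i) and exchanges Forward / Back excitatory
// projections with Layer(i).
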
func (ss *Sim) ApplyParams() {
if ss.FSFFFB {
ss.Params.ExtraSheets = "FSFFFB Trained"
} else {
ss.Params.ExtraSheets = "Trained"
}
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
for _, ly := range ss.Net.Layers {
ip := &ly.Params.Inhib.Layer
ip.Gi = ss.Gi
ip.FB = ss.FB
ip.FSTau = ss.FSTau
ip.SS = ss.SS
ip.SSfTau = ss.SSfTau
ip.SSiTau = ss.SSiTau
for _, pt := range ly.RecvPaths {
if pt.Type != axon.InhibPath {
continue
}
if ss.FSFFFB {
pt.Params.PathScale.Abs = ss.InhibInhib
} else {
if strings.HasPrefix(ly.Name, "Inhib") {
pt.Params.PathScale.Abs = ss.InhibInhib
} else {
pt.Params.PathScale.Abs = ss.InhibExcite
}
}
}
}
}
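// ExtraSheets is a space-separated list of sheet names applied on top of the
// always-applied "Base" sheet, matching the keys in the LayerParams / PathParams
// maps in params.go. For example, "FSFFFB Trained" applies Base, then the FSFFFB
// sheet (which turns on layer-level FS-FFFB inhibition and zeroes the direct
// InhibPath strength), then the Trained sheet (trained-like excitatory weight
// distribution).
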
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.NetUpdate.RecordSyns()
ss.NetUpdate.Update(Test, Cycle)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
return &ss.NetUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
cycles := ss.Config.Run.Cycles
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, ss.Config.Run.Trials, 1).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Test].OnInit.Add("Init", func() { ss.Init() })
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
ev.Step()
curModeDir.StringValue("TrialName", 1).SetString1D(ev.String(), 0)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State("Input")
if st != nil {
ly.ApplyExt(uint32(0), st)
}
}
net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
ss.InitRandSeed(0)
ss.Envs.ByMode(Test).Init(0)
ctx.Reset()
ss.Net.InitWeights()
}
//////// Inputs
func (ss *Sim) ConfigInputs() {
dt := table.New()
metadata.SetName(dt, "Test")
metadata.SetDoc(dt, "Testing inputs")
dt.AddStringColumn("Name")
dt.AddFloat32Column("Input", 10, 10)
dt.SetNumRows(25)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], int(ss.Config.Params.InputPct), 1, 0, int(ss.Config.Params.InputPct)/2)
tensorfs.DirFromTable(ss.Root.Dir("Inputs/Test"), dt)
}
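// For example, with the default Config.Params.InputPct of 15, each of the 25 test
// patterns activates 15 of the 100 Input units (10x10), and (assuming the last
// argument of PermutedBinaryMinDiff is the minimum difference between patterns)
// any two patterns differ in at least 15/2 = 7 active units.
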
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(0)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Cycle))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
layers := []string{"Layer1", "Layer2"}
statNames := []string{"Spike", "Vm", "VmDend", "Ge", "Act", "Gi", "FFs", "FBs", "FSi", "SSi", "SSf", "FSGi", "SSGi"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, lnm := range layers {
ly := ss.Net.LayerByName(lnm)
pi := int(ly.Params.PoolIndex(0))
di := 0
for _, stnm := range statNames {
name := lnm + "_" + stnm
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Float64(name)
ndata := 1
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.Range.SetMin(0).SetMax(1)
s.On = false
switch stnm {
case "Act":
s.On = true
case "Vm", "VmDend":
s.RightY = true
}
})
continue
}
switch level {
case Cycle:
var stat float32
switch stnm {
case "Spike", "Vm", "VmDend":
stat = ly.AvgMaxVarByPool(stnm, 0, di).Avg
case "Ge":
stat = axon.PoolAvgMax(axon.AMGeInt, axon.AMCycle, axon.Avg, uint32(pi), uint32(di))
case "Act":
stat = axon.PoolAvgMax(axon.AMAct, axon.AMCycle, axon.Avg, uint32(pi), uint32(di))
case "Gi":
stat = axon.Neurons.Value(int(ly.NeurStIndex), di, int(axon.Gi))
default:
var ivar fsfffb.InhibVars
ivar.SetString(stnm)
stat = axon.Pools.Value(pi, di, int(ivar))
}
curModeDir.Float64(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
default:
subDir := modeDir.Dir((level - 1).String())
stat := stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Test, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles
nv.Options.Raster.Max = ss.Config.Run.Cycles
nv.SetNet(ss.Net)
ss.NetUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.5, 2.5)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Defaults", Icon: icons.Update,
Tooltip: "Restore initial default parameters.",
Active: egui.ActiveStopped,
Func: func() {
ss.Defaults()
ss.Init()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Save})
ss.Loops.Run(Test)
axon.CloseLogFiles(ss.Loops, ss.Stats)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/inhib"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[inhib.Sim, inhib.Config]() }
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package inhib
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic params for all layers: lower gain, slower, soft clamp",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(false)
ly.Inhib.Layer.Gi = 1.0
ly.Inhib.ActAvg.Nominal = 0.1
ly.Acts.Dt.GeTau = 5
ly.Acts.Dt.GiTau = 7
ly.Acts.Gbar.I = 100
ly.Acts.Gbar.L = 20
ly.Acts.Decay.Act = 0.0 // 0.2 def
ly.Acts.Decay.Glong = 0.0 // 0.6 def
ly.Acts.Noise.On.SetBool(false)
ly.Acts.Noise.GeHz = 100
ly.Acts.Noise.Ge = 0.002 // 0.001 min
ly.Acts.Noise.GiHz = 200
ly.Acts.Noise.Gi = 0.002 // 0.001 min
}},
{Sel: ".InhibLay", Doc: "generic params for all layers: lower gain, slower, soft clamp",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.5
ly.Acts.Spikes.Thr = 0.5
ly.Acts.Spikes.Tr = 1 // 3 def
ly.Acts.Spikes.VmR = -60 // key for firing early, plus noise
ly.Acts.Init.Vm = -60 // key for firing early, plus noise
ly.Acts.Erev.L = -60 // more excitable
ly.Acts.KNa.On.SetBool(false)
ly.Acts.Noise.On.SetBool(true)
ly.Acts.Noise.Ge = 0.2
ly.Acts.Noise.Gi = 0.01 //
}},
{Sel: "#Layer0", Doc: "Input layer",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 0.6 // no inhib so needs to be lower
ly.Acts.Noise.On.SetBool(true)
ly.Acts.Noise.Gi = 0.002 // hard to disrupt strong inputs!
}},
},
"FSFFFB": {
{Sel: "Layer", Doc: "use FSFFFB computed inhibition",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 1.0
ly.Inhib.Layer.SS = 30 // 30
ly.Inhib.Layer.FB = 1
ly.Inhib.Layer.FS0 = 0.1
ly.Inhib.Layer.FSTau = 6
ly.Inhib.Layer.SSfTau = 20
ly.Inhib.Layer.SSiTau = 50
}},
},
"Untrained": {},
"Trained": {},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "no learning",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(false)
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.25
pt.Com.Delay = 2
}},
{Sel: ".BackPath", Doc: "feedback excitatory",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
}},
{Sel: ".InhibPath", Doc: "inhibitory pathways",
Set: func(pt *axon.PathParams) {
// pt.SWts.Init.Dist = Uniform
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0
pt.SWts.Init.Sym.SetBool(false)
pt.Com.Delay = 0
pt.PathScale.Abs = 6 // key param
}},
{Sel: ".ToInhib", Doc: "to inhibitory pathways",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1
pt.PathScale.Abs = 5 // 5: strong is critical to get into fast spiking regime
pt.Com.Delay = 1
}},
},
"FSFFFB": {
{Sel: ".InhibPath", Doc: "inhibitory pathways",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0
}},
},
"Untrained": {
{Sel: ".Excite", Doc: "excitatory connections",
Set: func(pt *axon.PathParams) {
// pt.SWts.Init.Dist = Uniform
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.25
}},
},
"Trained": {
{Sel: ".Excite", Doc: "excitatory connections",
Set: func(pt *axon.PathParams) {
// pt.SWts.Init.Dist = Gaussian
pt.SWts.Init.Mean = 0.4
pt.SWts.Init.Var = 0.8
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinasesim
import (
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// Trials is the total number of trials per run.
Trials int `default:"10"`
// Cycles is the total number of cycles to run.
Cycles int `min:"10" default:"200"`
// PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
PlusCycles int `default:"50"`
// CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is.
CaBinCycles int `default:"10"`
// NCaBins is the total number of ca bins in unit variables.
// Set to Cycles / CaBinCycles in Update.
NCaBins int `edit:"-"`
}
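// Update sets NCaBins from Cycles / CaBinCycles. For example, with the
// defaults above (Cycles=200, CaBinCycles=10), NCaBins = 200/10 = 20 bins per trial.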
func (rc *RunConfig) Update() {
rc.NCaBins = rc.Cycles / rc.CaBinCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// Save saves a log file when run in nogui mode.
Save bool
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// RandomHz generates random firing rates, for testing
RandomHz bool
// minus phase firing rate
MinusHz float32 `default:"40"`
// plus phase firing rate
PlusHz float32 `default:"50"`
// SendDiffHz is the additive difference in sending firing frequency relative to recv (recv uses the basic minus, plus rates)
SendDiffHz float32
// clamp constant Ge value -- otherwise drive discrete spiking input
GeClamp bool `default:"false"`
// frequency of input spiking for !GeClamp mode
SpikeHz float32 `default:"50"`
// Raw synaptic excitatory conductance
Ge float32 `min:"0" step:"0.01" default:"2.0"`
// Inhibitory conductance
Gi float32 `min:"0" step:"0.01" default:"0.1"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "KinaseEQ"
cfg.Title = "Kinase learning equations"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/kinasesim/README.md"
cfg.Doc = "This simulation explores calcium-based synaptic learning rules, specifically at the synaptic level."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package kinasesim
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 1
//gosl:end
var _ModesValueMap = map[string]Modes{`Test`: 0}
var _ModesDescMap = map[Modes]string{0: ``}
var _ModesMap = map[Modes]string{0: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 3
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Condition`: 2}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Condition`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kinasesim
import (
"fmt"
"math/rand"
"cogentcore.org/core/math32"
"github.com/emer/axon/v2/kinase"
)
// KinaseNeuron has Neuron state
type KinaseNeuron struct {
// Neuron spiking (0,1)
Spike float32 `edit:"-"`
// Neuron probability of spiking
SpikeP float32 `edit:"-"`
// CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning:
// exponential integration of SpikeCaSyn * Spike at CaSynTau time constant (typically 30).
// Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for the
// synaptic trace driving credit assignment in learning. Time constant reflects
// binding time of Glu to NMDA and Ca buffering postsynaptically, and determines
// time window where pre * post spiking must overlap to drive learning.
CaSyn float32 `edit:"-"`
// regression variables
StartCaSyn float32 `edit:"-"`
TotalSpikes float32 `edit:"-"`
// binned count of spikes, for regression learning
CaBins []float32
}
func (kn *KinaseNeuron) Init() {
kn.Spike = 0
kn.SpikeP = 1
kn.CaSyn = 0
kn.StartTrial()
}
func (kn *KinaseNeuron) Config(nCaBins int) {
kn.CaBins = make([]float32, nCaBins)
}
func (kn *KinaseNeuron) StartTrial() {
kn.StartCaSyn = kn.CaSyn
kn.TotalSpikes = 0
for i := range kn.CaBins {
kn.CaBins[i] = 0
}
// kn.CaSyn = 0 // note: better fits with carryover
}
// Cycle does one cycle of neuron updating, with given exponential spike interval
// based on target spiking firing rate.
func (ss *Sim) Cycle(kn *KinaseNeuron, expInt float32, cyc int) {
kn.Spike = 0
if expInt > 0 {
kn.SpikeP *= rand.Float32()
if kn.SpikeP <= expInt {
kn.Spike = 1
kn.SpikeP = 1
kn.TotalSpikes += 1
}
}
kn.CaSyn += ss.CaSpike.CaSynDt * (ss.CaSpike.SpikeCaSyn*kn.Spike - kn.CaSyn)
bin := cyc / ss.Config.Run.CaBinCycles
kn.CaBins[bin] += kn.CaSyn / float32(ss.Config.Run.CaBinCycles)
}
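// exampleSpikeInterval is a minimal illustrative sketch (not called by the sim)
// of the spike-generation scheme used in Cycle above: for a target rate hz (in Hz)
// and 1 msec cycles, the threshold is expInt = exp(-1000/hz). SpikeP starts at 1
// and is multiplied by a uniform random number each cycle; a spike fires (and
// SpikeP resets) once SpikeP falls at or below expInt, giving approximately
// exponentially distributed inter-spike intervals with mean 1000/hz msec.
// Per TrialImpl, rates at or below 5 Hz leave the threshold at 0 (no spiking).
func exampleSpikeInterval(hz float32) float32 {
if hz <= 5 {
return 0
}
return math32.Exp(-1000.0 / hz)
}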
func (kn *KinaseNeuron) SetInput(inputs []float32, off int) {
inputs[off] = kn.StartCaSyn
inputs[off+1] = kn.TotalSpikes
for i, s := range kn.CaBins {
inputs[off+2+i] = s
}
}
// KinaseSynapse has Synapse state
type KinaseSynapse struct {
// CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP
CaM float32 `edit:"-" width:"12"`
// CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule
CaP float32 `edit:"-" width:"12"`
// CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule
CaD float32 `edit:"-" width:"12"`
// DWt is the weight change: CaP - CaD
DWt float32 `edit:"-" width:"12"`
}
func (ks *KinaseSynapse) Init() {
ks.CaM = 0
ks.CaP = 0
ks.CaD = 0
ks.DWt = 0
}
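// exampleCascade is a minimal sketch (not called by the sim) of the cascaded
// exponential integration that ss.CaSpike.Dt.FromCa applies to StdSyn in
// TrialImpl: ca drives CaM, CaM drives CaP, and CaP drives CaD, each with its
// own integration rate. The dt values here are purely illustrative, not the
// actual kinase defaults.
func exampleCascade(ca float32, syn *KinaseSynapse) {
const mDt, pDt, dDt = 0.2, 0.1, 0.05 // illustrative rates (1/tau)
syn.CaM += mDt * (ca - syn.CaM)
syn.CaP += pDt * (syn.CaM - syn.CaP)
syn.CaD += dDt * (syn.CaP - syn.CaD)
}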
// KinaseState is basic Kinase equation state
type KinaseState struct {
// SSE for decoder
SSE float32
// Condition counter
Condition int
// Condition description
Cond string
// Trial counter
Trial int
// Cycle counter
Cycle int
// phase-based firing rates
MinusHz, PlusHz float32
// ErrDWt is the target error dwt: PlusHz - MinusHz
ErrDWt float32
// Sending neuron
Send KinaseNeuron
// Receiving neuron
Recv KinaseNeuron
// Standard synapse values
StdSyn KinaseSynapse
// Current ca bin value
CaBin float32
// Linear synapse values
LinearSyn KinaseSynapse
// binned integration of send, recv spikes
CaBins []float32
}
func (ks *KinaseState) Init() {
ks.Send.Init()
ks.Recv.Init()
ks.StdSyn.Init()
ks.LinearSyn.Init()
ks.CaBin = 0
}
func (ks *KinaseState) Config(nCaBins int) {
ks.Send.Config(nCaBins)
ks.Recv.Config(nCaBins)
ks.CaBins = make([]float32, nCaBins)
ks.StdSyn.Init()
ks.LinearSyn.Init()
}
func (ks *KinaseState) StartTrial() {
ks.Send.StartTrial()
ks.Recv.StartTrial()
ks.LinearSyn.CaM = 0
ks.LinearSyn.CaP = 0
ks.LinearSyn.CaD = 0
}
func (ss *Sim) ConfigKinase() {
ss.Config.Run.Update()
nbins := ss.Config.Run.NCaBins
ss.CaPWts = make([]float32, nbins)
ss.CaDWts = make([]float32, nbins)
kinase.CaBinWts(ss.Config.Run.PlusCycles, ss.CaPWts, ss.CaDWts)
ss.Kinase.Config(nbins)
}
// Sweep runs a sweep through minus-plus ranges
func (ss *Sim) Sweep() {
// hz := []float32{25, 50, 100}
// nhz := len(hz)
ss.StatsStart(Test, Condition)
nhz := 100 / 5
hz := make([]float32, nhz)
i := 0
for h := float32(5); h <= 100; h += 5 {
hz[i] = h
i++
}
cond := 0
for mi := 0; mi < nhz; mi++ {
minusHz := hz[mi]
for pi := 0; pi < nhz; pi++ {
plusHz := hz[pi]
condStr := fmt.Sprintf("%03d -> %03d", int(minusHz), int(plusHz))
ss.Kinase.Condition = cond
ss.Kinase.Cond = condStr
ss.StatsStart(Test, Condition)
ss.RunImpl(minusHz, plusHz, ss.Config.Run.Trials)
cond++
}
}
}
// Run runs for given parameters
func (ss *Sim) Run() {
ss.RunImpl(ss.Config.MinusHz, ss.Config.PlusHz, ss.Config.Run.Trials)
}
// RunImpl runs ntrials trials at the given minus / plus rates, recording Trial and Condition level stats.
func (ss *Sim) RunImpl(minusHz, plusHz float32, ntrials int) {
if ss.GUI.StopNow() {
return
}
ss.StatsStart(Test, Trial)
ss.Kinase.Init()
for trl := 0; trl < ntrials; trl++ {
ss.Kinase.Trial = trl
ss.TrialImpl(minusHz, plusHz)
}
ss.StatsStep(Test, Condition)
}
func (ss *Sim) Trial() {
ss.Kinase.Init()
ss.TrialImpl(ss.Config.MinusHz, ss.Config.PlusHz)
}
// TrialImpl runs one trial for given parameters
func (ss *Sim) TrialImpl(minusHz, plusHz float32) {
if ss.GUI.StopNow() {
return
}
ss.StatsStart(Test, Trial)
cfg := ss.Config
ks := &ss.Kinase
ks.MinusHz = minusHz
ks.PlusHz = plusHz
ks.Cycle = 0
ks.ErrDWt = (plusHz - minusHz) / 100
minusCycles := cfg.Run.Cycles - cfg.Run.PlusCycles
nbins := ss.Config.Run.NCaBins
spikeBinCycles := ss.Config.Run.CaBinCycles
lsint := 1.0 / float32(spikeBinCycles)
ks.StartTrial()
for phs := 0; phs < 2; phs++ {
var maxcyc int
var rhz float32
switch phs {
case 0:
rhz = minusHz
maxcyc = minusCycles
case 1:
rhz = plusHz
maxcyc = cfg.Run.PlusCycles
}
shz := rhz + cfg.SendDiffHz
if shz < 0 {
shz = 0
}
var Sint, Rint float32
if rhz > 5 {
Rint = math32.Exp(-1000.0 / float32(rhz))
}
if shz > 5 {
Sint = math32.Exp(-1000.0 / float32(shz))
}
for t := 0; t < maxcyc; t++ {
ss.Cycle(&ks.Send, Sint, ks.Cycle)
ss.Cycle(&ks.Recv, Rint, ks.Cycle)
// original synaptic-level integration into "StdSyn"
ca := 8 * ks.Send.CaSyn * ks.Recv.CaSyn // 8 is standard CaGain Factor
ss.CaSpike.Dt.FromCa(ca, &ks.StdSyn.CaM, &ks.StdSyn.CaP, &ks.StdSyn.CaD)
// CaBin linear regression integration.
bin := ks.Cycle / spikeBinCycles
sp := float32(0)
if bin == 0 {
sp = ks.Recv.CaBins[0] * ks.Send.CaBins[0]
} else {
if ss.SynCa20 {
sp = 0.25 * (ks.Recv.CaBins[bin] + ks.Recv.CaBins[bin-1]) * (ks.Send.CaBins[bin] + ks.Send.CaBins[bin-1])
} else {
sp = ks.Recv.CaBins[bin] * ks.Send.CaBins[bin]
}
}
ks.CaBins[bin] = sp
ks.CaBin = sp
ks.LinearSyn.CaM = sp
ks.LinearSyn.CaP += lsint * ss.CaPWts[bin] * sp // slow integ just for visualization
ks.LinearSyn.CaD += lsint * ss.CaDWts[bin] * sp
ss.StatsStep(Test, Cycle)
ks.Cycle++
}
}
ks.StdSyn.DWt = ks.StdSyn.CaP - ks.StdSyn.CaD
var cp, cd float32
for i := range nbins {
cp += ks.CaBins[i] * ss.CaPWts[i]
cd += ks.CaBins[i] * ss.CaDWts[i]
}
ks.LinearSyn.DWt = cp - cd
ks.LinearSyn.CaP = cp
ks.LinearSyn.CaD = cd
ss.StatsStep(Test, Trial)
}
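// linearDWt is a minimal sketch (not called by the sim) of the linear CaBin
// rule computed at the end of TrialImpl above: the plus (CaP) and minus (CaD)
// components are dot products of the per-bin send*recv Ca products with the
// fixed CaPWts / CaDWts weight vectors from kinase.CaBinWts, and DWt is their
// difference.
func linearDWt(caBins, caPWts, caDWts []float32) (cp, cd, dwt float32) {
for i, ca := range caBins {
cp += ca * caPWts[i]
cd += ca * caDWts[i]
}
return cp, cd, cp - cd
}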
// Regress runs the linear regression on the data
// func (ss *Sim) Regress() {
// r := glm.NewGLM()
// mode := Test
// level := Condition
// modeDir := ss.Stats.Dir(mode.String())
// levelDir := modeDir.Dir(level.String())
//
// dt := tensorfs.DirTable(axon.StatsNode(ss.Stats, Test, Condition))
// err := r.SetTable(&ls.Data, "State", "StdCa", "PredCa", "ErrCa")
// if err != nil {
// slog.Error(err.Error())
// return
// }
// r.DepNames = []string{"CaP", "CaD"}
// r.L1Cost = 0.1
// r.L2Cost = 0.1
// r.StopTolerance = 0.00001
// r.ZeroOffset = true
//
// // NBins = 4
// // r.Coeff.Values = []float64{
// // 0.05, 0.25, 0.5, 0.6, 0, // linear progression
// // 0.25, 0.5, 0.5, 0.25, 0} // hump in the middle
//
// // NBins = 8, 200+50 cycles
// // r.Coeff.Values = []float64{
// // 0.3, 0.4, 0.55, 0.65, 0.75, 0.85, 1.0, 1.0, 0, // linear progression
// // 0.5, 0.65, 0.75, 0.9, 0.9, 0.9, 0.65, 0.55, .0} // hump in the middle
//
// // NBins = 8, 280+70 cycles
// r.Coeff.Values = []float64{
// 0.0, 0.1, 0.23, 0.35, 0.45, 0.55, 0.75, 0.75, 0, // linear progression
// 0.2, 0.3, 0.4, 0.5, 0.5, 0.5, 0.4, 0.3, .0} // hump in the middle
//
// fmt.Println(r.Coeffs())
//
// r.Run()
//
// fmt.Println(r.Variance())
// fmt.Println(r.Coeffs())
//
// ls.Data.SaveCSV("linear_data.tsv", tensor.Tab, table.Headers)
// }
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/kinasesim"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[kinasesim.Sim, kinasesim.Config]() }
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// kinaseq: Explores calcium-based synaptic learning rules,
// specifically at the synaptic level.
package kinasesim
//go:generate core generate -add-types -add-funcs -gosl
import (
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/cli"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/kinase"
"github.com/emer/emergent/v2/egui"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Test Modes = iota
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Condition
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see config.go for Config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Kinase CaSpike params
CaSpike kinase.CaSpikeParams `display:"no-inline" new-window:"+"`
// SynCa20 determines whether to use 20 msec SynCa integration.
SynCa20 bool
// CaPWts are CaBin integration weights for CaP
CaPWts []float32 `new-window:"+"`
// CaDWts are CaBin integration weights for CaD
CaDWts []float32 `new-window:"+"`
// Kinase state
Kinase KinaseState `new-window:"+"`
// Training data for least squares solver
TrainData tensor.Float64 `new-window:"+"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) Defaults() {
ss.CaSpike.Defaults()
ss.SynCa20 = false
cli.SetFromDefaults(&ss.Config)
}
func (ss *Sim) ConfigSim() {
ss.Defaults()
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
ss.CaSpike.Defaults()
ss.ConfigKinase()
ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
return
}
}
//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
ss.SetRunName()
ss.InitRandSeed(0)
ss.ConfigKinase()
ss.StatsInit()
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
}
// Stop tells the sim to stop running
func (ss *Sim) Stop() {
ss.GUI.SetStopNow()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Condition is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := "Run"
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
ss.RunStats(Test, Cycle, Start)
ss.RunStats(Test, Trial, Start)
ss.RunStats(Test, Condition, Start)
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Cycle))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Condition))
if idx < 0 {
idx = 0
}
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
vals := axon.StructValues(&ss.Kinase,
func(parent reflect.Value, field reflect.StructField, value reflect.Value) bool {
if field.Name == "CaBins" {
return false
}
return true
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, sv := range vals {
name := sv.Path
kind := sv.Field.Type.Kind()
isNumber := reflectx.KindIsNumber(kind)
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := tensorfs.ValueType(levelDir, name, kind)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0)
switch level {
case Cycle:
switch name {
case "Send.Spike", "Recv.Spike", "StdSyn.CaP", "StdSyn.CaD":
s.On = true
}
case Trial:
switch name {
case "StdSyn.CaP", "StdSyn.CaD", "StdSyn.DWt", "LinearSyn.CaP", "LinearSyn.CaD", "LinearSyn.DWt":
s.On = true
case "Cycle":
s.Group = "none"
}
case Condition:
switch name {
case "StdSyn.DWt", "LinearSyn.DWt", "ErrDWt":
s.On = true
case "Cycle", "Trial":
s.Group = "none"
}
}
})
continue
}
switch level {
case Cycle, Trial:
if isNumber {
stat := errors.Log1(reflectx.ToFloat(sv.Value.Interface()))
tsr.AppendRowFloat(stat)
tensorfs.ValueType(curModeDir, name, kind, 1).SetFloat1D(stat, 0)
} else {
stat := reflectx.ToString(sv.Value.Interface())
tsr.AppendRowString(stat)
curModeDir.StringValue(name, 1).SetString1D(stat, 0)
}
default:
if isNumber {
subDir := modeDir.Dir((level - 1).String())
stat := stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
}
})
// collect regression data
// ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
// if level != Trial {
// return
// }
// regressDir := ss.Stats.Dir("Regress")
// nbins := ss.Config.Run.NCaBins
// vars := []string{"Trial", "Hz", "Bins", "SynCa", "PredCa", "ErrCa", "SSE"}
// for vi, name := range vars {
// ndim := 2
// switch name {
// case "Hz":
// ndim = 4
// case "Bins":
// ndim = nbins
// case "SSE":
// ndim = 1
// }
// tsr := regressDir.Float64(name, 0, ndim)
// if phase == Start {
// tsr.SetNumRows(0)
// continue
// }
// }
// })
}
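// exampleAddStat is a minimal sketch (not called by the sim) of adding a
// custom scalar stat via AddStat, following the same Start / Step pattern as
// ConfigStats above: reset the tensor at Start, append the current value at
// Step. The "ExampleErrDWt" name is hypothetical; it just records the existing
// Kinase.ErrDWt field at the Trial level.
func (ss *Sim) exampleAddStat() {
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Trial {
return
}
levelDir := ss.Stats.Dir(mode.String()).Dir(level.String())
tsr := tensorfs.ValueType(levelDir, "ExampleErrDWt", reflect.Float64)
if phase == Start {
tsr.SetNumRows(0)
return
}
tsr.AppendRowFloat(float64(ss.Kinase.ErrDWt))
})
}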
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.CycleUpdateInterval = 10
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Init", Icon: icons.Update,
Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
Active: egui.ActiveStopped,
Func: func() {
ss.Init()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Stop", Icon: icons.Stop,
Tooltip: "Stops running.",
Active: egui.ActiveRunning,
Func: func() {
ss.Stop()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Sweep", Icon: icons.PlayArrow,
Tooltip: "Runs Kinase sweep over set of minus / plus spiking levels.",
Active: egui.ActiveStopped,
Func: func() {
if !ss.GUI.IsRunning() {
go func() {
ss.GUI.StartRun()
ss.Sweep()
ss.GUI.Stopped(Test, Condition)
}()
}
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Run", Icon: icons.PlayArrow,
Tooltip: "Runs NTrials of Kinase updating.",
Active: egui.ActiveStopped,
Func: func() {
if !ss.GUI.IsRunning() {
go func() {
ss.GUI.StartRun()
ss.Run()
ss.GUI.Stopped(Test, Trial)
}()
}
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Trial", Icon: icons.PlayArrow,
Tooltip: "Runs one Trial of Kinase updating.",
Active: egui.ActiveStopped,
Func: func() {
if !ss.GUI.IsRunning() {
go func() {
ss.GUI.StartRun()
ss.Trial()
ss.GUI.Stopped(Test, Trial)
}()
}
},
})
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Reset Plot", Icon: icons.Update,
Tooltip: "Reset TstCycPlot.",
Active: egui.ActiveStopped,
Func: func() {
ss.StatsInit()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Defaults", Icon: icons.Update,
Tooltip: "Restore initial default parameters.",
Active: egui.ActiveStopped,
Func: func() {
ss.Defaults()
ss.Init()
ss.GUI.UpdateWindow()
},
})
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
// runName := ss.SetRunName()
// netName := ss.Net.Name
// axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{[]string{"Cycle"}})
mpi.Printf("Running %d Cycles\n", ss.Config.Run.Cycles)
ss.Sweep()
// axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lvis
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
type EnvConfig struct { //types:add
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
Env map[string]any
// other option for below: Path = "images/CU3D_100_plus_renders", ImageFile = "cu3d100plus"
// works somewhat worse
// Path is the file path for the images. Create a symbolic link in sim dir for images.
Path string `default:"images/CU3D_100_renders_lr20_u30_nb"`
// ImageFile is the prefix for config files with lists of categories and images.
ImageFile string `default:"cu3d100old"`
// NOutPer is the number of output units per localist output category
NOutPer int `default:"5"`
// If true, use random output patterns -- else localist
RndOutPats bool `default:"false"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// SubPools if true, organize layers and connectivity with 2x2 sub-pools
// within each topological pool.
SubPools bool `default:"true"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// MPI uses MPI message passing interface for data parallel computation
// between nodes running identical copies of the same sim, sharing DWt changes.
MPI bool
// GPUSameNodeMPI if true and both MPI and GPU are being used, this selects
// a different GPU for each MPI proc rank, assuming a multi-GPU node.
// set to false if running MPI across multiple GPU nodes.
GPUSameNodeMPI bool
// NData is the number of data-parallel items to process in parallel per trial.
// Is significantly faster for both CPU and GPU. Results in an effective
// mini-batch of learning.
NData int `default:"8" min:"1"`
// SlowInterval is the interval between slow adaptive processes.
// This generally needs to be longer than the default of 100 in larger models.
SlowInterval int `default:"400"` // 400 best > 800 >> 100
// AdaptGiInterval is the interval between adapting inhibition steps.
AdaptGiInterval int `default:"400"` // ?
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"1000"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"512"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"10"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"160"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"60"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in epochs) to run through all the test patterns,
// in terms of training epochs. Can use 0 or -1 for no testing.
TestInterval int `default:"20"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// ConfusionEpc is the epoch to start recording confusion matrix.
ConfusionEpc int `default:"500"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
// StartEpoch is the epoch counter to set when loading start weights.
StartEpoch int
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
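// For example, with the defaults above (ISICycles=10, MinusCycles=160,
// PlusCycles=60), Cycles returns 10 + 160 + 60 = 230 cycles per trial.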
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// SaveWeightsAt is a list of epoch counters at which to save weights.
SaveWeightsAt []int `default:"[400, 800, 1200]"`
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `default:"['Epoch']" nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// environment configuration options
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "LVis"
cfg.Title = "Leabra Vision"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/lvis/README.md"
cfg.Doc = "This simulation explores how a hierarchy of areas in the ventral stream of visual processing (up to inferotemporal (IT) cortex) can produce robust object recognition that is invariant to changes in position, size, etc of retinal input images."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package lvis
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1, 2}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 3
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1, `NovelTrain`: 2}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``, 2: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`, 2: `NovelTrain`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 4
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2021, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lvis
import (
"fmt"
"math/rand"
"path/filepath"
"sort"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/fsx"
)
// Images implements management of lists of image files,
// with category names or organized in directories by category.
type Images struct {
// Path to image files: this should point to a directory that has files
// or subdirectories that then have image files in them.
Path string
// Extensions of image files to find (lowercase)
Exts []string
// CatSep is the separator in file name for category label.
// if empty then must have subdirs.
CatSep string
// Split by item -- each file name has an item label after CatSep.
SplitByItm bool
// Number of testing images per category. If SplitByItm is set, images are split by item id.
NTestPerCat int
// List of image categories.
Cats []string
// CatMap is the map of categories to indexes in Cats list.
CatMap map[string]int
// ImagesAll is the full list of images, organized by category
// (directory) and then filename.
ImagesAll [][]string
// ImagesTrain is the list of training images, organized by category
// (directory) and then filename.
ImagesTrain [][]string
// ImagesTest is the list of testing images, organized by category
// (directory) and then filename.
ImagesTest [][]string
// FlatAll is the flat list of all images, as cat/filename.ext.
// Flats() makes this from the above data.
FlatAll []string
// FlatTrain is the flat list of all training images, as cat/filename.ext.
// Flats() makes from above data.
FlatTrain []string
// FlatTest is the flat list of all testing images, as cat/filename.ext.
// Flats() makes from above data.
FlatTest []string
}
// SetPath sets path, with given extensions, and separator
func (im *Images) SetPath(path string, exts []string, catsep string) {
im.Path = path
im.Exts = exts
im.CatSep = catsep
}
// OpenPath opens list of images at given path, with given extensions
func (im *Images) OpenPath(path string, exts []string, catsep string) error {
im.SetPath(path, exts, catsep)
if im.CatSep == "" {
return im.OpenDirs()
}
return im.OpenNames()
}
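// exampleOpenImages is a minimal usage sketch (not called by the sim): it opens
// a directory organized by per-category subdirectories, which lists the images
// and performs the train / test split. The path and extension here are
// hypothetical.
func exampleOpenImages() (*Images, error) {
im := &Images{NTestPerCat: 2}
if err := im.OpenPath("images/objs", []string{".png"}, ""); err != nil {
return nil, err
}
// im.FlatTrain and im.FlatTest now hold the flat train / test lists.
return im, nil
}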
// OpenDirs opens images at Path with subdirs for category names
func (im *Images) OpenDirs() error {
im.Cats = fsx.Dirs(im.Path)
nc := len(im.Cats)
if nc == 0 {
err := fmt.Errorf("Images.OpenDirs() -- no directories for categories in: %s", im.Path)
return errors.Log(err)
}
im.ImagesAll = make([][]string, nc)
for ci := nc - 1; ci >= 0; ci-- {
cat := im.Cats[ci]
cp := filepath.Join(im.Path, cat)
fls := fsx.Filenames(cp, im.Exts...)
if len(fls) == 0 {
im.Cats = append(im.Cats[:ci], im.Cats[ci+1:]...)
im.ImagesAll = append(im.ImagesAll[:ci], im.ImagesAll[ci+1:]...)
continue
}
im.ImagesAll[ci] = fls
}
im.MakeCatMap()
im.Split()
return nil
}
func (im *Images) MakeCatMap() {
nc := len(im.Cats)
im.CatMap = make(map[string]int, nc)
for ci, c := range im.Cats {
im.CatMap[c] = ci
}
}
func (im *Images) Cat(f string) string {
if im.CatSep == "" {
dir, _ := filepath.Split(f)
return dir
}
i := strings.Index(f, im.CatSep)
return f[:i]
}
func (im *Images) Item(f string) string {
i := strings.Index(f, im.CatSep)
rf := f[i+1:]
i = strings.Index(rf, im.CatSep)
return rf[:i]
}
// OpenNames opens images at Path with category names in file names
func (im *Images) OpenNames() error {
fls := fsx.Filenames(im.Path, im.Exts...)
nf := len(fls)
if nf == 0 {
err := fmt.Errorf("Images.OpenNames() -- no image files in: %s", im.Path)
return errors.Log(err)
}
sort.Strings(fls)
im.ImagesAll = make([][]string, 0)
curcat := ""
si := 0
for ni, nm := range fls {
cat := im.Cat(nm)
if cat != curcat {
if curcat != "" {
im.Cats = append(im.Cats, curcat)
im.ImagesAll = append(im.ImagesAll, fls[si:ni])
}
curcat = cat
si = ni
}
}
im.Cats = append(im.Cats, curcat)
im.ImagesAll = append(im.ImagesAll, fls[si:])
im.MakeCatMap()
im.Split()
return nil
}
// Split does the train / test split
func (im *Images) Split() {
if im.SplitByItm {
im.SplitItems()
} else {
im.SplitNoItems()
}
}
// SplitItems does the train / test split, by items
func (im *Images) SplitItems() {
nc := len(im.ImagesAll)
im.ImagesTrain = make([][]string, nc)
im.ImagesTest = make([][]string, nc)
for ci, fls := range im.ImagesAll {
itmp := make(map[string]int)
for _, f := range fls {
itm := im.Item(f)
itmp[itm] = 0
}
nitm := len(itmp)
itms := make([]string, nitm)
i := 0
for it := range itmp {
itms[i] = it
i++
}
pi := rand.Perm(nitm)
ntst := im.NTestPerCat
if ntst >= nitm {
ntst = nitm / 2
}
ntrn := nitm - ntst
tstm := make(map[string]int, ntrn)
for i = 0; i < ntst; i++ {
tstm[itms[pi[i]]] = i
}
for _, f := range fls {
itm := im.Item(f)
_, istst := tstm[itm]
if istst {
im.ImagesTest[ci] = append(im.ImagesTest[ci], f)
} else {
im.ImagesTrain[ci] = append(im.ImagesTrain[ci], f)
}
}
}
im.Flats()
}
// SplitNoItems does the train / test split, no items
func (im *Images) SplitNoItems() {
nc := len(im.ImagesAll)
im.ImagesTrain = make([][]string, nc)
im.ImagesTest = make([][]string, nc)
for ci, fls := range im.ImagesAll {
nitm := len(fls)
ntst := im.NTestPerCat
if ntst >= nitm {
ntst = nitm / 2
}
ntrn := nitm - ntst
slist := rand.Perm(nitm)
for i := 0; i < ntrn; i++ {
im.ImagesTrain[ci] = append(im.ImagesTrain[ci], fls[slist[i]])
}
for i := ntrn; i < nitm; i++ {
im.ImagesTest[ci] = append(im.ImagesTest[ci], fls[slist[i]])
}
}
im.Flats()
}
// SelectCats filters the list of images to those within given list of categories.
func (im *Images) SelectCats(cats []string) {
nc := len(im.Cats)
for ci := nc - 1; ci >= 0; ci-- {
cat := im.Cats[ci]
sel := false
for _, cs := range cats {
if cat == cs {
sel = true
break
}
}
if !sel {
im.Cats = append(im.Cats[:ci], im.Cats[ci+1:]...)
im.ImagesAll = append(im.ImagesAll[:ci], im.ImagesAll[ci+1:]...)
im.ImagesTrain = append(im.ImagesTrain[:ci], im.ImagesTrain[ci+1:]...)
im.ImagesTest = append(im.ImagesTest[:ci], im.ImagesTest[ci+1:]...)
}
}
im.MakeCatMap()
im.Flats()
}
// DeleteCats filters the list of images to exclude those within given list of categories.
func (im *Images) DeleteCats(cats []string) {
nc := len(im.Cats)
for ci := nc - 1; ci >= 0; ci-- {
cat := im.Cats[ci]
del := false
for _, cs := range cats {
if cat == cs {
del = true
break
}
}
if del {
im.Cats = append(im.Cats[:ci], im.Cats[ci+1:]...)
im.ImagesAll = append(im.ImagesAll[:ci], im.ImagesAll[ci+1:]...)
im.ImagesTrain = append(im.ImagesTrain[:ci], im.ImagesTrain[ci+1:]...)
im.ImagesTest = append(im.ImagesTest[:ci], im.ImagesTest[ci+1:]...)
}
}
im.MakeCatMap()
im.Flats()
}
// SelectImages filters the list of images to those within given list of images (contains)
func (im *Images) SelectImages(images []string) {
for ci := range im.ImagesAll {
ofcat := im.ImagesAll[ci]
no := len(ofcat)
for oi := no - 1; oi >= 0; oi-- {
ofl := ofcat[oi]
sel := false
for _, cs := range images {
if strings.Contains(ofl, cs) {
sel = true
break
}
}
if !sel {
ofcat = append(ofcat[:oi], ofcat[oi+1:]...)
}
}
im.ImagesAll[ci] = ofcat // write the filtered slice back so the shortened length is kept
}
im.Split()
im.Flats()
}
// DeleteImages filters the list of images to exclude those within given list of images (contains)
func (im *Images) DeleteImages(images []string) {
for ci := range im.ImagesAll {
ofcat := im.ImagesAll[ci]
no := len(ofcat)
for oi := no - 1; oi >= 0; oi-- {
ofl := ofcat[oi]
del := false
for _, cs := range images {
if strings.Contains(ofl, cs) {
del = true
break
}
}
if del {
ofcat = append(ofcat[:oi], ofcat[oi+1:]...)
}
}
im.ImagesAll[ci] = ofcat // write the filtered slice back so the shortened length is kept
}
im.Split()
im.Flats()
}
// Flats generates flat lists from categorized lists, in form categ/fname.obj
func (im *Images) Flats() {
im.FlatAll = im.FlatImpl(im.ImagesAll)
im.FlatTrain = im.FlatImpl(im.ImagesTrain)
im.FlatTest = im.FlatImpl(im.ImagesTest)
}
// FlatImpl generates flat lists from categorized lists, in form categ/fname.obj
func (im *Images) FlatImpl(images [][]string) []string {
var flat []string
for ci, fls := range images {
cat := im.Cats[ci]
for _, fn := range fls {
if im.CatSep == "" {
fn = cat + " " + fn
}
flat = append(flat, fn)
}
}
return flat
}
// UnFlat translates FlatTrain, FlatTest into full nested lists -- Cats must
// also have already been loaded. Call after loading FlatTrain, FlatTest
func (im *Images) UnFlat() {
nc := len(im.Cats)
im.ImagesAll = make([][]string, nc)
im.ImagesTrain = make([][]string, nc)
im.ImagesTest = make([][]string, nc)
im.MakeCatMap()
for _, fn := range im.FlatTrain {
cat := im.Cat(fn)
ci := im.CatMap[cat]
im.ImagesTrain[ci] = append(im.ImagesTrain[ci], fn)
im.ImagesAll[ci] = append(im.ImagesAll[ci], fn)
}
for _, fn := range im.FlatTest {
cat := im.Cat(fn)
ci := im.CatMap[cat]
im.ImagesTest[ci] = append(im.ImagesTest[ci], fn)
im.ImagesAll[ci] = append(im.ImagesAll[ci], fn)
}
im.FlatAll = im.FlatImpl(im.ImagesAll)
}
// ToTrainAll compiles TrainAll from ImagesTrain, ImagesTest
func (im *Images) ToTrainAll() {
nc := len(im.Cats)
im.ImagesAll = make([][]string, nc)
im.MakeCatMap()
for ci, fl := range im.ImagesTrain {
im.ImagesAll[ci] = append(im.ImagesAll[ci], fl...)
}
for ci, fl := range im.ImagesTest {
im.ImagesAll[ci] = append(im.ImagesAll[ci], fl...)
}
}
// Copyright (c) 2021, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lvis
import (
"fmt"
"image"
"path/filepath"
"sort"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/fsx"
"cogentcore.org/core/base/iox/imagex"
"cogentcore.org/core/base/iox/jsonx"
"cogentcore.org/core/base/slicesx"
"cogentcore.org/core/gpu"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
"cogentcore.org/core/math32/vecint"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/stats/metric"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensor/tensormpi"
"github.com/emer/emergent/v2/env"
"github.com/emer/v1vision/v1std"
"github.com/emer/v1vision/v1vision"
"golang.org/x/image/draw"
"golang.org/x/image/math/f64"
)
// TrialState contains the state for a given trial.
// Trials are processed data-parallel per Step().
type TrialState struct {
// current category
Cat string
// index of current category
CatIdx int
// current image
Img string
// current translation
Trans math32.Vector2
// current scaling
Scale float32
// current rotation
Rot float32
// rendered image as loaded
Image image.Image `display:"-"`
}
func (st *TrialState) String() string {
return fmt.Sprintf("%s:%s", st.Cat, st.Img)
}
// ImagesEnv presents pre-rendered object images from the Images lists, with random transforms and V1 filtering.
type ImagesEnv struct {
// Name of this environment (Train, Test mode).
Name string
// NData is the number of steps to process in data-parallel.
NData int
// Trials has NData state per trial for last Step()
Trials []TrialState
// image file name
ImageFile string
// present test items, else train
Test bool
// present items in sequential order -- else shuffled
Sequential bool
// compute high-res full field filtering. Off by default
High16 bool
// compute color DoG (blob) filtering. On by default.
ColorDoG bool
// images list
Images Images
// def 0.3 maximum amount of translation as proportion of half-width size in each direction -- 1 = something in center is now at right edge
TransMax math32.Vector2
// [def: 0.15] if > 0, generate translations using gaussian normal distribution with this standard deviation, and then clip to TransMax range -- this facilitates learning on the central region while still giving exposure to wider area. Typically turn off for last 100 epochs to measure true uniform distribution performance.
TransSigma float32
// def 0.5 - 1.1 range of scale
ScaleRange minmax.F32
// maximum degrees of rotation in plane.
// image is rotated plus or minus in this range
RotateMax float32 `default:"8"`
// V1c has the full set of V1c complex and DoG color contrast filters.
V1c v1std.V1cMulti
// maximum number of output categories representable here
MaxOut int
// use random bit patterns instead of localist output units
OutRandom bool
// proportion activity for random patterns
RndPctOn float32
// proportion minimum difference for random patterns
RndMinDiff float32
// the output tensor geometry -- must be >= number of cats
OutSize vecint.Vector2i
// number of output units per category.
// Spiking may benefit from replication; is Y inner dim of output tensor
NOutPer int
// output patterns: either localist or random
Pats table.Table `display:"no-inline"`
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// output pattern for current item
Output tensor.Float32
// starting row, e.g., for mpi allocation across processors
StRow int
// ending row, ignored if 0
EdRow int
// shuffled list of entire set of images -- re-shuffle every time through imgidxs
Shuffle []int
// indexes of images to present -- from StRow to EdRow
ImgIdxs []int
// Row of item list -- this is actual counter driving everything
Row env.Counter `display:"inline"`
}
func (ev *ImagesEnv) Label() string { return ev.Name }
func (ev *ImagesEnv) Trial(di int) *TrialState {
return &ev.Trials[di]
}
func (ev *ImagesEnv) Defaults() {
ev.TransSigma = 0
// hard:
ev.TransMax.Set(0.3, 0.3) // 0.2 easy, 0.3 hard
ev.ScaleRange.Set(0.7, 1.2) // 0.8, 1.1 easy, .7-1.2 hard
ev.RotateMax = 16 // 8 easy, 16 hard
// easy:
// ev.TransMax.Set(0.2, 0.2)
// ev.ScaleRange.Set(0.8, 1.1)
// ev.RotateMax = 8
ev.RndPctOn = 0.2
ev.RndMinDiff = 0.5
ev.NOutPer = 5
ev.V1c.Defaults()
// ev.V1c.GPU = false
// todo: ev.High16, ColorDoG options.
ev.V1c.SplitColor = true // split is better!
ev.V1c.StdLowMed16DegZoom1()
}
// ImageList returns the list of images -- train or test
func (ev *ImagesEnv) ImageList() []string {
if ev.Test {
return ev.Images.FlatTest
}
return ev.Images.FlatTrain
}
// MPIAlloc allocate objects based on mpi processor number
func (ev *ImagesEnv) MPIAlloc() {
ws := mpi.WorldSize()
nim := ws * (len(ev.ImageList()) / ws) // even multiple of size -- few at end are lost..
ev.StRow, ev.EdRow, _ = tensormpi.AllocN(nim)
// mpi.PrintAllProcs = true
// mpi.Printf("allocated images: n: %d st: %d ed: %d\n", nim, ev.StRow, ev.EdRow)
// mpi.PrintAllProcs = false
}
func (ev *ImagesEnv) Config(ndata int, netGPU *gpu.GPU) {
ev.NData = ndata
v1vision.ComputeGPU = netGPU
ev.Trials = slicesx.SetLength(ev.Trials, ndata)
ev.V1c.Config(ndata)
}
func (ev *ImagesEnv) Init(run int) {
ev.RandSeed = int64(73 + run)
if ev.Rand.Rand == nil {
ev.Rand.NewRand(ev.RandSeed)
} else {
ev.Rand.Seed(ev.RandSeed)
}
ev.Row.Cur = -1 // init state -- key so that first Step() = 0
nitm := len(ev.ImageList())
if ev.EdRow > 0 {
ev.EdRow = min(ev.EdRow, nitm)
ev.ImgIdxs = make([]int, ev.EdRow-ev.StRow)
} else {
ev.ImgIdxs = make([]int, nitm)
}
for i := range ev.ImgIdxs {
ev.ImgIdxs[i] = ev.StRow + i
}
ev.Shuffle = ev.Rand.Perm(nitm)
ev.Row.Max = len(ev.ImgIdxs)
nc := len(ev.Images.Cats)
ev.MaxOut = max(nc, ev.MaxOut)
ev.ConfigPats()
}
// OpenConfig opens saved configuration for current images: config files are required,
// and an error is logged and returned if not present.
func (ev *ImagesEnv) OpenConfig() error {
cfnm := fmt.Sprintf("%s_cats.json", ev.ImageFile)
tsfnm := fmt.Sprintf("%s_ntest%d_tst.json", ev.ImageFile, ev.Images.NTestPerCat)
trfnm := fmt.Sprintf("%s_ntest%d_trn.json", ev.ImageFile, ev.Images.NTestPerCat)
if errors.Log1(fsx.FileExistsFS(embedfs, cfnm)) {
errors.Log(jsonx.OpenFS(&ev.Images.Cats, embedfs, cfnm))
errors.Log(jsonx.OpenFS(&ev.Images.ImagesTest, embedfs, tsfnm))
errors.Log(jsonx.OpenFS(&ev.Images.ImagesTrain, embedfs, trfnm))
ev.Images.ToTrainAll()
ev.Images.Flats()
return nil
}
return errors.Log(errors.New("ImagesEnv.OpenConfig: Required Cats config file not found: " + cfnm))
}
// SaveConfig saves configuration for current images
func (ev *ImagesEnv) SaveConfig() {
cfnm := fmt.Sprintf("%s_cats.json", ev.ImageFile)
tsfnm := fmt.Sprintf("%s_ntest%d_tst.json", ev.ImageFile, ev.Images.NTestPerCat)
trfnm := fmt.Sprintf("%s_ntest%d_trn.json", ev.ImageFile, ev.Images.NTestPerCat)
errors.Log(jsonx.Save(ev.Images.Cats, filepath.Join("..", cfnm)))
errors.Log(jsonx.Save(ev.Images.ImagesTest, filepath.Join("..", tsfnm)))
errors.Log(jsonx.Save(ev.Images.ImagesTrain, filepath.Join("..", trfnm)))
}
// ConfigPats configures the output patterns
func (ev *ImagesEnv) ConfigPats() {
if ev.OutRandom {
ev.ConfigPatsRandom()
} else {
ev.ConfigPatsLocalist2D()
}
}
// ConfigPatsName sets the category name labels for the patterns (currently disabled)
func (ev *ImagesEnv) ConfigPatsName() {
// for i := 0; i < ev.MaxOut; i++ {
// nm := fmt.Sprintf("P%03d", i)
// if i < len(ev.Images.Cats) {
// nm = ev.Images.Cats[i]
// }
// ev.Pats.SetCellString("Name", i, nm)
// }
}
// ConfigPatsLocalistPools configures the output patterns: localist case
// with pools for each sub-pool
func (ev *ImagesEnv) ConfigPatsLocalistPools() {
oshp := []int{ev.OutSize.Y, ev.OutSize.X, ev.NOutPer, 1}
ev.Output.SetShapeSizes(ev.NData, ev.OutSize.Y, ev.OutSize.X, ev.NOutPer, 1)
ev.Pats.AddStringColumn("Name")
out := ev.Pats.AddFloat32Column("Output", oshp...)
ev.Pats.SetNumRows(ev.MaxOut)
for pi := range ev.MaxOut {
op := out.SubSpace(pi)
si := ev.NOutPer * pi
for i := range ev.NOutPer {
op.SetFloat1D(1, si+i)
}
}
ev.ConfigPatsName()
}
// ConfigPatsLocalist2D configures the output patterns: localist case
// as an overall 2D layer -- NOutPer goes along X axis to be contiguous
func (ev *ImagesEnv) ConfigPatsLocalist2D() {
oshp := []int{ev.OutSize.Y, ev.OutSize.X * ev.NOutPer}
ev.Output.SetShapeSizes(ev.NData, ev.OutSize.Y, ev.OutSize.X*ev.NOutPer)
ev.Pats.Init()
ev.Pats.AddStringColumn("Name")
out := ev.Pats.AddFloat32Column("Output", oshp...)
ev.Pats.SetNumRows(ev.MaxOut)
for pi := range ev.MaxOut {
op := out.SubSpace(pi)
si := ev.NOutPer * pi
for i := range ev.NOutPer {
op.SetFloat1D(1, si+i)
}
}
ev.ConfigPatsName()
}
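// For example (illustrative, using the ConfigEnv settings below: OutSize = 10x10,
// NOutPer = 5): each output pattern is 10 x 50 units, and category pi activates the
// NOutPer contiguous units [5*pi, 5*pi+5) of its flattened row-major pattern,
// giving one 5-unit block per category.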
// ConfigPatsRandom configures the output patterns: random case
func (ev *ImagesEnv) ConfigPatsRandom() {
// oshp := []int{ev.OutSize.Y, ev.OutSize.X}
// oshpnm := []string{"Y", "X"}
// ev.Output.SetShape(oshp, nil, oshpnm)
// sch := table.Schema{
// {"Name", tensor.STRING, nil, nil},
// {"Output", tensor.FLOAT32, oshp, oshpnm},
// }
// ev.Pats.SetFromSchema(sch, ev.MaxOut)
// np := ev.OutSize.X * ev.OutSize.Y
// nOn := patgen.NFmPct(ev.RndPctOn, np)
// minDiff := patgen.NFmPct(ev.RndMinDiff, nOn)
// fnm := fmt.Sprintf("rndpats_%dx%d_n%d_on%d_df%d.tsv", ev.OutSize.X, ev.OutSize.Y, ev.MaxOut, nOn, minDiff)
// _, err := os.Stat(fnm)
// if !os.IsNotExist(err) {
// ev.Pats.OpenCSV(core.FileName(fnm), table.Tab)
// } else {
// out := ev.Pats.Col(1).(*tensor.Float32)
// patgen.PermutedBinaryMinDiff(out, nOn, 1, 0, minDiff)
// ev.ConfigPatsName()
// ev.Pats.SaveCSV(core.FileName(fnm), table.Tab, table.Headers)
// }
}
// NewShuffle generates a new random order of items to present
func (ev *ImagesEnv) NewShuffle() {
randx.PermuteInts(ev.Shuffle, &ev.Rand)
}
// CurImage returns the current image based on the current row and shuffle
// order, setting the TrialState image name and category accordingly
func (ev *ImagesEnv) CurImage(st *TrialState) string {
il := ev.ImageList()
sz := len(ev.ImgIdxs)
if ev.Row.Cur >= sz {
ev.Row.Max = sz
ev.Row.Cur = 0
ev.NewShuffle()
}
r := ev.Row.Cur
if r < 0 {
r = 0
}
i := ev.ImgIdxs[r]
if !ev.Sequential {
i = ev.Shuffle[i]
}
st.Img = il[i]
st.Cat = ev.Images.Cat(st.Img)
st.CatIdx = ev.Images.CatMap[st.Cat]
return st.Img
}
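// For example (illustrative): with MPI StRow = 250 and non-Sequential presentation,
// row r picks the base index ImgIdxs[r] = 250 + r, which is then mapped through the
// whole-set permutation, so the presented image is ImageList()[Shuffle[250+r]].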
// OpenImage opens current image
func (ev *ImagesEnv) OpenImage(st *TrialState) (image.Image, error) {
imgNm := ev.CurImage(st)
fnm := filepath.Join(ev.Images.Path, imgNm)
img, _, err := imagex.Open(fnm)
return img, errors.Log(err)
}
// RandTransforms generates random transforms
func (ev *ImagesEnv) RandTransforms(st *TrialState) {
if ev.TransSigma > 0 {
st.Trans.X = float32(randx.GaussianGen(0, float64(ev.TransSigma), &ev.Rand))
st.Trans.X = math32.Clamp(st.Trans.X, -ev.TransMax.X, ev.TransMax.X)
st.Trans.Y = float32(randx.GaussianGen(0, float64(ev.TransSigma), &ev.Rand))
st.Trans.Y = math32.Clamp(st.Trans.Y, -ev.TransMax.Y, ev.TransMax.Y)
} else {
st.Trans.X = (ev.Rand.Float32()*2 - 1) * ev.TransMax.X
st.Trans.Y = (ev.Rand.Float32()*2 - 1) * ev.TransMax.Y
}
st.Scale = ev.ScaleRange.Min + ev.ScaleRange.Range()*ev.Rand.Float32()
st.Rot = (ev.Rand.Float32()*2 - 1) * ev.RotateMax
}
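// A minimal sketch of the uniform (TransSigma == 0) case with the hard Defaults
// (TransMax = 0.3, ScaleRange = 0.7..1.2, RotateMax = 16): Trans.X and Trans.Y are
// drawn uniformly in [-0.3, 0.3] (as a fraction of image size), Scale uniformly in
// [0.7, 1.2], and Rot uniformly in [-16, 16] degrees.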
// TransformImage transforms the image according to the current translation, scaling, and rotation
func (ev *ImagesEnv) TransformImage(st *TrialState, img image.Image) image.Image {
s := math32.FromPoint(img.Bounds().Size())
transformer := draw.BiLinear
tx := 0.5 * st.Trans.X * s.X
ty := 0.5 * st.Trans.Y * s.Y
m := math32.Translate2D(s.X*.5+tx, s.Y*.5+ty).Scale(st.Scale, st.Scale).Rotate(math32.DegToRad(st.Rot)).Translate(-s.X*.5, -s.Y*.5)
s2d := f64.Aff3{float64(m.XX), float64(m.XY), float64(m.X0), float64(m.YX), float64(m.YY), float64(m.Y0)}
// use first color in upper left as fill color
clr := img.At(0, 0)
dst := image.NewRGBA(img.Bounds())
src := image.NewUniform(clr)
draw.Draw(dst, dst.Bounds(), src, image.ZP, draw.Src)
transformer.Transform(dst, s2d, img, img.Bounds(), draw.Over, nil) // Over superimposes over bg
return dst
}
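// For example (illustrative): with a 128 x 128 source and Trans.X = 0.3, the pixel
// offset is tx = 0.5 * 0.3 * 128 = 19.2, and the affine m translates to that shifted
// center, applies scale and rotation there, then translates back by the true center,
// so scaling and rotation happen about the image center while its position shifts.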
// SetOutput sets the output pattern for the given data index to the pattern for the given category index
func (ev *ImagesEnv) SetOutput(di, out int) {
ot := ev.Output.SubSpace(di).(*tensor.Float32)
ot.SetZeros()
otc := ev.Pats.Column("Output").SubSpace(out)
ot.CopyCellsFrom(otc, 0, 0, ot.Len())
}
// FloatIdx32 contains a float32 value and its index
type FloatIdx32 struct {
Val float32
Idx int
}
// ClosestRows32 returns the sorted list of distances from probe pattern
// and patterns in a tensor.Float32 where the outer-most dimension is
// assumed to be a row (e.g., as a column in an etable), using the given metric function,
// *which must have the Increasing property* -- i.e., larger = further.
// Col cell sizes must match size of probe (panics if not).
func ClosestRows32(probe *tensor.Float64, col *tensor.Float32, mfun metric.Metrics) []FloatIdx32 {
pr1d := tensor.As1D(probe)
rows := col.DimSize(0)
csz := col.Len() / rows
if csz != probe.Len() {
panic("metric.ClosestRows32: probe size != cell size of tensor column!\n")
}
dsts := make([]FloatIdx32, rows)
for ri := 0; ri < rows; ri++ {
st := ri * csz
rvals := col.Values[st : st+csz]
v := mfun.Call(pr1d, tensor.NewFloat32FromValues(rvals...))
dsts[ri].Val = float32(v.Float1D(0))
dsts[ri].Idx = ri
}
sort.Slice(dsts, func(i, j int) bool {
return dsts[i].Val < dsts[j].Val
})
return dsts
}
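// A minimal usage sketch (mirroring OutErr below; tensor.NewFloat64 as a flat
// probe constructor is an assumption about the tensor API):
// ocol := ev.Pats.Column("Output").Tensor.(*tensor.Float32)
// probe := tensor.NewFloat64(ocol.Len() / ocol.DimSize(0)) // one cell's worth of values
// dsts := ClosestRows32(probe, ocol, metric.MetricInvCorrelation)
// closest := dsts[0].Idx // row index of the closest (smallest-distance) pattern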
// OutErr scores the output activity of network, returning the index of
// item with closest fit to given pattern, and 1 if that is error, 0 if correct.
// also returns a top-two error: if 2nd closest pattern was correct.
func (ev *ImagesEnv) OutErr(tsr *tensor.Float64, curCatIdx int) (maxi int, err, err2 float64) {
ocol := ev.Pats.Column("Output").Tensor.(*tensor.Float32)
dsts := ClosestRows32(tsr, ocol, metric.MetricInvCorrelation)
maxi = dsts[0].Idx
err = 1.0
if maxi == curCatIdx {
err = 0
}
err2 = err
if dsts[1].Idx == curCatIdx {
err2 = 0
}
return
}
func (ev *ImagesEnv) String() string {
return ev.TrialName(0)
}
// TrialName returns the string rep of the env state
func (ev *ImagesEnv) TrialName(di int) string {
st := ev.Trial(di)
return st.String()
}
func (ev *ImagesEnv) Step() bool {
imgs := make([]image.Image, ev.NData)
for di := range ev.NData {
st := ev.Trial(di)
if ev.Row.Incr() {
ev.NewShuffle()
}
ev.RandTransforms(st)
img, err := ev.OpenImage(st)
if err != nil {
continue
}
img = ev.TransformImage(st, img)
st.Image = img
imgs[di] = img
ev.SetOutput(di, st.CatIdx)
}
ev.V1c.RunImages(imgs...)
return true
}
func (ev *ImagesEnv) State(element string) tensor.Values {
switch element {
case "V1l16":
return &ev.V1c.V1cParams[0].Output
case "V1m16":
return &ev.V1c.V1cParams[1].Output
// case "V1h16": // todo
// return &ev.V1h16.V1AllTsr
case "V1l8":
return &ev.V1c.V1cParams[2].Output
case "V1m8":
return &ev.V1c.V1cParams[3].Output
case "V1Cl16":
return &ev.V1c.DoGParams[0].Output
case "V1Cm16":
return &ev.V1c.DoGParams[1].Output
case "V1Cl8":
return &ev.V1c.DoGParams[2].Output
case "V1Cm8":
return &ev.V1c.DoGParams[3].Output
case "Output":
return &ev.Output
}
return nil
}
func (ev *ImagesEnv) Action(action string, input tensor.Values) {
// nop
}
// Compile-time check that ImagesEnv implements the env.Env interface
var _ env.Env = (*ImagesEnv)(nil)
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// lvis explores how a hierarchy of areas in the ventral stream
// of visual processing (up to inferotemporal (IT) cortex) can produce
// robust object recognition that is invariant to changes in position,
// size, etc of retinal input images.
package lvis
//go:generate core generate -add-types -add-funcs -gosl
import (
"embed"
"fmt"
"os"
"reflect"
"slices"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorcore"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/decoder"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
//go:embed *.json
var embedfs embed.FS
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
NovelTrain
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields, which provide hints for how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Paths are all the specialized pathways for the network.
Paths Paths `new-window:"+" display:"no-inline"`
// Decoder is used as a comparison vs. the Output layer.
Decoder decoder.SoftMax
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Paths.Defaults()
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *ImagesEnv
if len(ss.Envs) == 0 {
trn = &ImagesEnv{}
tst = &ImagesEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*ImagesEnv)
tst = ss.Envs.ByMode(Test).(*ImagesEnv)
}
path := ss.Config.Env.Path
imgs := ss.Config.Env.ImageFile
trn.Name = Train.String()
trn.Defaults()
trn.NOutPer = ss.Config.Env.NOutPer
trn.High16 = false // not useful -- may need more tuning?
trn.ColorDoG = true
trn.Images.NTestPerCat = 2
trn.Images.SplitByItm = true
trn.OutRandom = ss.Config.Env.RndOutPats
trn.OutSize.Set(10, 10)
trn.ImageFile = imgs
trn.Images.SetPath(path, []string{".png"}, "_")
trn.OpenConfig()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(ss.Config.Run.NData, axon.ComputeGPU)
tst.Name = Test.String()
tst.Defaults()
tst.NOutPer = ss.Config.Env.NOutPer
tst.High16 = trn.High16
tst.ColorDoG = trn.ColorDoG
tst.Images.NTestPerCat = 2
tst.Images.SplitByItm = true
tst.OutRandom = ss.Config.Env.RndOutPats
tst.OutSize.Set(10, 10)
tst.Test = true
tst.ImageFile = imgs
tst.Images.SetPath(path, []string{".png"}, "_")
tst.OpenConfig()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(ss.Config.Run.NData, axon.ComputeGPU)
// remove the most confusable categories
confuse := []string{"blade", "flashlight", "pckeyboard", "scissors", "screwdriver", "submarine"}
trn.Images.DeleteCats(confuse)
tst.Images.DeleteCats(confuse)
if ss.Config.Run.MPI {
if ss.Config.Debug {
mpi.Printf("Did Env MPIAlloc\n")
}
trn.MPIAlloc()
tst.MPIAlloc()
}
trn.Init(0)
trn.Step() // needs an image to show
trn.Init(0)
tst.Init(0)
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).
SetSlowInterval(int32(ss.Config.Run.SlowInterval)).
SetAdaptGiInterval(int32(ss.Config.Run.AdaptGiInterval)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
trn := ss.Envs.ByMode(Train).(*ImagesEnv)
v1nrows := trn.V1c.Out4Rows()
hi16 := trn.High16
cdog := trn.ColorDoG
v2mNp := 8
v2lNp := 4
v2Nu := 8
v4Np := 4
v4Nu := 10
if ss.Config.Params.SubPools {
v2mNp *= 2
v2lNp *= 2
v2Nu = 6
v4Np = 8
v4Nu = 7
}
v1m16 := net.AddLayer4D("V1m16", axon.InputLayer, 16, 16, v1nrows, 4).AddClass("V1m")
v1l16 := net.AddLayer4D("V1l16", axon.InputLayer, 8, 8, v1nrows, 4).AddClass("V1l")
v1m8 := net.AddLayer4D("V1m8", axon.InputLayer, 16, 16, v1nrows, 4).AddClass("V1m")
v1l8 := net.AddLayer4D("V1l8", axon.InputLayer, 8, 8, v1nrows, 4).AddClass("V1l")
v1m16.SetSampleShape(emer.CenterPoolIndexes(v1m16, 2), emer.CenterPoolShape(v1m16, 2))
v1l16.SetSampleShape(emer.CenterPoolIndexes(v1l16, 2), emer.CenterPoolShape(v1l16, 2))
v1m8.SetSampleShape(emer.CenterPoolIndexes(v1m8, 2), emer.CenterPoolShape(v1m8, 2))
v1l8.SetSampleShape(emer.CenterPoolIndexes(v1l8, 2), emer.CenterPoolShape(v1l8, 2))
// not useful so far..
// clst := net.AddLayer2D("Claustrum", 5, 5, axon.SuperLayer)
var v1cm16, v1cl16, v1cm8, v1cl8 *axon.Layer
if cdog {
v1cm16 = net.AddLayer4D("V1Cm16", axon.InputLayer, 16, 16, 2, 2).AddClass("V1Cm")
v1cl16 = net.AddLayer4D("V1Cl16", axon.InputLayer, 8, 8, 2, 2).AddClass("V1Cl")
v1cm8 = net.AddLayer4D("V1Cm8", axon.InputLayer, 16, 16, 2, 2).AddClass("V1Cm")
v1cl8 = net.AddLayer4D("V1Cl8", axon.InputLayer, 8, 8, 2, 2).AddClass("V1Cl")
v1cm16.SetSampleShape(emer.CenterPoolIndexes(v1cm16, 2), emer.CenterPoolShape(v1cm16, 2))
v1cl16.SetSampleShape(emer.CenterPoolIndexes(v1cl16, 2), emer.CenterPoolShape(v1cl16, 2))
v1cm8.SetSampleShape(emer.CenterPoolIndexes(v1cm8, 2), emer.CenterPoolShape(v1cm8, 2))
v1cl8.SetSampleShape(emer.CenterPoolIndexes(v1cl8, 2), emer.CenterPoolShape(v1cl8, 2))
}
v2m16 := net.AddLayer4D("V2m16", axon.SuperLayer, v2mNp, v2mNp, v2Nu, v2Nu).AddClass("V2m V2")
v2l16 := net.AddLayer4D("V2l16", axon.SuperLayer, v2lNp, v2lNp, v2Nu, v2Nu).AddClass("V2l V2")
v2m8 := net.AddLayer4D("V2m8", axon.SuperLayer, v2mNp, v2mNp, v2Nu, v2Nu).AddClass("V2m V2")
v2l8 := net.AddLayer4D("V2l8", axon.SuperLayer, v2lNp, v2lNp, v2Nu, v2Nu).AddClass("V2l V2")
v2m16.SetSampleShape(emer.CenterPoolIndexes(v2m16, 2), emer.CenterPoolShape(v2m16, 2))
v2l16.SetSampleShape(emer.CenterPoolIndexes(v2l16, 2), emer.CenterPoolShape(v2l16, 2))
v2m8.SetSampleShape(emer.CenterPoolIndexes(v2m8, 2), emer.CenterPoolShape(v2m8, 2))
v2l8.SetSampleShape(emer.CenterPoolIndexes(v2l8, 2), emer.CenterPoolShape(v2l8, 2))
var v1h16, v2h16, v3h16 *axon.Layer
if hi16 {
v1h16 = net.AddLayer4D("V1h16", axon.InputLayer, 32, 32, 5, 4).AddClass("V1h")
v2h16 = net.AddLayer4D("V2h16", axon.SuperLayer, 32, 32, v2Nu, v2Nu).AddClass("V2h V2")
v3h16 = net.AddLayer4D("V3h16", axon.SuperLayer, 16, 16, v2Nu, v2Nu).AddClass("V3h")
v1h16.SetSampleShape(emer.CenterPoolIndexes(v1h16, 2), emer.CenterPoolShape(v1h16, 2))
v2h16.SetSampleShape(emer.CenterPoolIndexes(v2h16, 2), emer.CenterPoolShape(v2h16, 2))
v3h16.SetSampleShape(emer.CenterPoolIndexes(v3h16, 2), emer.CenterPoolShape(v3h16, 2))
}
v4f16 := net.AddLayer4D("V4f16", axon.SuperLayer, v4Np, v4Np, v4Nu, v4Nu).AddClass("V4")
v4f8 := net.AddLayer4D("V4f8", axon.SuperLayer, v4Np, v4Np, v4Nu, v4Nu).AddClass("V4")
v4f16.SetSampleShape(emer.CenterPoolIndexes(v4f16, 2), emer.CenterPoolShape(v4f16, 2))
v4f8.SetSampleShape(emer.CenterPoolIndexes(v4f8, 2), emer.CenterPoolShape(v4f8, 2))
teo16 := net.AddLayer4D("TEOf16", axon.SuperLayer, 2, 2, 15, 15).AddClass("TEO")
teo8 := net.AddLayer4D("TEOf8", axon.SuperLayer, 2, 2, 15, 15).AddClass("TEO")
te := net.AddLayer4D("TE", axon.SuperLayer, 2, 2, 15, 15)
var out *axon.Layer
if ss.Config.Env.RndOutPats {
out = net.AddLayer2D("Output", axon.TargetLayer, trn.OutSize.Y, trn.OutSize.X)
} else {
// out = net.AddLayer4D("Output", axon.TargetLayer, trn.OutSize.Y, trn.OutSize.X, trn.NOutPer, 1)
// 2D layer:
out = net.AddLayer2D("Output", axon.TargetLayer, trn.OutSize.Y, trn.OutSize.X*trn.NOutPer)
}
full := paths.NewFull()
_ = full
rndcut := paths.NewUniformRand()
rndcut.PCon = 0.1 // 0.2 == .1 459
// rndpath := paths.NewUnifRnd()
// rndpath.PCon = 0.5 // 0.2 > .1
pool1to1 := paths.NewPoolOneToOne()
_ = pool1to1
pj := &ss.Paths
var p4x4s2, p2x2s1, p4x4s2send, p2x2s1send, p4x4s2recip, p2x2s1recip, v4toteo, teotov4 paths.Pattern
p4x4s2 = pj.PT4x4Skp2
p2x2s1 = pj.PT2x2Skp1
p4x4s2send = pj.PT4x4Skp2
p2x2s1send = pj.PT2x2Skp1
p4x4s2recip = pj.PT4x4Skp2Recip
p2x2s1recip = pj.PT2x2Skp1Recip
v4toteo = full
teotov4 = full
if ss.Config.Params.SubPools {
p4x4s2 = pj.PT4x4Skp2Sub2
p2x2s1 = pj.PT2x2Skp1Sub2
p4x4s2send = pj.PT4x4Skp2Sub2Send
p2x2s1send = pj.PT2x2Skp1Sub2Send
p4x4s2recip = pj.PT4x4Skp2Sub2SendRecip
p2x2s1recip = pj.PT2x2Skp1Sub2SendRecip
v4toteo = pj.PT4x4Skp0Sub2
teotov4 = pj.PT4x4Skp0Sub2Recip
}
net.ConnectLayers(v1m16, v2m16, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1l16, v2m16, p2x2s1, axon.ForwardPath).AddClass("V1V2fmSm V1V2")
net.ConnectLayers(v1l16, v2l16, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1m8, v2m8, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1l8, v2m8, p2x2s1, axon.ForwardPath).AddClass("V1V2fmSm V1V2")
net.ConnectLayers(v1l8, v2l8, p4x4s2, axon.ForwardPath).AddClass("V1V2")
if cdog {
net.ConnectLayers(v1cm16, v2m16, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1cl16, v2m16, p2x2s1, axon.ForwardPath).AddClass("V1V2fmSm V1V2")
net.ConnectLayers(v1cl16, v2l16, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1cm8, v2m8, p4x4s2, axon.ForwardPath).AddClass("V1V2")
net.ConnectLayers(v1cl8, v2m8, p2x2s1, axon.ForwardPath).AddClass("V1V2fmSm V1V2")
net.ConnectLayers(v1cl8, v2l8, p4x4s2, axon.ForwardPath).AddClass("V1V2")
}
v2v4, v4v2 := net.BidirConnectLayers(v2m16, v4f16, p4x4s2send)
v2v4.AddClass("V2V4")
v4v2.AddClass("V4V2").SetPattern(p4x4s2recip)
v2v4, v4v2 = net.BidirConnectLayers(v2l16, v4f16, p2x2s1send)
v2v4.AddClass("V2V4sm")
v4v2.AddClass("V4V2").SetPattern(p2x2s1recip)
v2v4, v4v2 = net.BidirConnectLayers(v2m8, v4f8, p4x4s2send)
v2v4.AddClass("V2V4")
v4v2.AddClass("V4V2").SetPattern(p4x4s2recip)
v2v4, v4v2 = net.BidirConnectLayers(v2l8, v4f8, p2x2s1send)
v2v4.AddClass("V2V4sm")
v4v2.AddClass("V4V2").SetPattern(p2x2s1recip)
if hi16 {
net.ConnectLayers(v1h16, v2h16, p4x4s2, axon.ForwardPath).AddClass("V1V2")
v2v3, v3v2 := net.BidirConnectLayers(v2h16, v3h16, p4x4s2send)
v2v3.AddClass("V2V3")
v3v2.AddClass("V3V2").SetPattern(p4x4s2recip)
v3v4, v4v3 := net.BidirConnectLayers(v3h16, v4f16, p4x4s2send)
v3v4.AddClass("V3V4")
v4v3.AddClass("V4V3").SetPattern(p4x4s2recip)
}
v4teo, teov4 := net.BidirConnectLayers(v4f16, teo16, v4toteo)
v4teo.AddClass("V4TEO")
teov4.AddClass("TEOV4").SetPattern(teotov4)
net.ConnectLayers(v4f8, teo16, v4toteo, axon.ForwardPath).AddClass("V4TEOoth")
v4teo, teov4 = net.BidirConnectLayers(v4f8, teo8, v4toteo)
v4teo.AddClass("V4TEO")
teov4.AddClass("TEOV4").SetPattern(teotov4)
net.ConnectLayers(v4f16, teo8, v4toteo, axon.ForwardPath).AddClass("V4TEOoth")
teote, teteo := net.BidirConnectLayers(teo16, te, full)
teote.AddClass("TEOTE")
teteo.AddClass("TETEO")
teote, teteo = net.BidirConnectLayers(teo8, te, full)
teote.AddClass("TEOTE")
teteo.AddClass("TETEO")
// TEO -> Out ends up saturating quite a bit with consistently high weights,
// but removing those projections is not good -- the network still makes use of them,
// perhaps in a transitional way that sets up better TE reps.
// outteo := net.ConnectLayers(out, teo16, full, emer.Back)
teoout, outteo := net.BidirConnectLayers(teo16, out, full)
teoout.AddClass("TEOOut ToOut")
outteo.AddClass("OutTEO FmOut")
// outteo = net.ConnectLayers(out, teo8, full, emer.Back)
teoout, outteo = net.BidirConnectLayers(teo8, out, full)
teoout.AddClass("TEOOut ToOut")
outteo.AddClass("OutTEO FmOut")
teout, _ := net.BidirConnectLayers(te, out, full)
teout.AddClass("ToOut FmOut")
/*
// trace: not useful
// v59 459 -- only useful later -- TEO maybe not doing as well later?
v4out, outv4 := net.BidirConnectLayers(v4f16, out, full)
v4out.AddClass("V4Out ToOut")
outv4.AddClass("OutV4 FmOut")
v4out, outv4 = net.BidirConnectLayers(v4f8, out, full)
v4out.AddClass("V4Out ToOut")
outv4.AddClass("OutV4 FmOut")
*/
/*
var v2inhib, v4inhib prjn.Pattern
v2inhib = pool1to1
v4inhib = pool1to1
if ss.SubPools {
v2inhib = pj.Prjn2x2Skp2 // pj.Prjn6x6Skp2Lat
v4inhib = pj.Prjn2x2Skp2
}
// this extra inhibition drives decorrelation, produces significant learning benefits
net.LateralConnectLayerPrjn(v2m16, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v2l16, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v2m8, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v2l8, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v4f16, v4inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v4f8, v4inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(teo16, pool1to1, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(teo8, pool1to1, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(te, pool1to1, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
if hi16 {
net.LateralConnectLayerPrjn(v2h16, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
net.LateralConnectLayerPrjn(v3h16, v2inhib, &axon.HebbPrjn{}).SetType(axon.InhibPrjn)
}
*/
///////////////////////
// Shortcuts:
// clst not useful
// net.ConnectLayers(v1l16, clst, full, axon.ForwardPath)
// V1 shortcuts best for syncing all layers -- like the pulvinar basically
net.ConnectLayers(v1l16, v4f16, rndcut, axon.ForwardPath).AddClass("V1SC")
net.ConnectLayers(v1l8, v4f8, rndcut, axon.ForwardPath).AddClass("V1SC")
net.ConnectLayers(v1l16, teo16, rndcut, axon.ForwardPath).AddClass("V1SC")
net.ConnectLayers(v1l8, teo8, rndcut, axon.ForwardPath).AddClass("V1SC")
net.ConnectLayers(v1l16, te, rndcut, axon.ForwardPath).AddClass("V1SC")
net.ConnectLayers(v1l8, te, rndcut, axon.ForwardPath).AddClass("V1SC")
if hi16 {
net.ConnectLayers(v1l16, v3h16, rndcut, axon.ForwardPath).AddClass("V1SC")
}
//////////////////////
// Positioning
space := float32(4)
v1m8.PlaceRightOf(v1m16, space)
v1l16.PlaceBehind(v1m16, space)
v1l8.PlaceBehind(v1m8, space)
// clst.PlaceBehind(v1l8, XAlign: relpos.Left, Space: 4, Scale: 2})
if cdog {
v1cm16.PlaceRightOf(v1m8, space)
v1cm8.PlaceRightOf(v1cm16, space)
v1cl16.PlaceBehind(v1cm16, space)
v1cl8.PlaceBehind(v1cm8, space)
}
if hi16 {
v1h16.PlaceRightOf(v1m8, space)
v2h16.PlaceRightOf(v2m8, space)
v3h16.PlaceBehind(v4f16, space)
}
v2m16.PlaceAbove(v1m16)
v2m8.PlaceRightOf(v2m16, space)
v2l16.PlaceBehind(v2m16, space)
v2l8.PlaceBehind(v2m8, space)
v4f16.PlaceAbove(v2m16)
teo16.PlaceRightOf(v4f16, space)
v4f8.PlaceRightOf(teo16, space)
teo8.PlaceRightOf(v4f8, space)
te.PlaceBehind(teo8, 15)
out.PlaceBehind(te, 15)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
mpi.Println(net.SizeReport(false))
// adding each additional layer type improves decoding..
layers := []emer.Layer{v4f16, v4f8, teo16, teo8, out}
// layers := []emer.Layer{teo16, teo8, out}
// layers := []emer.Layer{teo16, teo8}
// layers := []emer.Layer{out}
// todo: decoder
ss.Decoder.InitLayer(len(trn.Images.Cats), layers)
ss.Decoder.Lrate = 0.05 // 0.05 > 0.1 > 0.2 for larger number of objs!
// if ss.Config.Run.MPI {
// ss.Decoder.Comm = ss.Comm
// }
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
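// e.g. (illustrative): Run.Trials = 100 with NData = 16 rounds up to 112, so each
// epoch is a whole number of NData-wide trial steps.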
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
stop := curNZero >= stopNz
return stop
})
trainEpoch.OnStart.Add("SaveWeightsAt", func() {
epc := trainEpoch.Counter.Cur
if slices.Contains(ss.Config.Log.SaveWeightsAt, epc) {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, epc)
axon.SaveWeights(ss.Net, ctrString, ss.RunName())
// if epc == 200 { // note: slower lrate causes weird fluctuations in activity
// fmt.Println("learning rate schedule multiplier: .4 at:", epc)
// ss.Net.LRateSched(.4)
// axon.ToGPUParams()
// }
}
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := net.Context()
ndata := int(ctx.NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*ImagesEnv)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
ev.Step()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExtAll(ctx, st)
}
}
for di := range ndata {
st := ev.Trial(di)
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.TrialName(di), di)
curModeDir.StringValue("Cat", ndata).SetString1D(st.Cat, di)
curModeDir.Int("CatIdx", ndata).SetInt1D(st.CatIdx, di)
}
net.ApplyExts()
ss.UpdateImage()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of the given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each iteration step,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
// ev := ss.Envs.ByMode(Train).(*ImagesEnv)
// tbs.TensorGrid("Image", &ev.Vis.ImgTsr)
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "Err2", "DecErr", "DecErr2", "Resp", "DecResp", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "UnitErr", "Resp":
s.On = false
}
})
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("Output")
ltsr := curModeDir.Float64(out.Name+"_ActM", out.Shape.Sizes...)
ev := ss.Envs.ByMode(Modes(ss.Net.Context().Mode)).(*ImagesEnv)
for di := range ndata {
cat := curModeDir.Int("CatIdx", ndata).Int1D(di)
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
out.UnitValuesSampleTensor(ltsr, "ActM", di)
rsp, trlErr, trlErr2 := ev.OutErr(ltsr, cat)
curModeDir.Float64("Resp", ndata).SetInt1D(rsp, di)
curModeDir.Float64("Err2", ndata).SetFloat1D(trlErr2, di)
stat = trlErr
case "Err2":
stat = curModeDir.Float64(name, ndata).Float1D(di)
case "Resp":
stat = curModeDir.Float64(name, ndata).Float1D(di)
case "DecErr":
decIdx := ss.Decoder.Decode("ActM", di)
curModeDir.Float64("DecResp", ndata).SetInt1D(decIdx, di)
if mode == Train {
if ss.Config.Run.MPI {
ss.Decoder.TrainMPI(cat)
} else {
ss.Decoder.Train(cat)
}
}
decErr := float64(0)
if decIdx != cat {
decErr = 1
}
stat = decErr
decErr2 := decErr
if ss.Decoder.Sorted[1] == cat {
decErr2 = 0
}
curModeDir.Float64("DecErr2", ndata).SetFloat1D(decErr2, di)
case "DecErr2":
stat = curModeDir.Float64(name, ndata).Float1D(di)
case "DecResp":
stat = curModeDir.Float64(name, ndata).Float1D(di)
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
laysAll := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer, axon.InputLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, laysAll...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
laysAdapt := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
giMultFunc := axon.StatLayerGiMult(ss.Stats, net, Train, Epoch, Run, laysAdapt...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
giMultFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, laysAdapt...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Output")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.03
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
// nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.733, 2.3)
// nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
trn := ss.Envs.ByMode(Train).(*ImagesEnv)
img := trn.V1c.Image.Tsr.SubSpace(0).(*tensor.Float32)
tensorcore.AddGridStylerTo(img, func(s *tensorcore.GridStyle) {
s.Image = true
s.Range.SetMin(0)
})
ss.GUI.Tabs.TensorGrid("Image", img)
ss.GUI.Tabs.SelectTabIndex(0)
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) UpdateImage() {
if !ss.Config.GUI {
return
}
ss.GUI.Tabs.TabUpdateRender("Image")
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
if ss.Config.Run.StartWeights != "" {
ss.Loops.Loop(Train, Epoch).Counter.Cur = ss.Config.Run.StartEpoch
}
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/lvis"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[lvis.Sim, lvis.Config]() }
// Copyright (c) 2021, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lvis
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "needs some special inhibition and learning params",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.04 for most layers
ly.Inhib.Layer.Gi = 1.1 // 1.1 def, 1.0 for lower layers is best
ly.Inhib.Pool.Gi = 1.1 // "
ly.Inhib.Layer.FB = 1 // setting for layers below
ly.Inhib.Pool.FB = 1
ly.Inhib.Layer.ClampExtMin = 0.0 // 0.05 default doesn't activate output!
ly.Inhib.Pool.ClampExtMin = 0.0
ly.Inhib.ActAvg.AdaptRate = 0.02 // 0.02 is slowest that tracks reasonably close
ly.Inhib.ActAvg.AdaptMax = 0.01 // 0.05 default; 0.01 has effect; lower not effective at preventing instability on its own.
ly.Inhib.ActAvg.LoTol = 0.8
ly.Inhib.ActAvg.HiTol = 0.0
ly.Acts.Dt.LongAvgTau = 200 // 200 > 20 for smoothing ripples
ly.Acts.Decay.Act = 0.0 // 0 == .2
ly.Acts.Decay.Glong = 0.6 // 0.6 def
ly.Acts.Dend.SSGi = 2 // 2 new default
ly.Acts.Dend.GExp = 0.2 // 0.2 > 0.1 > 0
ly.Acts.Dend.GR = 3 // 2 good for 0.2
ly.Acts.Dt.VmDendC = 500 // 500 def
ly.Acts.GabaB.Gk = 0.012 // 0.012 > 0.015 confirmed latest dev.34
ly.Acts.NMDA.Ge = 0.006 // 0.006 def
ly.Acts.NMDA.MgC = 1.4 // mg1, voff0, gbarexp.2, gbarr3 = better
ly.Acts.NMDA.Voff = 0 // mg1, voff0 = mg1.4, voff5 w best params
ly.Acts.AK.Gk = 0.1
ly.Acts.VGCC.Ge = 0.02 // non nmda: 0.15 good, 0.3 blows up, nmda: .02 best
ly.Acts.VGCC.Ca = 25 // 25 / 10tau same as SpkVGCC
ly.Acts.Mahp.Gk = 0.05 // 0.05 > lower, higher; but still needs kna
ly.Acts.Sahp.Gk = 0.1 // 0.1 >= 0.05
ly.Acts.Sahp.Off = 0.8 //
ly.Acts.Sahp.Slope = 0.02 //
ly.Acts.Sahp.CaTau = 5 // 5 ok -- not tested
ly.Acts.KNa.On.SetBool(true) // true, .05 > false
ly.Acts.KNa.Med.Gk = 0.05 // 0.1 > 0.05 without adapt in TE/TEO
ly.Acts.KNa.Slow.Gk = 0.05 // 0.1 > 0.05 " med is more impairing than slow
ly.Learn.CaLearn.Norm = 80 // 80 def; 60 makes CaLearnMax closer to 1
ly.Learn.CaLearn.SpikeVGCC.SetBool(true) // sig better..
ly.Learn.CaLearn.SpikeVgccCa = 35 // 70 / 5 or 35 / 10 both work
ly.Learn.CaLearn.VgccTau = 10 // 10 > 5 ?
// ly.Learn.CaLearn.UpdtThr = 0.01 // 0.01 > 0.05 -- was LrnThr
ly.Learn.CaLearn.Dt.MTau = 2 // 2 > 1 ?
ly.Learn.CaSpike.SpikeCaM = 12 // 12 > 8 -- for larger nets
ly.Learn.CaSpike.SpikeCaSyn = 12 // 12 >> 8 -- essential
ly.Learn.CaSpike.CaSynTau = 30 // 30 > 20, 40
ly.Learn.CaSpike.Dt.MTau = 5 // 5 > 10?
ly.Learn.LearnNMDA.Ge = 0.006 // 0.006 def
ly.Learn.LearnNMDA.MgC = 1.4 // 1.2 for unified Act params, else 1.4
ly.Learn.LearnNMDA.Voff = 0 // 0 for unified Act params, else 5
ly.Learn.LearnNMDA.Tau = 100 // 100 def
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // critical!
ly.Learn.TrgAvgAct.SubMean = 0 // 0 > 1 key throughout -- even .5 slows learning -- doesn't help slow pca
ly.Learn.TrgAvgAct.SynScaleRate = 0.002 // 0.002 >= 0.005 > 0.001 > 0.0005 too weak even with adapt gi
ly.Learn.TrgAvgAct.ErrLRate = 0.02 // 0.02 def
ly.Learn.RLRate.On.SetBool(true) // beneficial for trace
ly.Learn.RLRate.SigmoidMin = 0.05
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false >> true
ly.Learn.RLRate.Diff.SetBool(true)
ly.Learn.RLRate.DiffThr = 0.02 // 0.02 def - todo
ly.Learn.RLRate.SpikeThr = 0.1 // 0.1 def
ly.Learn.RLRate.Min = 0.001
ly.Learn.Timing.On.SetBool(true) // time fails after a bit.
// ly.Learn.Timing.Refractory.SetBool(true)
// ly.Learn.Timing.LearnThr = 0.1
// ly.Learn.Timing.SynCaCycles = 160
// ly.Learn.Timing.Cycles = 170
// ly.Learn.Timing.TimeDiffTau = 4
}},
{Sel: ".InputLayer", Doc: "all V1 input layers",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.FB = 1 // keep normalized
ly.Inhib.Pool.FB = 1
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.9 // was 0.9
ly.Inhib.Pool.Gi = 0.9 // 0.9 >= 1.1 def -- more activity
ly.Inhib.ActAvg.Nominal = 0.03 // .04 prev; .06 for !SepColor actuals: V1m8: .04, V1m16: .03
ly.Acts.Clamp.Ge = 1.5 // was 1.0
ly.Acts.Decay.Act = 1 // these make no diff
ly.Acts.Decay.Glong = 1
}},
{Sel: ".V2", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02 // .02 1.6.15 SSGi -- was higher
ly.Inhib.ActAvg.Offset = 0.008 // 0.008 > 0.005; nominal is lower to increase Ge
ly.Inhib.ActAvg.AdaptGi.SetBool(true) // true
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
ly.Inhib.Layer.FB = 1 //
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1.1?
ly.Inhib.Pool.Gi = 1.05 // was 0.95 but gi mult goes up..
ly.Learn.CaLearn.PosBias = 1.1 // time: 1.1 > others
}},
{Sel: ".V4", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.02 // .02 1.6.15 SSGi
ly.Inhib.ActAvg.Offset = 0.008 // 0.008 > 0.005; nominal is lower to increase Ge
ly.Inhib.ActAvg.AdaptGi.SetBool(true) // true
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
ly.Inhib.Layer.FB = 1 //
ly.Inhib.Pool.FB = 4
ly.Inhib.Layer.Gi = 1.0 // 1.1?
ly.Inhib.Pool.Gi = 1.05 // was 1.0 but gi mult goes up
}},
{Sel: ".TEO", Doc: "initial activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03 // .03 1.6.15 SSGi
ly.Inhib.ActAvg.Offset = 0.01 // 0.01 > lower, higher; nominal is lower to increase Ge
ly.Inhib.ActAvg.AdaptGi.SetBool(false) // false
ly.Inhib.Layer.On.SetBool(false) // no layer!
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
ly.Inhib.Pool.FB = 4
ly.Inhib.Pool.Gi = 1.12 // 1.12 > others for non-adapt
}},
{Sel: "#TE", Doc: "initial activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03 // .03 1.6.15 SSGi
ly.Inhib.ActAvg.Offset = 0.01 // 0.01 > lower, higher; nominal is lower to increase Ge
ly.Inhib.ActAvg.AdaptGi.SetBool(false) // false
ly.Inhib.Layer.On.SetBool(false) // no layer!
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
ly.Inhib.Pool.FB = 4
ly.Inhib.Pool.Gi = 1.12 // 1.12 > others for non-adapt
}},
{Sel: "#Output", Doc: "general output, Localist default -- see RndOutPats, LocalOutPats",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.17 // 1.2 FB4 > 1.3 FB 1 SS0
ly.Inhib.Layer.FB = 4 // 4 > 1 -- try higher
ly.Inhib.ActAvg.Nominal = 0.005 // .005 > .008 > .01 -- prevents loss of Ge over time..
ly.Inhib.ActAvg.Offset = 0.01 // 0.01 > 0.012 > 0.005?
ly.Inhib.ActAvg.AdaptGi.SetBool(true) // needed in any case
ly.Inhib.ActAvg.LoTol = 0.1 // 0.1 > 0.05 > 0.2 > 0.5 older..
ly.Inhib.ActAvg.HiTol = 0.02 // 0.02 > 0 tiny bit
ly.Inhib.ActAvg.AdaptRate = 0.01 // 0.01 > 0.1
ly.Acts.Clamp.Ge = 0.8 // .6 = .7 > .5 (tiny diff) -- input has 1.0 now
ly.Learn.CaSpike.SpikeCaM = 12 // 12 > 8 probably; 8 = orig, 12 = new trace
ly.Learn.RLRate.On.SetBool(true) // beneficial for trace
ly.Learn.RLRate.SigmoidMin = 0.05 // 0.05 > 1 now!
ly.Learn.RLRate.Diff.SetBool(true)
ly.Learn.RLRate.DiffThr = 0.02 // 0.02 def - todo
ly.Learn.RLRate.SpikeThr = 0.1 // 0.1 def
ly.Learn.RLRate.Min = 0.001
}},
// {Sel: "#Claustrum", Doc: "testing -- not working",
// Set: func(ly *axon.LayerParams) {
// ly.Inhib.Layer.Gi = 0.8
// ly.Inhib.Pool.On.SetBool(false) // needs pool-level
// ly.Inhib.Layer.On.SetBool(true)
// ly.Inhib.ActAvg.Nominal = .06
// }},
},
"RndOutPats": {
{Sel: "#Output", Doc: "high inhib for one-hot output",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Inhib.ActAvg.Nominal = 0.1 // 0.1 seems good
}},
},
"LocalOutPats": {
{Sel: "#Output", Doc: "high inhib for one-hot output",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.5 // 1.5 = 1.6 > 1.4
ly.Inhib.ActAvg.Nominal = 0.01
}},
},
"OutAdapt": {
{Sel: "#Output", Doc: "general output, Localist default -- see RndOutPats, LocalOutPats",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.AdaptGi.SetBool(true) // true = definitely worse
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "exploring",
Set: func(pt *axon.PathParams) {
pt.SWts.Adapt.On.SetBool(true) // true > false, esp in cosdiff
pt.SWts.Adapt.LRate = 0.0001 // .0002, .001 > .01 > .1 after 250epc in NStrong
pt.SWts.Adapt.SubMean = 1 // 1 > 0 -- definitely needed
pt.SWts.Adapt.HiMeanDecay = 0.0008 // 0.0008 best
pt.SWts.Adapt.HiMeanThr = 0.5 // 0.5, 0.0008 goes the distance
pt.SWts.Init.SPct = 1.0 // should be 1 -- was 0.5 previously
pt.Learn.LRate.Base = 0.005 // 0.005 > 0.002; 0.002 = act avg fluctuations later
pt.Learn.DWt.SubMean = 1 // 1 > 0 for trgavg weaker
pt.Learn.DWt.CaPScale = 1 // Env10: 1
pt.Learn.DWt.SynCa20.SetBool(false)
pt.Learn.DWt.LearnThr = 0 // even 0.01 with v32 fails after 150 or so -- lower max activity levels throughout the net
}},
{Sel: ".BackPath", Doc: "top-down back-projections MUST have lower relative weight scale, otherwise network hallucinates -- smaller as network gets bigger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
// pt.Learn.LRate.Base = 0
}},
{Sel: ".ToOut", Doc: "to output -- some things should be different..",
Set: func(pt *axon.PathParams) {
// pt.Learn.LRate.Base = 0.01 // base 0.01
pt.SWts.Adapt.On.SetBool(false) // off > on
pt.SWts.Init.SPct = 0 // when off, 0
pt.PathScale.Abs = 2.0 // 2.0 >= 1.8 > 2.2 > 1.5 > 1.2 trace
}},
// {Sel: ".FmOut", Doc: "from output -- some things should be different..",
// Set: func(pt *axon.PathParams) {}},
/*
{Sel: ".Inhib", Doc: "inhibitory projection -- not necc with fs-fffb inhib",
Set: func(pt *axon.PathParams) {
pt.Learn.Learn.SetBool(true) // learned decorrel is good
pt.Learn.LRate.Base = 0.0001 // .0001 > .001 -- slower better!
pt.Learn.DWt.SubMean = 1 // 1 is *essential* here!
pt.SWts.Init.Var = 0.0
pt.SWts.Init.Mean = 0.1
pt.SWts.Init.Sym.SetBool(false)
pt.SWts.Adapt.On.SetBool(false)
pt.PathScale.Abs = 0.2 // .2 > .1 for controlling PCA; .3 or.4 with GiSynThr .01
pt.IncGain = 1 // .5 def
}},
*/
{Sel: ".V1V2", Doc: "lower SWt for sparser activity",
Set: func(pt *axon.PathParams) {
pt.SWts.Init.Mean = 0.4 // .4 here is key!
pt.SWts.Limit.Min = 0.1 // .1-.7
pt.SWts.Limit.Max = 0.7 //
pt.PathScale.Abs = 1.8 // 1.4 > 2.0 for color -- extra boost to get more v2 early on
}},
{Sel: ".V1V2fmSm", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2
}},
{Sel: ".V2V4", Doc: "lower SWt",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // 1.0 prev, 1.2 not better
pt.SWts.Init.Mean = 0.4 // .4 a tiny bit better overall
pt.SWts.Limit.Min = 0.1 // .1-.7 def
pt.SWts.Limit.Max = 0.7 //
}},
{Sel: ".V2V4sm", Doc: "extra boost",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // 1.0 prev, 1.2 not better
}},
{Sel: "#V2m16ToV4f16", Doc: "weights into V416 getting too high",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // was 0.8, but as of #680 1.0 better
}},
{Sel: "#V2l16ToV4f16", Doc: "weights into V416 getting too high",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // see above
}},
// {Sel: ".V4TEO", Doc: "stronger",
// Set: func(pt *axon.PathParams) {
// // pt.PathScale.Abs = "1.2 // trying bigger -- was low
// }},
{Sel: ".V4TEOoth", Doc: "weaker rel",
Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 1.2 // trying bigger -- was low
pt.PathScale.Rel = 0.5
}},
// {Sel: ".V4Out", Doc: "NOT weaker",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = "1 // 1 > 0.5 > .2 -- v53 still
// }},
{Sel: ".TEOTE", Doc: "too weak at start",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // 1.2 not better
}},
// back projections
{Sel: ".V4V2", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.05 // .05 > .02 > .1 v70
pt.SWts.Init.Mean = 0.4 // .4 matches V2V4 -- not that big a diff on its own
pt.SWts.Limit.Min = 0.1 // .1-.7 def
pt.SWts.Limit.Max = 0.7 //
}},
// {Sel: ".TEOV2", Doc: "weaker -- not used",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = "0.05 // .05 > .02 > .1
// }},
{Sel: ".TEOV4", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // .1 == .2
}},
{Sel: ".TETEO", Doc: "std",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // .1 orig
}},
{Sel: ".TEOTE", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2
}},
{Sel: ".OutTEO", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.3 // .3 > .2 v53 in long run
}},
// {Sel: ".OutV4", Doc: "weaker",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Rel = 0.1 // .1 > .2 v53
// }},
{Sel: "#OutputToTE", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.1 // 0.1 (hard xform) > 0.2 (reg xform) > 0.3 trace
}},
{Sel: "#TEToOutput", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1.0 // turn off for TE testing
}},
// shortcuts -- .5 > .2 (v32 still) -- all tested together
// {Sel: "#V1l16ToClaustrum", Doc: "random fixed -- not useful",
// Set: func(pt *axon.PathParams) {
// pt.Learn.Learn.SetBool(false)
// pt.PathScale.Rel = 0.5 // .5 > .8 > 1 > .4 > .3 etc
// pt.SWts.Adapt.On.SetBool(false) // seems better
// }},
{Sel: ".V1SC", Doc: "v1 shortcut",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.001 //
// pt.Learn.Learn.SetBool(false)
pt.PathScale.Rel = 1.0 // .8, .9, 1 > .6, .5
pt.SWts.Adapt.On.SetBool(false) // seems better
// "apt.SWts.Init.Var = 0.05
}},
},
"ToOutTol": {
{Sel: ".ToOut", Doc: "to output -- some things should be different..",
Set: func(pt *axon.PathParams) {
// todo: param missing:
// pt.PathScale.LoTol = 0.5 // activation dropping off a cliff there at the end..
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lvis
import "github.com/emer/emergent/v2/paths"
// Paths holds all the special projections.
type Paths struct {
// Standard feedforward topographic projection, recv = 1/2 send size
PT4x4Skp2 *paths.PoolTile
// Reciprocal
PT4x4Skp2Recip *paths.PoolTile
// Standard feedforward topographic projection, recv = 1/2 send size
PT4x4Skp2Sub2 *paths.PoolTileSub
// Reciprocal
PT4x4Skp2Sub2Recip *paths.PoolTileSub
// Standard feedforward topographic projection, recv = 1/2 send size
PT4x4Skp2Sub2Send *paths.PoolTileSub
// Standard feedforward topographic projection, recv = 1/2 send size
PT4x4Skp2Sub2SendRecip *paths.PoolTileSub
// same-size paths
PT2x2Skp1 *paths.PoolTile
// same-size paths reciprocal
PT2x2Skp1Recip *paths.PoolTile
// same-size paths
PT2x2Skp1Sub2 *paths.PoolTileSub
// same-size paths reciprocal
PT2x2Skp1Sub2Recip *paths.PoolTileSub
// same-size paths
PT2x2Skp1Sub2Send *paths.PoolTileSub
// same-size paths reciprocal
PT2x2Skp1Sub2SendRecip *paths.PoolTileSub
// lateral inhib projection
PT2x2Skp2 *paths.PoolTileSub
// for V4 <-> TEO
PT4x4Skp0 *paths.PoolTile
// for V4 <-> TEO
PT4x4Skp0Recip *paths.PoolTile
// for V4 <-> TEO
PT4x4Skp0Sub2 *paths.PoolTileSub
// for V4 <-> TEO
PT4x4Skp0Sub2Recip *paths.PoolTileSub
// for TE <-> TEO
PT1x1Skp0 *paths.PoolTile
// for TE <-> TEO
PT1x1Skp0Recip *paths.PoolTile
// lateral inhibitory connectivity for subpools
PT6x6Skp2Lat *paths.PoolTileSub
}
func (pj *Paths) Defaults() {
pj.PT4x4Skp2 = paths.NewPoolTile()
pj.PT4x4Skp2.Size.Set(4, 4)
pj.PT4x4Skp2.Skip.Set(2, 2)
pj.PT4x4Skp2.Start.Set(-1, -1)
pj.PT4x4Skp2.TopoRange.Min = 0.8
pj.PT4x4Skp2Recip = paths.NewPoolTileRecip(pj.PT4x4Skp2)
pj.PT4x4Skp2Sub2 = paths.NewPoolTileSub()
pj.PT4x4Skp2Sub2.Size.Set(4, 4)
pj.PT4x4Skp2Sub2.Skip.Set(2, 2)
pj.PT4x4Skp2Sub2.Start.Set(-1, -1)
pj.PT4x4Skp2Sub2.Subs.Set(2, 2)
pj.PT4x4Skp2Sub2.TopoRange.Min = 0.8
pj.PT4x4Skp2Sub2Recip = paths.NewPoolTileSubRecip(pj.PT4x4Skp2Sub2)
pj.PT4x4Skp2Sub2Send = paths.NewPoolTileSub()
*pj.PT4x4Skp2Sub2Send = *pj.PT4x4Skp2Sub2
pj.PT4x4Skp2Sub2Send.SendSubs = true
pj.PT4x4Skp2Sub2SendRecip = paths.NewPoolTileSubRecip(pj.PT4x4Skp2Sub2Send)
pj.PT2x2Skp1 = paths.NewPoolTile()
pj.PT2x2Skp1.Size.Set(2, 2)
pj.PT2x2Skp1.Skip.Set(1, 1)
pj.PT2x2Skp1.Start.Set(0, 0)
pj.PT2x2Skp1.TopoRange.Min = 0.8
pj.PT2x2Skp1Recip = paths.NewPoolTileRecip(pj.PT2x2Skp1)
pj.PT2x2Skp1Sub2 = paths.NewPoolTileSub()
pj.PT2x2Skp1Sub2.Size.Set(2, 2)
pj.PT2x2Skp1Sub2.Skip.Set(1, 1)
pj.PT2x2Skp1Sub2.Start.Set(0, 0)
pj.PT2x2Skp1Sub2.Subs.Set(2, 2)
pj.PT2x2Skp1Sub2.TopoRange.Min = 0.8
pj.PT2x2Skp1Sub2Recip = paths.NewPoolTileSubRecip(pj.PT2x2Skp1Sub2)
pj.PT2x2Skp1Sub2Send = paths.NewPoolTileSub()
pj.PT2x2Skp1Sub2Send.Size.Set(2, 2)
pj.PT2x2Skp1Sub2Send.Skip.Set(1, 1)
pj.PT2x2Skp1Sub2Send.Start.Set(0, 0)
pj.PT2x2Skp1Sub2Send.Subs.Set(2, 2)
pj.PT2x2Skp1Sub2Send.SendSubs = true
pj.PT2x2Skp1Sub2Send.TopoRange.Min = 0.8
pj.PT2x2Skp1Sub2SendRecip = paths.NewPoolTileSub()
*pj.PT2x2Skp1Sub2SendRecip = *pj.PT2x2Skp1Sub2Send
pj.PT2x2Skp1Sub2SendRecip.Recip = true
pj.PT2x2Skp2 = paths.NewPoolTileSub()
pj.PT2x2Skp2.Size.Set(2, 2)
pj.PT2x2Skp2.Skip.Set(2, 2)
pj.PT2x2Skp2.Start.Set(0, 0)
pj.PT2x2Skp2.Subs.Set(2, 2)
pj.PT4x4Skp0 = paths.NewPoolTile()
pj.PT4x4Skp0.Size.Set(4, 4)
pj.PT4x4Skp0.Skip.Set(0, 0)
pj.PT4x4Skp0.Start.Set(0, 0)
pj.PT4x4Skp0.GaussFull.Sigma = 1.5
pj.PT4x4Skp0.GaussInPool.Sigma = 1.5
pj.PT4x4Skp0.TopoRange.Min = 0.8
pj.PT4x4Skp0Recip = paths.NewPoolTileRecip(pj.PT4x4Skp0)
pj.PT4x4Skp0Sub2 = paths.NewPoolTileSub()
pj.PT4x4Skp0Sub2.Size.Set(4, 4)
pj.PT4x4Skp0Sub2.Skip.Set(0, 0)
pj.PT4x4Skp0Sub2.Start.Set(0, 0)
pj.PT4x4Skp0Sub2.Subs.Set(2, 2)
pj.PT4x4Skp0Sub2.SendSubs = true
pj.PT4x4Skp0Sub2.GaussFull.Sigma = 1.5
pj.PT4x4Skp0Sub2.GaussInPool.Sigma = 1.5
pj.PT4x4Skp0Sub2.TopoRange.Min = 0.8
pj.PT4x4Skp0Sub2Recip = paths.NewPoolTileSubRecip(pj.PT4x4Skp0Sub2)
pj.PT1x1Skp0 = paths.NewPoolTile()
pj.PT1x1Skp0.Size.Set(1, 1)
pj.PT1x1Skp0.Skip.Set(0, 0)
pj.PT1x1Skp0.Start.Set(0, 0)
pj.PT1x1Skp0.GaussFull.Sigma = 1.5
pj.PT1x1Skp0.GaussInPool.Sigma = 1.5
pj.PT1x1Skp0.TopoRange.Min = 0.8
pj.PT1x1Skp0Recip = paths.NewPoolTileRecip(pj.PT1x1Skp0)
pj.PT6x6Skp2Lat = paths.NewPoolTileSub()
pj.PT6x6Skp2Lat.Size.Set(6, 6)
pj.PT6x6Skp2Lat.Skip.Set(2, 2)
pj.PT6x6Skp2Lat.Start.Set(-2, -2)
pj.PT6x6Skp2Lat.Subs.Set(2, 2)
pj.PT6x6Skp2Lat.TopoRange.Min = 0.8
}
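// A minimal sketch of how these patterns would be used when building the
// network (illustrative only; the net, v1, and v2 variables are assumed to
// be defined elsewhere in this sim):
//
//	pths := &Paths{}
//	pths.Defaults()
//	net.ConnectLayers(v1, v2, pths.PT4x4Skp2, axon.ForwardPath)
//	net.ConnectLayers(v2, v1, pths.PT4x4Skp2Recip, axon.BackPath)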
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mpi
import (
"cogentcore.org/core/core"
"cogentcore.org/core/math32/vecint"
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Hidden1Size is the size of hidden 1 layer.
Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Hidden2Size is the size of hidden 2 layer.
Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an
// effective mini-batch for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"32"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the
// test patterns. Use 0 or -1 for no testing.
TestInterval int `default:"5"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"5"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
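// For example, with the defaults above (ISICycles = 0, MinusCycles = 150,
// PlusCycles = 50), Cycles() returns 200 cycles per trial; and with
// Trials = 32 and NData = 16, the Trial loop advances in 2 steps of 16
// data-parallel items per epoch (see AddLevelIncr in ConfigLoops).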
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "RA25"
cfg.Title = "Axon random associator (MPI)"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/ra25/README.md"
cfg.Doc = "This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers."
}
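// The Config fields above are normally set from a .toml config file and / or
// command-line args (see Sim.Config). A minimal sketch of overriding the
// defaults programmatically, with purely illustrative values:
//
//	cfg := &Config{}
//	cfg.Defaults()
//	cfg.Run.NData = 8
//	cfg.Run.Epochs = 50
//	cfg.Log.SaveWeights = true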
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package mpi
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// mpi is a test case for mpi distributed computing, using ra25 example.
package mpi
//go:generate core generate -add-types -add-funcs -gosl
import (
"embed"
"fmt"
"io/fs"
"os"
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
//go:embed random_5x5_24.tsv
var embedfs embed.FS
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
// ss.ConfigInputs()
ss.OpenInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *env.FixedTable
if len(ss.Envs) == 0 {
trn = &env.FixedTable{}
tst = &env.FixedTable{}
} else {
trn = ss.Envs.ByMode(Train).(*env.FixedTable)
tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
inputs := tensorfs.DirTable(ss.Root.Dir("Inputs/Train"), nil)
// this logic can be used to create train-test splits of a set of patterns:
// n := inputs.NumRows()
// order := rand.Perm(n)
// ntrn := int(0.85 * float64(n))
// trnEnv := table.NewView(inputs)
// tstEnv := table.NewView(inputs)
// trnEnv.Indexes = order[:ntrn]
// tstEnv.Indexes = order[ntrn:]
// note: names must be standard here!
trn.Name = Train.String()
trn.Config(table.NewView(inputs))
trn.Validate()
tst.Name = Test.String()
tst.Config(table.NewView(inputs))
tst.Sequential = true
tst.Validate()
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
inp := net.AddLayer2D("Input", axon.InputLayer, 5, 5)
hid1 := net.AddLayer2D("Hidden1", axon.SuperLayer, ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X)
hid2 := net.AddLayer2D("Hidden2", axon.SuperLayer, ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X)
out := net.AddLayer2D("Output", axon.TargetLayer, 5, 5)
// use this to position layers relative to each other
// hid2.PlaceRightOf(hid1, 2)
// note: see emergent/path module for all the options on how to connect
// NewFull returns a new paths.Full connectivity pattern
full := paths.NewFull()
net.ConnectLayers(inp, hid1, full, axon.ForwardPath)
net.BidirConnectLayers(hid1, hid2, full)
net.BidirConnectLayers(hid2, out, full)
// net.LateralConnectLayerPath(hid1, full, &axon.HebbPath{}).SetType(InhibPath)
// note: if you wanted to change a layer type from e.g., Target to Compare, do this:
// out.Type = axon.CompareLayer
// that would mean that the output layer doesn't reflect target values in plus phase
// and thus removes error-driven learning -- but stats are still computed.
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
stop := curNZero >= stopNz
return stop
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
for di := range ndata {
ev.Step()
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
}
net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Inputs
func (ss *Sim) ConfigInputs() {
dt := table.New()
metadata.SetName(dt, "Train")
metadata.SetDoc(dt, "Training inputs")
dt.AddStringColumn("Name")
dt.AddFloat32Column("Input", 5, 5)
dt.AddFloat32Column("Output", 5, 5)
dt.SetNumRows(24)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], 6, 1, 0, 3)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[2], 6, 1, 0, 3)
dt.SaveCSV("random_5x5_24_gen.tsv", tensor.Tab, table.Headers)
tensorfs.DirFromTable(ss.Root.Dir("Inputs/Train"), dt)
}
// OpenTable opens a [table.Table] from embedded content, storing
// the data in the given tensorfs directory.
func (ss *Sim) OpenTable(dir *tensorfs.Node, fsys fs.FS, fnm, name, docs string) (*table.Table, error) {
dt := table.New()
metadata.SetName(dt, name)
metadata.SetDoc(dt, docs)
err := dt.OpenFS(embedfs, fnm, tensor.Tab)
if errors.Log(err) != nil {
return dt, err
}
tensorfs.DirFromTable(dir.Dir(name), dt)
return dt, err
}
func (ss *Sim) OpenInputs() {
dir := ss.Root.Dir("Inputs")
ss.OpenTable(dir, embedfs, "random_5x5_24.tsv", "Train", "Training inputs")
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
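// For example, StatsStart(Train, Epoch) calls RunStats(Train, Trial, Start),
// resetting the Trial-level tensors that the Epoch then aggregates over;
// Trial and Cycle levels themselves are skipped by the level <= Trial check.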
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
statDocs := map[string]string{
"CorSim": "The correlation-based similarity of the neural activity patterns between the minus and plus phase (1 = patterns are effectively identical). For target layers, this is good continuous, normalized measure of learning performance, which can be more sensitive than thresholded SSE measures.",
"UnitErr": "Normalized proportion of neurons with activities on the wrong side of 0.5 relative to the target values. This is a good normalized error measure.",
"Err": "At the trial level this indicates the presence of an error (i.e., UnitErr > 0), and at higher levels, it is the proportion of errors across the epoch. Thus, when this is zero, the network is performing perfectly (with respect to target outputs).",
"NZero": "The number of zero-error epochs in a row.",
"FirstZero": "The first epoch when there were no errors according to Err stat.",
"LastZero": "The epoch when training was stopped because NZero got above the threshold for number of perfect epochs in a row",
}
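// For example, if 3 of 32 trials in an epoch have UnitErr > 0, the epoch-level
// Err is 3/32 (about 0.09) and NZero resets to 0; after NZero consecutive
// zero-error epochs (2 by default, per Config.Run.NZero), the NZeroStop
// condition in ConfigLoops stops that run early, and LastZero records the epoch.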
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
metadata.SetDoc(tsr, statDocs[name])
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("Output")
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
stat = 1.0
if uniterr == 0 {
stat = 0
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "Output")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/mpi"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[mpi.Sim, mpi.Config]() }
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mpi
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "all defaults",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.05 // 1.05 > 1.1 for short-term; 1.1 better long-run stability
ly.Inhib.Layer.FB = 0.5 // 0.5 > 0.2 > 0.1 > 1.0 -- usu 1.0
ly.Inhib.ActAvg.Nominal = 0.06 // 0.06 > 0.05
ly.Acts.NMDA.MgC = 1.2 // 1.2 > 1.4 here, still..
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false > true here
}},
{Sel: "#Input", Doc: "critical now to specify the activity level",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Acts.Clamp.Ge = 1.5 // 1.5 > 1.0
ly.Inhib.ActAvg.Nominal = 0.15 // .24 nominal, lower to give higher excitation
}},
{Sel: "#Output", Doc: "output definitely needs lower inhib -- true for smaller layers in general",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.65 // 0.65
ly.Inhib.ActAvg.Nominal = 0.24
ly.Acts.Spikes.Tr = 1 // 1 is new minimum.. > 3
ly.Acts.Clamp.Ge = 0.8 // 0.8 > 0.6
ly.Learn.RLRate.SigmoidMin = 0.05 // sigmoid derivative actually useful here!
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "basic path params",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.1 // 0.1 learns fast but dies early, .02 is stable long term
pt.SWts.Adapt.LRate = 0.1 // .1 >= .2,
pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
pt.Learn.DWt.SubMean = 0 // 1 > 0 for long run stability
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.3 // 0.3 > 0.2 > 0.1 > 0.5
}},
},
}
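// A sketch of how an additional sheet could be defined alongside "Base" and
// selected at runtime via Config.Params.Sheet (the "SlowLRate" sheet name is
// purely illustrative; the 0.02 value is the stable long-term rate noted above):
//
//	var PathParams = axon.PathSheets{
//		"Base": { /* as above */ },
//		"SlowLRate": {
//			{Sel: "Path", Doc: "slower, more stable learning",
//				Set: func(pt *axon.PathParams) {
//					pt.Learn.LRate.Base = 0.02
//				}},
//		},
//	}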
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package neuron
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// Cycles is the total number of cycles to run.
Cycles int `min:"10" default:"200"`
// OnCycle is when the excitatory input into the neuron turns on.
OnCycle int `min:"0" default:"10"`
// OffCycle is when the excitatory input into the neuron turns off.
OffCycle int `min:"0" default:"160"`
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// Save saves a log file when run in nogui mode.
Save bool
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// GeClamp clamps a constant Ge value; otherwise there is a discrete spiking input.
GeClamp bool `default:"true"`
// SpikeHz is the frequency of input spiking for !GeClamp mode.
SpikeHz float32 `default:"50"`
// VgccGe is the strength of the VGCC contribution to Ge(t) excitatory
// conductance. This is only activated during spikes, and is an essential part of
// the Ca-driven learning to reflect recv spiking in the Ca signal.
// If too strong, it can lead to runaway excitatory bursting.
VgccGe float32 `default:"0.02"`
// AKGk is the strength of the A-type potassium channel, which is only active
// at high (depolarized) membrane potentials, i.e., during spikes.
// It is useful to balance against the excitation from VGCCs.
AKGk float32 `default:"0.1"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "Neuron"
cfg.Title = "Axon single neuron"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/neuron/README.md"
cfg.Doc = "This simulation gives an in-depth view inside the processing within an individual neuron, including the various channels that shape its dynamics in important ways."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package neuron
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 1
//gosl:end
var _ModesValueMap = map[string]Modes{`Test`: 0}
var _ModesDescMap = map[Modes]string{0: ``}
var _ModesMap = map[Modes]string{0: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 2
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// neuron: This simulation gives an in-depth view inside the processing within
// an individual neuron, including the various channels that shape its dynamics
// in important ways.
package neuron
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/icons"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Test Modes = iota
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see config.go for Config
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic params for all layers: lower gain, slower, soft clamp",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.On.SetBool(false)
ly.Acts.Init.Vm = -70
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {},
}
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// Ge is the synaptic excitatory conductance per time step (ms), when getting input.
Ge float32 `min:"0" step:"0.01" default:"0.15"`
// Gi is the raw inhibitory conductance per time step (ms).
Gi float32 `min:"0" step:"0.01" default:"0.1"`
// ErevE is the excitatory reversal (driving) potential in mV.
// This determines where excitation pushes Vm up to.
ErevE float32 `min:"-90" max:"100" step:"5" default:"0"`
// ErevI is the inhibition reversal (driving) potential in mV.
// This determines where inhibition pulls Vm down to.
ErevI float32 `min:"-100" max:"100" step:"5" default:"-90"`
// Noise is the strength of the noise conductance.
Noise float32 `min:"0" step:"0.01"`
// NmdaGe is the strength of contribution of the NMDA excitatory Ca++ current,
// to the overall Ge(t) excitatory conductance value. This channel
// has a long time constant and is essential for establishing
// a more stable neural representation over time by keeping active neurons active.
NmdaGe float32 `default:"0.006"`
// GababGk is the strength of contribution of the GABA-B inhibitory K current,
// to the overall Gk(t) inhibitory potassium (K) conductance value. This channel
// also has a long time constant like NMDA, and works in opposition to it,
// by keeping inactive neurons inactive, synergistically helping to establish
// stable neural representations.
GababGk float32 `default:"0.015"`
// KNa toggles the use of sodium-gated potassium adaptation mechanisms
// that cause the neuron to reduce spiking over time.
KNa bool `default:"true"`
// MahpGk is the strength of mAHP M-type K channel, which drives adaptation
// similar to KNa adaptation mechanisms.
MahpGk float32 `default:"0.05"`
// Config has simulation configuration parameters, set by .toml config file and / or args.
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// InputISI is the input ISI countdown for spiking mode; counts up.
InputISI float32 `display:"-"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// NetUpdate has Test mode netview update parameters.
NetUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func Embed(b tree.Node) { egui.Embed[Sim, Config](b) }
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) Defaults() {
ss.Config.Defaults()
ss.Ge = 0.15
ss.Gi = 0.1
ss.ErevE = 0
ss.ErevI = -90
ss.Noise = 0
ss.KNa = true
ss.MahpGk = 0.05
ss.NmdaGe = 0.006
ss.GababGk = 0.015
}
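// A minimal conductance-based sketch (not the exact axon equations) of how the
// Ge, Gi, ErevE, and ErevI parameters above interact: each conductance drives
// the membrane potential toward its reversal potential, e.g.
//
//	inet := ge*(erevE-vm) + gi*(erevI-vm) // plus leak and other channels
//	vm += dt * inet
//
// so excitation pushes Vm up toward ErevE (0 mV) and inhibition pulls it
// down toward ErevI (-90 mV), as described in the field docs above.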
func (ss *Sim) ConfigSim() {
ss.Defaults()
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
ss.ConfigNet(ss.Net)
ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(1)
net.Context().ThetaCycles = int32(ss.Config.Run.Cycles)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
net.AddLayer2D("Neuron", axon.SuperLayer, 1, 1)
net.Build()
net.Defaults()
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
ly := ss.Net.LayerByName("Neuron")
lyp := ly.Params
lyp.Acts.Gbar.E = 100
lyp.Acts.Gbar.L = 20
lyp.Acts.Erev.E = ss.ErevE
lyp.Acts.Erev.I = ss.ErevI
if ss.Noise > 0 {
lyp.Acts.Noise.On.SetBool(true)
lyp.Acts.Noise.Ge = ss.Noise
lyp.Acts.Noise.Gi = ss.Noise
} else {
lyp.Acts.Noise.On.SetBool(false)
}
lyp.Acts.KNa.On.SetBool(ss.KNa)
lyp.Acts.Mahp.Gk = ss.MahpGk
lyp.Acts.NMDA.Ge = ss.NmdaGe
lyp.Acts.GabaB.Gk = ss.GababGk
lyp.Acts.VGCC.Ge = ss.Config.VgccGe
lyp.Acts.AK.Gk = ss.Config.AKGk
lyp.Acts.Update()
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.SetRunName()
ss.InitRandSeed(0)
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.NetUpdate.Update(Test, Cycle)
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
ss.InitRandSeed(0)
ctx.Reset()
ss.InputISI = 0
ss.Net.InitWeights()
ss.RunStats(Test, Cycle, Start)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// RunCycles updates neuron over specified number of cycles.
func (ss *Sim) RunCycles() {
ctx := ss.Net.Context()
ss.Net.InitActs()
ss.Net.ThetaCycleStart(Test, false)
ss.Net.MinusPhaseStart()
ss.ApplyParams()
inputOn := false
for cyc := 0; cyc < ss.Config.Run.Cycles; cyc++ {
switch cyc {
case ss.Config.Run.OnCycle:
inputOn = true
case ss.Config.Run.OffCycle:
inputOn = false
}
ss.NeuronUpdate(ss.Net, inputOn)
ctx.Cycle = int32(cyc)
ss.RunStats(Test, Cycle, Step)
ss.NetUpdate.UpdateCycle(cyc, Test, Cycle)
if ss.GUI.StopNow() {
break
}
}
}
// NeuronUpdate updates the neuron.
func (ss *Sim) NeuronUpdate(nt *axon.Network, inputOn bool) {
ly := nt.LayerByName("Neuron")
ni := int(ly.NeurStIndex)
di := 0
ac := &ly.Params.Acts
// nrn.Noise = float32(ly.Params.Act.Noise.Gen(-1))
// nrn.Ge += nrn.Noise // GeNoise
// nrn.Gi = 0
if inputOn {
if ss.Config.GeClamp {
geSyn := ac.Dt.GeSynFromRawSteady(ss.Ge)
axon.Neurons.Set(ss.Ge, ni, di, int(axon.GeRaw))
axon.Neurons.Set(geSyn, ni, di, int(axon.GeSyn))
} else {
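// Discrete spiking input mode: InputISI counts cycles (~1 msec each)
// since the last input spike; once it exceeds 1000/SpikeHz msec,
// one spike's worth of GeRaw (ss.Ge) is delivered and the counter resets.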
ss.InputISI += 1
ge := float32(0)
if ss.InputISI > 1000.0/ss.Config.SpikeHz {
ge = ss.Ge
ss.InputISI = 0
}
geSyn := ac.Dt.GeSynFromRawSteady(ge)
axon.Neurons.Set(ge, ni, di, int(axon.GeRaw))
axon.Neurons.Set(geSyn, ni, di, int(axon.GeSyn))
}
} else {
axon.Neurons.Set(0, ni, di, int(axon.GeRaw))
axon.Neurons.Set(0, ni, di, int(axon.GeSyn))
}
giSyn := ac.Dt.GiSynFromRawSteady(ss.Gi)
axon.Neurons.Set(ss.Gi, ni, di, int(axon.GiRaw))
axon.Neurons.Set(giSyn, ni, di, int(axon.GiSyn))
axon.RunCycleNeuron(2)
nt.Context().CycleInc()
}
// Stop tells the sim to stop running
func (ss *Sim) Stop() {
ss.GUI.SetStopNow()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(0)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
ss.RunStats(Test, Cycle, Start)
ss.RunStats(Test, Trial, Start)
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Cycle))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ly := net.LayerByName("Neuron")
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
name := "Cycle"
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Int(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(float64(ss.Config.Run.Cycles))
})
return
}
stat := int(net.Context().Cycle)
curModeDir.Int(name, 1).SetInt1D(stat, 0)
tsr.AppendRowInt(stat)
})
vars := []string{"GeSyn", "Ge", "Gi", "Inet", "Vm", "Act", "Spike", "Gk", "ISI", "ISIAvg", "VmDend", "Gnmda", "GgabaB", "Gvgcc", "Gak", "GknaMed", "GknaSlow", "GnmdaSyn", "GababM", "VgccM", "VgccH", "MahpN", "GiSyn", "GnmdaLrn", "VgccCa", "LearnCa"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range vars {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.Float64(name)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = false
switch name {
case "Vm":
s.On = true
s.RightY = true
s.Label = "Vm"
case "Act", "Spike":
s.On = true
case "ISI", "ISIAvg", "VmDend", "GababM", "VgccCa":
s.RightY = true
}
})
continue
}
switch level {
case Cycle:
stat := float64(ly.UnitValue(name, []int{0, 0}, 0))
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
tsr.AppendRowFloat(stat)
case Trial:
subDir := modeDir.Dir((level - 1).String())
stat := stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
}
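// The following is an illustrative, standalone sketch (not part of the stats code)
// of the Start/Step phase convention used by the stat functions above: Start
// resets a stat tensor to zero rows (and sets its plot styling), each Step
// appends the current value as a new row, and the Trial level summarizes the
// accumulated Cycle rows with a mean (stats.StatMean). A plain-slice version:
func accumulateStat(rows *[]float64, start bool, val float64) float64 {
	if start {
		*rows = (*rows)[:0] // Start: reset accumulated rows
		return 0
	}
	*rows = append(*rows, val) // Step: append the current value
	sum := 0.0
	for _, v := range *rows {
		sum += v
	}
	return sum / float64(len(*rows)) // running mean, as used at the Trial level
}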
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
ctx := ss.Net.Context()
counters := fmt.Sprintf("Cycle: %d", ctx.Cycle)
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles
nv.Options.Raster.Max = ss.Config.Run.Cycles
nv.SetNet(ss.Net)
ss.NetUpdate.Config(nv, axon.Cycle, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
ss.NetUpdate.UpdateWhenStopped(mode, level)
}
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Init", Icon: icons.Update,
Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
Active: egui.ActiveStopped,
Func: func() {
ss.Init()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Stop", Icon: icons.Stop,
Tooltip: "Stops running.",
Active: egui.ActiveRunning,
Func: func() {
ss.Stop()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Run Cycles", Icon: icons.PlayArrow,
Tooltip: "Runs neuron updating over Cycles.",
Active: egui.ActiveStopped,
Func: func() {
if !ss.GUI.IsRunning() {
go func() {
ss.GUI.StartRun()
ss.RunCycles()
ss.GUI.Stopped(Test, Trial)
}()
}
},
})
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Reset Plot", Icon: icons.Update,
Tooltip: "Reset TstCycPlot.",
Active: egui.ActiveStopped,
Func: func() {
ss.StatsInit()
ss.GUI.UpdateWindow()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Defaults", Icon: icons.Update,
Tooltip: "Restore initial default parameters.",
Active: egui.ActiveStopped,
Func: func() {
ss.Defaults()
ss.Init()
ss.GUI.UpdateWindow()
},
})
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
// runName := ss.SetRunName()
// netName := ss.Net.Name
// axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{[]string{"Cycle"}})
mpi.Printf("Running %d Cycles\n", ss.Config.Run.Cycles)
ss.RunCycles()
// axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/neuron"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[neuron.Sim, neuron.Config]() }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objrec
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/paths"
)
// EnvConfig has config params for the environment.
// Note: only key Env params that matter for both Network and Env are fields here;
// other params are set via the Env map data mechanism.
type EnvConfig struct { //types:add
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
Env map[string]any
// number of units per localist output unit
NOutPer int `default:"5"`
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
// V1V4Path is the pathway from V1 to V4, which is tiled 4x4 skip 2 with topo scale values.
V1V4Path *paths.PoolTile `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
func (cfg *ParamConfig) Defaults() {
cfg.V1V4Path = paths.NewPoolTile()
cfg.V1V4Path.Size.Set(4, 4)
cfg.V1V4Path.Skip.Set(2, 2)
cfg.V1V4Path.Start.Set(-1, -1)
cfg.V1V4Path.TopoRange.Min = 0.8 // note: none of these make a very big diff
// but using a symmetric scale range .8 - 1.2 seems like it might be good -- otherwise
// weights are systematically smaller.
// ss.V1V4Path.GaussFull.DefNoWrap()
// ss.V1V4Path.GaussInPool.DefNoWrap()
}
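// The following is an illustrative, standalone sketch (not part of the config
// code) of what the V1V4Path settings above imply, assuming the usual tiled
// receptive-field convention for paths.PoolTile: destination pool di connects
// to the window of source pools starting at Start + di*Skip and spanning Size
// pools (clipped to the source range). With Size=4, Skip=2, Start=-1, adjacent
// V4 pools see 4x4 windows of V1 pools that overlap by half.
func poolWindow(di, start, skip, size, srcN int) (lo, hi int) {
	lo = start + di*skip
	hi = lo + size
	if lo < 0 {
		lo = 0
	}
	if hi > srcN {
		hi = srcN
	}
	return lo, hi
}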
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Is significantly faster for both CPU and GPU. Results in an effective
// mini-batch of learning.
NData int `default:"16" min:"1"`
// SlowInterval is the interval between slow adaptive processes.
// This generally needs to be longer than the default of 100 in larger models.
SlowInterval int `default:"200"` // 200 > 400
// AdaptGiInterval is the interval between adapting inhibition steps.
AdaptGiInterval int `default:"200"` // 200 same is fine
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"200"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"128"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"10"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"160"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"60"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the test patterns.
// Can use 0 or -1 for no testing.
TestInterval int `default:"5"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// environment configuration options
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "Objrec"
cfg.Title = "Object Recognition"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/objrec/README.md"
cfg.Doc = "This simulation explores how a hierarchy of areas in the ventral stream of visual processing (up to inferotemporal (IT) cortex) can produce robust object recognition that is invariant to changes in position, size, etc of retinal input images."
cfg.Params.Defaults()
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package objrec
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1, 2}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 3
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1, `NovelTrain`: 2}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``, 2: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`, 2: `NovelTrain`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objrec
import (
"fmt"
"image"
"cogentcore.org/core/base/slicesx"
"cogentcore.org/core/gpu"
"cogentcore.org/core/paint"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
"github.com/emer/v1vision/v1std"
"github.com/emer/v1vision/v1vision"
"github.com/emer/v1vision/vxform"
)
// TrialState contains the state for a given trial.
// Trials are processed data-parallel per Step().
type TrialState struct {
// LED number that was drawn
LED int `edit:"-"`
// XForm has the current and previous transform parameters.
XForm vxform.XForm
// DrawImage is the image as drawn by the LED drawer.
DrawImage image.Image
// DrawImageTsr is the image as drawn by the LED drawer, in tensor form.
DrawImageTsr tensor.Float32
// XFormImage is the transformed image, from XForm.
XFormImage image.Image
}
func (st *TrialState) String() string {
return fmt.Sprintf("Obj: %02d, %s", st.LED, st.XForm.String())
}
// LEDEnv generates images of old-school "LED" style "letters"
// composed of a set of horizontal and vertical elements.
// All possible such combinations of 3 out of 6 line segments are created.
type LEDEnv struct {
// name of this environment
Name string
// NData is the number of steps to process in data-parallel.
NData int
// Trials has the per-data-parallel trial state (NData items) from the last Step().
Trials []TrialState
// draws LEDs onto image
Draw LEDraw
// V1c does all the V1 processing.
V1c v1std.V1cGrey `new-window:"+"`
// Image manages setting image.
Image v1std.Image
// number of output units per LED item -- spiking benefits from replication
NOutPer int
// minimum LED number to draw (0-19)
MinLED int `min:"0" max:"19"`
// maximum LED number to draw (0-19)
MaxLED int `min:"0" max:"19"`
// random transform parameters
XFormRand vxform.Rand
// Output is the one-hot output tensor for the current LED, dims: [NData][4][5][NOutPer][1]
Output tensor.Float32
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed: set this to control sequence
RandSeed int64 `edit:"-"`
}
func (ev *LEDEnv) Trial(di int) *TrialState {
return &ev.Trials[di]
}
func (ev *LEDEnv) Label() string { return ev.Name }
func (ev *LEDEnv) State(element string) tensor.Values {
switch element {
case "Image":
// todo:
// v1vision.RGBToGrey(paint.RenderToImage(ev.Draw.Paint), &ev.OrigImg, 0, false) // pad for filt, bot zero
// return &ev.OrigImg
case "V1":
return ev.V1c.Output
case "Output":
return &ev.Output
}
return nil
}
func (ev *LEDEnv) Defaults() {
ev.Draw.Defaults()
ev.V1c.Defaults()
ev.Image.Defaults()
ev.Image.Size = image.Point{40, 40}
ev.V1c.SetSize(6, 2) // V1mF16 typically = 12, no border, spc = 4 -- using 1/2 that here
ev.NOutPer = 5
ev.XFormRand.TransX.Set(-0.25, 0.25)
ev.XFormRand.TransY.Set(-0.25, 0.25)
ev.XFormRand.Scale.Set(0.7, 1)
ev.XFormRand.Rot.Set(-3.6, 3.6)
}
func (ev *LEDEnv) Config(ndata int, netGPU *gpu.GPU) {
ev.NData = ndata
ev.Trials = slicesx.SetLength(ev.Trials, ndata)
v1vision.ComputeGPU = netGPU
ev.V1c.Config(ndata, ev.Image.Size)
ev.Output.SetShapeSizes(ndata, 4, 5, ev.NOutPer, 1)
}
func (ev *LEDEnv) Init(run int) {
ev.Draw.Init()
ev.RandSeed = int64(73 + run)
if ev.Rand.Rand == nil {
ev.Rand.NewRand(ev.RandSeed)
} else {
ev.Rand.Seed(ev.RandSeed)
}
}
func (ev *LEDEnv) Step() bool {
imgs := make([]image.Image, ev.NData)
for di := range ev.NData {
st := ev.Trial(di)
ev.DrawRandLED(di, st)
imgs[di] = st.XFormImage
}
ev.V1c.RunImages(&ev.Image, imgs...)
return true
}
func (ev *LEDEnv) Action(element string, input tensor.Values) {
// nop
}
// Compile-time check that LEDEnv implements the env.Env interface.
var _ env.Env = (*LEDEnv)(nil)
func (ev *LEDEnv) String() string {
return ev.TrialName(0)
}
// TrialName returns the string rep of the LED env state
func (ev *LEDEnv) TrialName(di int) string {
st := ev.Trial(di)
return st.String()
}
// SetOutput sets the output LED bit for given data item
func (ev *LEDEnv) SetOutput(di, out int) {
ot := ev.Output.SubSpace(di).(*tensor.Float32)
ot.SetZeros()
si := ev.NOutPer * out
for i := 0; i < ev.NOutPer; i++ {
ot.SetFloat1D(1, si+i)
}
}
// OutErr scores the output activity of network, returning the index of
// item with max overall activity, and 1 if that is error, 0 if correct.
// also returns a top-two error: if 2nd most active output was correct.
func (ev *LEDEnv) OutErr(tsr *tensor.Float64, di, corLED int) (maxi int, err, err2 float64) {
ot := ev.Output.SubSpace(di).(*tensor.Float32)
nc := ot.Len() / ev.NOutPer
maxi = 0
maxv := 0.0
for i := 0; i < nc; i++ {
si := ev.NOutPer * i
sum := 0.0
for j := 0; j < ev.NOutPer; j++ {
sum += tsr.Float1D(si + j)
}
if sum > maxv {
maxi = i
maxv = sum
}
}
err = 1.0
if maxi == corLED {
err = 0
}
maxv2 := 0.0
maxi2 := 0
for i := 0; i < nc; i++ {
if i == maxi { // skip top
continue
}
si := ev.NOutPer * i
sum := 0.0
for j := 0; j < ev.NOutPer; j++ {
sum += tsr.Float1D(si + j)
}
if sum > maxv2 {
maxi2 = i
maxv2 = sum
}
}
err2 = err
if maxi2 == corLED {
err2 = 0
}
return
}
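// The following is an illustrative, standalone sketch (not part of the env code)
// of the scoring logic in OutErr above: sum the activity of each NOutPer-unit
// group, take the group with the largest sum as the network's response, and also
// score whether the second-best group was the correct one. A plain-slice version:
func scoreGroups(acts []float64, nOutPer, correct int) (best int, err, err2 float64) {
	nc := len(acts) / nOutPer
	best = -1
	second := -1
	bestV, secondV := -1.0, -1.0
	for i := 0; i < nc; i++ {
		sum := 0.0
		for j := 0; j < nOutPer; j++ {
			sum += acts[i*nOutPer+j]
		}
		if sum > bestV {
			second, secondV = best, bestV
			best, bestV = i, sum
		} else if sum > secondV {
			second, secondV = i, sum
		}
	}
	err, err2 = 1, 1
	if best == correct {
		err, err2 = 0, 0
	} else if second == correct {
		err2 = 0
	}
	return best, err, err2
}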
// DrawRandLED picks a new random LED and draws it
func (ev *LEDEnv) DrawRandLED(di int, st *TrialState) {
rng := 1 + ev.MaxLED - ev.MinLED
led := ev.MinLED + ev.Rand.Intn(rng)
ev.DrawLED(led)
st.LED = led
st.DrawImage = paint.RenderToImage(ev.Draw.Paint)
ev.SetOutput(di, led)
ev.XFormRand.Gen(&st.XForm, &ev.Rand)
st.XFormImage = st.XForm.Image(st.DrawImage)
}
// DrawLED draws the specified LED
func (ev *LEDEnv) DrawLED(led int) {
ev.Draw.Clear()
ev.Draw.DrawLED(led)
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objrec
import (
"image"
"image/color"
"cogentcore.org/core/colors"
"cogentcore.org/core/math32"
"cogentcore.org/core/paint"
)
// LEDraw renders old-school "LED" style "letters" composed of a set of horizontal
// and vertical elements. All possible such combinations of 3 out of 6 line segments are created.
// Renders using the paint package.
type LEDraw struct { //types:add
// line width of LEDraw as percent of display size
Width float32 `default:"4"`
// size of overall LED as proportion of overall image size
Size float32 `default:"0.6"`
// color name for drawing lines
LineColor color.RGBA
// color name for background
BgColor color.RGBA
// size of image to render
ImgSize image.Point
// painting context object
Paint *paint.Painter `display:"-"`
}
func (ld *LEDraw) Defaults() {
ld.ImgSize = image.Point{120, 120}
ld.Width = 4
ld.Size = 0.6
ld.LineColor = colors.White
ld.BgColor = colors.Black
}
// Init ensures that the image is created and of the right size, and renderer is initialized
func (ld *LEDraw) Init() {
if ld.ImgSize.X == 0 || ld.ImgSize.Y == 0 {
ld.Defaults()
}
ld.Paint = paint.NewPainter(math32.FromPoint(ld.ImgSize))
ld.Paint.Stroke.Width.Pw(ld.Width)
ld.Paint.Stroke.Color = colors.Uniform(ld.LineColor)
ld.Paint.Fill.Color = colors.Uniform(ld.BgColor)
ld.Paint.ToDots()
}
// Clear clears the image with BgColor
func (ld *LEDraw) Clear() {
if ld.Paint == nil {
ld.Init()
}
ld.Paint.Clear()
}
// DrawSeg draws one segment
func (ld *LEDraw) DrawSeg(seg LEDSegs) {
ctrX := float32(ld.ImgSize.X) * 0.5
ctrY := float32(ld.ImgSize.Y) * 0.5
szX := ctrX * ld.Size
szY := ctrY * ld.Size
// note: top-zero coordinates
switch seg {
case Bottom:
ld.Paint.Line(ctrX-szX, ctrY+szY, ctrX+szX, ctrY+szY)
case Left:
ld.Paint.Line(ctrX-szX, ctrY-szY, ctrX-szX, ctrY+szY)
case Right:
ld.Paint.Line(ctrX+szX, ctrY-szY, ctrX+szX, ctrY+szY)
case Top:
ld.Paint.Line(ctrX-szX, ctrY-szY, ctrX+szX, ctrY-szY)
case CenterH:
ld.Paint.Line(ctrX-szX, ctrY, ctrX+szX, ctrY)
case CenterV:
ld.Paint.Line(ctrX, ctrY-szY, ctrX, ctrY+szY)
}
ld.Paint.Draw()
}
// DrawLED draws one LED of given number, based on LEDdata
func (ld *LEDraw) DrawLED(num int) {
led := LEData[num]
for _, seg := range led {
ld.DrawSeg(seg)
}
}
//////// LED data
// LEDSegs are the LED segments.
type LEDSegs int32
const (
Bottom LEDSegs = iota
Left
Right
Top
CenterH
CenterV
LEDSegsN
)
var LEData = [][3]LEDSegs{
{CenterH, CenterV, Right},
{Top, CenterV, Bottom},
{Top, Right, Bottom},
{Bottom, CenterV, Right},
{Left, CenterH, Right},
{Left, CenterV, CenterH},
{Left, CenterV, Right},
{Left, CenterV, Bottom},
{Left, CenterH, Top},
{Left, CenterH, Bottom},
{Top, CenterV, Right},
{Bottom, CenterV, CenterH},
{Right, CenterH, Bottom},
{Top, CenterH, Bottom},
{Left, Top, Right},
{Top, CenterH, Right},
{Left, CenterV, Top},
{Top, Left, Bottom},
{Left, Bottom, Right},
{Top, CenterV, CenterH},
}
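// The following is an illustrative, standalone sketch (not part of the data
// above): LEData lists all 20 = C(6,3) ways of choosing 3 of the 6 segments,
// so each "letter" is a distinct 3-segment combination. A minimal enumeration
// of those combinations (in a different order than LEData):
func threeOfSix() [][3]LEDSegs {
	var combos [][3]LEDSegs
	for a := Bottom; a < LEDSegsN; a++ {
		for b := a + 1; b < LEDSegsN; b++ {
			for c := b + 1; c < LEDSegsN; c++ {
				combos = append(combos, [3]LEDSegs{a, b, c})
			}
		}
	}
	return combos // len(combos) == 20
}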
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// objrec explores how a hierarchy of areas in the ventral stream
// of visual processing (up to inferotemporal (IT) cortex) can produce
// robust object recognition that is invariant to changes in position,
// size, etc of retinal input images.
package objrec
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorcore"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
NovelTrain
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, novTrn, tst *LEDEnv
if len(ss.Envs) == 0 {
trn = &LEDEnv{}
novTrn = &LEDEnv{}
tst = &LEDEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*LEDEnv)
novTrn = ss.Envs.ByMode(NovelTrain).(*LEDEnv)
tst = ss.Envs.ByMode(Test).(*LEDEnv)
}
trn.Name = Train.String()
trn.Defaults()
trn.MinLED = 0
trn.MaxLED = 17 // exclude last 2 by default
trn.NOutPer = ss.Config.Env.NOutPer
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(ss.Config.Run.NData, axon.ComputeGPU)
novTrn.Name = NovelTrain.String()
novTrn.Defaults()
novTrn.MinLED = 18
novTrn.MaxLED = 19 // only last 2 items
novTrn.NOutPer = ss.Config.Env.NOutPer
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(novTrn, ss.Config.Env.Env)
}
novTrn.XFormRand.TransX.Set(-0.125, 0.125)
novTrn.XFormRand.TransY.Set(-0.125, 0.125)
novTrn.XFormRand.Scale.Set(0.775, 0.925) // 1/2 around midpoint
novTrn.XFormRand.Rot.Set(-2, 2)
novTrn.Config(ss.Config.Run.NData, axon.ComputeGPU)
tst.Name = Test.String()
tst.Defaults()
tst.MinLED = 0
tst.MaxLED = 19 // all by default
tst.NOutPer = ss.Config.Env.NOutPer
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(ss.Config.Run.NData, axon.ComputeGPU)
trn.Init(0)
trn.Step() // needs an image to show
trn.Init(0)
novTrn.Init(0)
tst.Init(0)
ss.Envs.Add(trn, novTrn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).
SetSlowInterval(int32(ss.Config.Run.SlowInterval)).
SetAdaptGiInterval(int32(ss.Config.Run.AdaptGiInterval)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
v1 := net.AddLayer4D("V1", axon.InputLayer, 10, 10, 5, 4)
v4 := net.AddLayer4D("V4", axon.SuperLayer, 7, 7, 10, 10) // 10x10 == 16x16 > 7x7 (orig, 5, 5, 10, 10)
it := net.AddLayer2D("IT", axon.SuperLayer, 16, 16) // 16x16 == 20x20 > 10x10 (orig, 16, 16)
out := net.AddLayer4D("Output", axon.TargetLayer, 4, 5, ss.Config.Env.NOutPer, 1)
v1.SetSampleShape(emer.CenterPoolIndexes(v1, 2), emer.CenterPoolShape(v1, 2))
v4.SetSampleShape(emer.CenterPoolIndexes(v4, 2), emer.CenterPoolShape(v4, 2))
full := paths.NewFull()
_ = full
rndpath := paths.NewUniformRand() // no advantage
rndpath.PCon = 0.5 // 0.2 > .1
_ = rndpath
pool1to1 := paths.NewPoolOneToOne()
_ = pool1to1
rndcut := paths.NewUniformRand()
rndcut.PCon = 0.1 // 0.2 == .1 459
_ = rndcut
net.ConnectLayers(v1, v4, ss.Config.Params.V1V4Path, axon.ForwardPath)
v4IT, _ := net.BidirConnectLayers(v4, it, full)
itOut, outIT := net.BidirConnectLayers(it, out, full)
net.ConnectLayers(v1, it, rndcut, axon.ForwardPath).AddClass("V1SC")
it.PlaceRightOf(v4, 2)
out.PlaceRightOf(it, 2)
v4IT.AddClass("NovLearn")
itOut.AddClass("NovLearn")
outIT.AddClass("NovLearn")
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
minus := ss.Config.Run.MinusCycles
plus := ss.Config.Run.PlusCycles
isi := ss.Config.Run.ISICycles
if ss.Config.Debug {
mpi.Println("ISI:", isi, "Minus:", minus, "Plus:", plus)
}
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
stop := curNZero >= stopNz
return stop
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
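// The following is an illustrative, standalone sketch (not part of the sim code)
// of the trials computation at the top of ConfigLoops: the Trial loop advances by
// NData each step, so the per-epoch trial count is rounded up to a multiple of
// NData. This assumes math32.IntMultipleGE returns the smallest integer multiple
// of its second argument that is >= its first argument.
func roundUpToMultiple(trials, ndata int) int {
	if trials%ndata == 0 {
		return trials // e.g., 128 trials with NData=16 stays 128
	}
	return (trials/ndata + 1) * ndata // e.g., 100 trials with NData=16 becomes 112
}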
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ctx := net.Context()
ndata := int(ctx.NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*LEDEnv)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
ev.Step()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExtAll(ctx, st)
}
}
for di := range ndata {
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.TrialName(di), di)
curModeDir.Int("Cat", ndata).SetInt1D(ev.Trial(di).LED, di)
}
net.ApplyExts()
ss.UpdateImage()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
ev := ss.Envs.ByMode(Train).(*LEDEnv)
img := ev.Image.Tsr.SubSpace(0)
tensorcore.AddGridStylerTo(img, func(s *tensorcore.GridStyle) {
s.Image = true
s.Range.SetMin(0)
})
tbs.TensorGrid("Image", img)
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "Err2", "Resp", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "UnitErr", "Resp", "NZero":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("Output")
ltsr := curModeDir.Float64(out.Name+"_ActM", out.Shape.Sizes...)
ev := ss.Envs.ByMode(Modes(ss.Net.Context().Mode)).(*LEDEnv)
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
out.UnitValuesSampleTensor(ltsr, "ActM", di)
cat := curModeDir.Int("Cat", ndata).Int1D(di)
rsp, trlErr, trlErr2 := ev.OutErr(ltsr, di, cat)
curModeDir.Float64("Resp", ndata).SetInt1D(rsp, di)
curModeDir.Float64("Err2", ndata).SetFloat1D(trlErr2, di)
stat = trlErr
case "Err2":
stat = curModeDir.Float64(name, ndata).Float1D(di)
case "Resp":
stat = curModeDir.Float64(name, ndata).Float1D(di)
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
giMultFunc := axon.StatLayerGiMult(ss.Stats, net, Train, Epoch, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
giMultFunc(mode, level, phase == Start)
})
superLays := net.LayersByType(axon.SuperLayer, axon.CTLayer)
learnNowFunc := axon.StatLearnNow(ss.Stats, ss.Current, net, Trial, Run, superLays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
learnNowFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Output")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
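// The following is an illustrative, standalone sketch (not part of the stats
// code) of the epoch-level NZero / FirstZero / LastZero logic above: NZero counts
// consecutive zero-error epochs (reset whenever an epoch has any error),
// FirstZero records the first epoch where that count reaches 1, and LastZero
// records the first epoch where it reaches Config.Run.NZero, which is also the
// condition that stops training early in ConfigLoops.
func zeroStats(epochErrs []float64, nZeroStop int) (firstZero, lastZero int) {
	firstZero, lastZero = -1, -1
	nZero := 0
	for epoch, err := range epochErrs {
		if err == 0 {
			nZero++
		} else {
			nZero = 0
		}
		if firstZero < 0 && nZero == 1 {
			firstZero = epoch
		}
		if lastZero < 0 && nZeroStop > 0 && nZero >= nZeroStop {
			lastZero = epoch
		}
	}
	return firstZero, lastZero
}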
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.733, 2.3)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
trn := ss.Envs.ByMode(Train).(*LEDEnv)
img := trn.Image.Tsr.SubSpace(0).(*tensor.Float32)
tensorcore.AddGridStylerTo(img, func(s *tensorcore.GridStyle) {
s.Image = true
s.Range.SetMin(0)
})
ss.GUI.Tabs.TensorGrid("Image", img)
ss.GUI.Tabs.SelectTabIndex(0)
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) UpdateImage() {
if !ss.Config.GUI {
return
}
ss.GUI.Tabs.TabUpdateRender("Image")
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/objrec"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[objrec.Sim, objrec.Config]() }
package objrec
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "needs some special inhibition and learning params",
Set: func(ly *axon.LayerParams) {
ly.Acts.Decay.Act = 0.0 // 0 > .2 -- highly sensitive
ly.Acts.Decay.Glong = 0.6 // 0.6 def > 0.5, .7 -- highly sensitive
ly.Acts.NMDA.MgC = 1.4 // 1.4, 5 > 1.2, 0
ly.Acts.NMDA.Voff = 0 // see above
ly.Acts.NMDA.Ge = 0.006 // 0.006 > 7 or higher
ly.Acts.GabaB.Gk = 0.015 // 0.015 > lower; higher not better
ly.Inhib.ActAvg.AdaptRate = 0.1 // 0.1 default > 0.05?
ly.Inhib.ActAvg.AdaptMax = 0.05 // 0.05 > 0.01
ly.Learn.CaSpike.SpikeCaM = 12 // 12 > 8 > 15 (too high) -- 12 makes everything work!
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002 // 0.0002 > others -- 0.005 sig worse
ly.Learn.LearnNMDA.MgC = 1.4 // 1.4, 5 > 1.2, 0
ly.Learn.LearnNMDA.Voff = 0 // see above
ly.Learn.LearnNMDA.Tau = 100 // 100 def
ly.Learn.LearnNMDA.Ge = 0.006
ly.Learn.RLRate.SigmoidLinear.SetBool(true) // true > false later; more stable
ly.Learn.CaLearn.Norm = 80 // 80 works
ly.Learn.CaLearn.SpikeVGCC.SetBool(true) // sig better..
ly.Learn.CaLearn.SpikeVgccCa = 35 // 70 / 5 or 35 / 10 both work
ly.Learn.CaLearn.VgccTau = 10 // 10 > 5 ?
ly.Learn.CaLearn.Dt.MTau = 2 // 2 > 4 even with more ncycles
ly.Learn.CaSpike.Dt.MTau = 5 // 5 > 10 even with more ncycles
ly.Learn.Timing.On.SetBool(false)
ly.Learn.Timing.Refractory.SetBool(false) // ?
ly.Learn.Timing.LearnThr = 0.1 // 0.1 best for trial-based
ly.Learn.Timing.SynCaCycles = 160 // 160 best for trial-based
ly.Learn.Timing.Cycles = 170 // 170 > 160
ly.Learn.Timing.TimeDiffTau = 4 // 4 > 2
}},
{Sel: "#V1", Doc: "pool inhib (not used), initial activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.08 // 0.08 == 0.9 just noisier
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.9 // 0.9 def
ly.Inhib.Pool.Gi = 0.9 // 0.9 def
ly.Inhib.Layer.FB = 1
ly.Inhib.Pool.FB = 1
ly.Acts.Clamp.Ge = 1.5 // 1.5 for fsffffb
ly.Acts.Decay.Act = 1 // 1 = slightly beneficial
ly.Acts.Decay.Glong = 1
}},
{Sel: "#V4", Doc: "pool inhib, sparse activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.03 // 0.03 > .04 > 0.025
ly.Inhib.ActAvg.Offset = 0 // 0.01 not good
ly.Inhib.ActAvg.AdaptGi.SetBool(true)
ly.Inhib.Layer.FB = 1 // 1.1 FB1 >> 4!
ly.Inhib.Pool.FB = 4 // 4
ly.Inhib.Layer.SS = 30 // 30 best
ly.Inhib.Pool.SS = 30 // 0 works here..
ly.Inhib.Layer.Gi = 1.0 // 1.1 > 1.0 -- def 1.1, 1.0 > 1.0, 1.1!
ly.Inhib.Pool.Gi = 0.9 // 0.9
ly.Inhib.Pool.On.SetBool(true) // needs pool-level
}},
{Sel: "#IT", Doc: "initial activity",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.04 // 0.05 > 0.04 with v1sc
ly.Inhib.ActAvg.AdaptGi.SetBool(true)
ly.Inhib.Layer.Gi = 1.1 // 1.1 > 1.05 1.6.15 adapt
ly.Inhib.Layer.FB = 4 // 4
}},
{Sel: "#Output", Doc: "high inhib for one-hot output",
Set: func(ly *axon.LayerParams) {
// ly.Acts.Decay.Act = 0.0 // 0.2 with glong .6 best in lvis, slows learning here
// ly.Acts.Decay.Glong = 0.6 // 0.6 def
ly.Inhib.ActAvg.Nominal = 0.05 // 0.05 nominal
ly.Inhib.ActAvg.Offset = -0.005 //
ly.Inhib.ActAvg.AdaptGi.SetBool(true) //
ly.Inhib.Layer.Gi = 1.2 // 1.2 FB1 > 1.1 FB4
ly.Inhib.Layer.FB = 1 //
ly.Acts.Clamp.Ge = 0.8 // 0.8 > 1.0 > 0.6 1.6.4
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
// pt.Com.MaxDelay = 10 // not much effect
// pt.Com.Delay = 10
pt.Learn.LRate.Base = 0.2 // 0.2 > 0.1 > 0.05
pt.Learn.DWt.SubMean = 1 // 1 -- faster if 0 until 20 epc -- prevents sig amount of late deterioration
pt.SWts.Adapt.LRate = 0.0001 // 0.005 == .1 == .01
pt.SWts.Adapt.HiMeanDecay = 0 // 0 > 0.0008 (best in lvis)
pt.SWts.Adapt.HiMeanThr = 0.5 // 0.5, 0.0008 goes the distance
pt.SWts.Init.SPct = 1 // 1 >= lower (trace-v11)
pt.Learn.DWt.CaPScale = 1 //
pt.Learn.DWt.LearnThr = 0.1
pt.Learn.DWt.SynCa20.SetBool(false)
}},
{Sel: ".ToTarget", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.05 // 0.05 > 0.1
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates -- smaller as network gets bigger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.2 // .2 >= .3 > .15 > .1 > .05 @176
}},
{Sel: "#V4ToIT", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.2 // 1.2 >= 1.0 ? > 1.5 too much
}},
{Sel: ".V1SC", Doc: "v1 shortcut",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.04 // 0.02 >= 0.01 > 0.001, 0.05
pt.PathScale.Rel = 0.05 // 0.2 >> 0.3, 0.5 (blows up)
pt.SWts.Adapt.On.SetBool(false) // seems better
}},
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pfcmaint
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// MaintCons uses maintenance connections instead of self-maintenance.
MaintCons bool
// NUnits is the number of units per dimension in the PFC.
NUnits int `default:"7"`
// Tweak means to perform automated parameter tweaking for
// parameters marked Hypers Tweak = log,incr, or [vals].
Tweak bool
// Baseline for Tweak, if true, first run a baseline with current default params.
Baseline bool
// DryRun for Tweak, if true, only print what would be done, don't run.
DryRun bool
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an
// effective mini-batch size for learning.
NData int `default:"2" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"50"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"128"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
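// With the defaults above (ISICycles = 0, MinusCycles = 150, PlusCycles = 50), this is 200 cycles per trial.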
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "PFCMaintS"
cfg.Title = "Prefrontal Cortex Maintenance"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/pfcmaint/README.md"
cfg.Doc = "This project tests prefrontal cortex (PFC) active maintenance mechanisms supported by the pyramidal tract (PT) neurons, in the PTMaint layer type."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package pfcmaint
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 4
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pfcmaint
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
// ly.Inhib.ActAvg.Nominal = 0.2
}},
{Sel: ".Time", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
ly.Acts.Dend.ModGain = 1.5
ly.Acts.GabaB.Gk = 0.01 // too strong and it depresses firing for a long time
ly.Acts.SMaint.On.SetBool(true)
ly.Acts.SMaint.NNeurons = 10 // higher = more activity
ly.Acts.SMaint.ISI.Min = 1 // 1 sig better than 3
ly.Acts.SMaint.ISI.Max = 20 // not much effect
ly.Acts.SMaint.Ge = 0.2
ly.Acts.SMaint.Inhib = 1
ly.Inhib.ActAvg.Nominal = 0.1
ly.Inhib.Layer.Gi = 0.5
ly.Inhib.Pool.Gi = 0.5 // not active
}},
{Sel: ".PTPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.8 // 0.8 def
ly.CT.GeGain = 0.05 // 0.05 def
}},
{Sel: ".CTLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.4 // 0.8 def
ly.CT.GeGain = 2 // 2 def
}},
{Sel: ".BGThalLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Learn.NeuroMod.AChDisInhib = 0
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.01
}},
{Sel: ".PFCPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0
pt.Learn.LRate.Base = 0.01
}},
{Sel: "#GPiToPFCThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.0
}},
{Sel: ".InputToPFC", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: ".CTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // 1 def
}},
{Sel: ".PTtoPred", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // was 6
}},
{Sel: ".CTToPulv", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0
pt.PathScale.Abs = 0
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.0 // 4 > 2 for gating sooner
}},
{Sel: "#PFCPTpToItemP", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1
}},
{Sel: "#ItemPToPFCCT", Doc: "weaker",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 0.1
}},
{Sel: "#TimePToPFCCT", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5
}},
{Sel: "#TimePToPFC", Doc: "stronger",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.5
}},
},
}
// LayerParamsCons are params for MaintCons case
var LayerParamsCons = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
// ly.Inhib.ActAvg.Nominal = 0.2
}},
{Sel: ".Time", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
ly.Acts.Dend.ModGain = 1.5
ly.Acts.GabaB.Gk = 0.01 // too strong and it depresses firing for a long time
ly.Acts.SMaint.On.SetBool(false)
ly.Inhib.Layer.Gi = 2.6 // 3 is too strong
ly.Inhib.Pool.Gi = 3 // not active
}},
{Sel: ".BGThalLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Learn.NeuroMod.AChDisInhib = 0
}},
},
}
// PathParamsCons are params for MaintCons case.
var PathParamsCons = axon.PathSheets{
"Base": {
{Sel: ".PFCPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0
}},
{Sel: "#GPiToPFCThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.0
}},
{Sel: ".InputToPFC", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: ".PFCPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
}},
{Sel: ".PTSelfMaint", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 1
pt.PathScale.Abs = 5 // needs 5
}},
{Sel: ".CTToPulv", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0
pt.PathScale.Abs = 0
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.0 // 4 > 2 for gating sooner
}},
},
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pfcmaint
import (
"fmt"
"log/slog"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
)
// PFCMaintEnv implements a simple store-maintain-recall active maintenance task
type PFCMaintEnv struct {
// name of environment -- Train or Test
Name string
// training or testing env?
Mode etime.Modes
// Sequence is the counter for the outer loop of maintenance over items.
Sequence env.Counter `display:"inline"`
// Trial is the counter for the maintenance step within an item.
Trial env.Counter `display:"inline"`
// Di is the data parallel index.
Di int
// NData is the total number of data-parallel items.
NData int
// number of different items to maintain.
NItems int
// StartItem is the first item index for this environment, based on Di and NData.
StartItem int
// NTrials is the number of maintenance trials per item.
NTrials int
// NUnitsY is the number of units in Y for the state representation.
NUnitsY int `display:"-"`
// NUnitsX is the number of units in X for the state representation.
NUnitsX int `display:"-"`
// NUnits is the total number of units (NUnitsY * NUnitsX).
NUnits int `display:"-"`
// item patterns
Pats table.Table
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// States are the named state tensors: Item, Time, GPi.
States map[string]*tensor.Float32
}
func (ev *PFCMaintEnv) Label() string { return ev.Name }
func (ev *PFCMaintEnv) String() string {
return fmt.Sprintf("%d", ev.Trial.Cur)
}
func (ev *PFCMaintEnv) Defaults() {
ev.NItems = 10
ev.NTrials = 10
ev.NUnitsY = 5
ev.NUnitsX = 5
ev.NUnits = ev.NUnitsY * ev.NUnitsX
}
// Config configures the world
func (ev *PFCMaintEnv) Config(mode etime.Modes, di, ndata int, rndseed int64) {
ev.Mode = mode
ev.Di = di
ev.NData = ndata
ev.RandSeed = rndseed
ev.Rand.NewRand(ev.RandSeed)
ev.States = make(map[string]*tensor.Float32)
ev.States["Item"] = tensor.NewFloat32(ev.NUnitsY, ev.NUnitsX)
ev.States["Time"] = tensor.NewFloat32(ev.NUnitsY, ev.NTrials)
ev.States["GPi"] = tensor.NewFloat32(ev.NUnitsY, ev.NUnitsX)
if ev.NItems%ndata != 0 {
slog.Error("PFCMaintEnv: Number of items must be evenly divisible by NData", "NItems:", ev.NItems, "NData:", ndata)
}
nper := ev.NItems / ndata
ev.Sequence.Max = nper
ev.StartItem = ev.Di * nper
ev.Trial.Max = ev.NTrials
ev.ConfigPats()
}
func (ev *PFCMaintEnv) ConfigPats() {
npats := ev.NItems
ev.Pats.Init()
ev.Pats.DeleteAll()
ev.Pats.AddStringColumn("Name")
ev.Pats.AddFloat32Column("Item", ev.NUnitsY, ev.NUnitsX)
ev.Pats.SetNumRows(npats)
pctAct := float32(0.2)
minPctDiff := float32(0.5)
nUn := ev.NUnitsY * ev.NUnitsX
nOn := patterns.NFromPct(float64(pctAct), nUn)
minDiff := patterns.NFromPct(float64(minPctDiff), nOn)
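// generate random binary patterns with nOn active (1) units each, requiring at least minDiff differing active units between any two patterns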
patterns.PermutedBinaryMinDiff(ev.Pats.Columns.At("Item"), nOn, 1, 0, minDiff)
}
func (ev *PFCMaintEnv) Init(run int) {
ev.Sequence.Init()
ev.Trial.Init()
}
func (ev *PFCMaintEnv) State(el string) tensor.Values {
return ev.States[el]
}
// RenderLocalist renders a localist representation for the named state, activating the column at the given index across all NUnitsY rows.
func (ev *PFCMaintEnv) RenderLocalist(name string, idx int) {
av := ev.States[name]
av.SetZeros()
for yi := 0; yi < ev.NUnitsY; yi++ {
av.Set(1, yi, idx)
}
}
// RenderState renders the given item and trial.
func (ev *PFCMaintEnv) RenderState(item, trial int) {
st := ev.States["Item"]
st.CopyFrom(ev.Pats.Column("Item").RowTensor(item))
ev.RenderLocalist("Time", trial)
st = ev.States["GPi"]
st.CopyFrom(ev.Pats.Column("Item").RowTensor(item))
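// GPi input uses the same item pattern, except it is zeroed on the first trial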
if trial == 0 {
st.SetZeros()
}
}
// Step does one step -- must set Trial.Cur first if doing testing
func (ev *PFCMaintEnv) Step() bool {
item := ev.StartItem + ev.Sequence.Cur
ev.RenderState(item, ev.Trial.Cur)
ev.Sequence.Same()
if ev.Trial.Incr() {
ev.Sequence.Incr()
}
return true
}
func (ev *PFCMaintEnv) Action(action string, nop tensor.Values) {
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// pfcmaint: This project tests prefrontal cortex (PFC) active
// maintenance mechanisms supported by the pyramidal tract (PT) neurons,
// in the PTMaint layer type.
package pfcmaint
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at the given mode and level,
// to perform all stats computations. phase = Start does initialization at the
// start of the given level, and all configuration (also called during Init).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
ndata := ss.Config.Run.NData
for di := 0; di < ndata; di++ {
var trn, tst *PFCMaintEnv
if newEnv {
trn = &PFCMaintEnv{}
tst = &PFCMaintEnv{}
} else {
trn = ss.Envs.ByModeDi(etime.Train, di).(*PFCMaintEnv)
tst = ss.Envs.ByModeDi(etime.Test, di).(*PFCMaintEnv)
}
// note: names must be standard here!
trn.Name = env.ModeDi(etime.Train, di)
trn.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(etime.Train, di, ndata, 73) // same seeds so same pats
tst.Name = env.ModeDi(etime.Test, di)
tst.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(etime.Test, di, ndata, 181)
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByModeDi(Train, 0).(*PFCMaintEnv)
space := float32(2)
full := paths.NewFull()
nun := ss.Config.Params.NUnits
if nun <= 0 {
nun = 7
}
in, inP := net.AddInputPulv2D("Item", ev.NUnitsY, ev.NUnitsX, space)
time, timeP := net.AddInputPulv2D("Time", ev.NUnitsY, ev.NTrials, space)
gpi := net.AddLayer2D("GPi", axon.InputLayer, ev.NUnitsY, ev.NUnitsX)
pfc, pfcCT, pfcPT, pfcPTp, pfcThal := net.AddPFC2D("PFC", "Thal", nun, nun, true, !ss.Config.Params.MaintCons, space)
_ = pfcPT
_ = pfcThal
net.ConnectToPFCBack(in, inP, pfc, pfcCT, pfcPT, pfcPTp, full, "InputToPFC")
net.ConnectToPFCBack(time, timeP, pfc, pfcCT, pfcPT, pfcPTp, full, "InputToPFC")
net.ConnectLayers(gpi, pfcThal, full, axon.InhibPath)
time.PlaceRightOf(in, space)
gpi.PlaceRightOf(time, space)
pfcThal.PlaceRightOf(gpi, space)
pfc.PlaceAbove(in)
pfcPT.PlaceRightOf(pfc, space)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
ss.ConfigRubicon()
}
func (ss *Sim) ConfigRubicon() {
rp := &ss.Net.Rubicon
rp.SetNUSs(1, 1)
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
ss.ConfigEnv() // always do -- otherwise env params not reset after run
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
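// round the configured Trials up to an even multiple of NData, since each Trial step advances by NData items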
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) {
trial := ls.Stacks[mode].Loops[Trial].Counter.Cur
theta := ls.Stacks[mode].Loops[Trial].Counter.Cur
ss.ApplyInputs(mode.(Modes), trial, theta)
},
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes, trial, theta int) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
lays := []string{"Item", "Time", "GPi"}
net.InitExt()
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*PFCMaintEnv)
ev.Step()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
ss.ApplyRubicon(ev, mode, theta, uint32(di))
}
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs
func (ss *Sim) ApplyRubicon(ev *PFCMaintEnv, mode Modes, trial int, di uint32) {
rp := &ss.Net.Rubicon
rp.NewState(di, &ss.Net.Rand) // first before anything else is updated
if ev.Trial.Cur == 0 { // reset maint on rew -- trial counter wraps around to 0
axon.GlobalSetRew(di, 1, true)
}
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
ss.Envs.ByModeDi(Test, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train TrialAll Plot")
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Epoch))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that perform all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"ItemP_CorSim", "TimeP_CorSim"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
})
continue
}
switch level {
case Trial:
itemly := ss.Net.LayerByName("ItemP")
timely := ss.Net.LayerByName("TimeP")
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*PFCMaintEnv)
var stat float64
switch name {
case "ItemP_CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(itemly.Index), int(di), int(axon.LayerPhaseDiff)))
case "TimeP_CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(timely.Index), int(di), int(axon.LayerPhaseDiff)))
}
if ev.Trial.Prev == 0 { // unpredictable
stat = 1
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
prevCorFunc := axon.StatPrevCorSim(ss.Stats, ss.Current, net, Trial, Run, "ItemP", "TimeP")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
prevCorFunc(mode, level, phase == Start)
})
lays := net.LayersByClass("PFC")
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"ItemP_CorSim", "TimeP_CorSim"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Value(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.15, 2.45)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/pfcmaint"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[pfcmaint.Sim, pfcmaint.Config]() }
// Code generated by "core generate -add-types -gosl"; DO NOT EDIT.
package cond
import (
"cogentcore.org/core/enums"
)
var _ValenceValues = []Valence{0, 1}
// ValenceN is the highest valid value for type Valence, plus one.
//
//gosl:start
const ValenceN Valence = 2
//gosl:end
var _ValenceValueMap = map[string]Valence{`Pos`: 0, `Neg`: 1}
var _ValenceDescMap = map[Valence]string{0: ``, 1: ``}
var _ValenceMap = map[Valence]string{0: `Pos`, 1: `Neg`}
// String returns the string representation of this Valence value.
func (i Valence) String() string { return enums.String(i, _ValenceMap) }
// SetString sets the Valence value from its string representation,
// and returns an error if the string is invalid.
func (i *Valence) SetString(s string) error {
return enums.SetString(i, s, _ValenceValueMap, "Valence")
}
// Int64 returns the Valence value as an int64.
func (i Valence) Int64() int64 { return int64(i) }
// SetInt64 sets the Valence value from an int64.
func (i *Valence) SetInt64(in int64) { *i = Valence(in) }
// Desc returns the description of the Valence value.
func (i Valence) Desc() string { return enums.Desc(i, _ValenceDescMap) }
// ValenceValues returns all possible values for the type Valence.
func ValenceValues() []Valence { return _ValenceValues }
// Values returns all possible values for the type Valence.
func (i Valence) Values() []enums.Enum { return enums.Values(_ValenceValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Valence) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Valence) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Valence") }
var _TickTypesValues = []TickTypes{0, 1, 2, 3, 4}
// TickTypesN is the highest valid value for type TickTypes, plus one.
//
//gosl:start
const TickTypesN TickTypes = 5
//gosl:end
var _TickTypesValueMap = map[string]TickTypes{`Pre`: 0, `CS`: 1, `Maint`: 2, `US`: 3, `Post`: 4}
var _TickTypesDescMap = map[TickTypes]string{0: `Pre is before the CS`, 1: `CS is CS onset`, 2: `Maint is after CS before US`, 3: `US is the US`, 4: `Post is after the US`}
var _TickTypesMap = map[TickTypes]string{0: `Pre`, 1: `CS`, 2: `Maint`, 3: `US`, 4: `Post`}
// String returns the string representation of this TickTypes value.
func (i TickTypes) String() string { return enums.String(i, _TickTypesMap) }
// SetString sets the TickTypes value from its string representation,
// and returns an error if the string is invalid.
func (i *TickTypes) SetString(s string) error {
return enums.SetString(i, s, _TickTypesValueMap, "TickTypes")
}
// Int64 returns the TickTypes value as an int64.
func (i TickTypes) Int64() int64 { return int64(i) }
// SetInt64 sets the TickTypes value from an int64.
func (i *TickTypes) SetInt64(in int64) { *i = TickTypes(in) }
// Desc returns the description of the TickTypes value.
func (i TickTypes) Desc() string { return enums.Desc(i, _TickTypesDescMap) }
// TickTypesValues returns all possible values for the type TickTypes.
func TickTypesValues() []TickTypes { return _TickTypesValues }
// Values returns all possible values for the type TickTypes.
func (i TickTypes) Values() []enums.Enum { return enums.Values(_TickTypesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i TickTypes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *TickTypes) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "TickTypes")
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cond
//go:generate core generate -add-types -gosl
import (
"fmt"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
)
// CondEnv provides a flexible implementation of standard Pavlovian
// conditioning experiments involving CS -> US sequences.
// Has a large database of standard conditioning paradigms
// parameterized in a controlled manner.
//
// Time hierarchy:
// * Run = sequence of 1 or more Conditions
// * Condition = specific mix of sequence types, generated at start of Condition
// * Block = one full pass through all sequence types generated for condition (like Epoch)
// * Sequence = one behavioral trial consisting of CS -> US presentation over time steps (Ticks)
// * Tick = discrete time steps within behavioral Sequence, typically one Network update (Alpha / Theta cycle)
type CondEnv struct {
// name of this environment
Name string
// number of Y repetitions for localist reps
NYReps int
// current run name
RunName string
// description of current run
RunDesc string
// name of current condition
CondName string
// description of current condition
CondDesc string
// counter over runs
Run env.Counter `edit:"-" display:"inline"`
// counter over Condition within a run -- Max depends on number of conditions specified in given Run
Condition env.Counter `edit:"-" display:"inline"`
// counter over full blocks of all sequence types within a Condition -- like an Epoch
Block env.Counter `edit:"-" display:"inline"`
// counter of behavioral sequences within a Block
Sequence env.Counter `edit:"-" display:"inline"`
// counter of discrete steps within a sequence -- typically maps onto Alpha / Theta cycle in network
Tick env.Counter `edit:"-" display:"inline"`
// name of current sequence step
SequenceName string `edit:"-"`
// type of current sequence step
SequenceType string `edit:"-"`
// decoded value of USTimeIn
USTimeInStr string `edit:"-"`
// current generated set of sequences per Block
Sequences []*Sequence
// copy of current run parameters
CurRun Run
// the current rendered tick
CurTick Sequence
// current rendered state tensors -- extensible map
CurStates map[string]*tensor.Float32
}
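// exampleCondEnvLoop is an illustrative sketch (not called by the sim itself)
// of how the Run > Condition > Block > Sequence > Tick hierarchy above is
// typically driven: Config a named run, Init, then Step until the Run counter
// wraps. It assumes the named run exists in AllRuns (see the runs file in this
// package), and that the caller applies the rendered state tensors to network inputs.
func exampleCondEnvLoop() {
	ev := &CondEnv{Name: "Demo"}
	ev.Config(1, "PosAcq_A100B50") // 1 run of this standard acquisition paradigm
	ev.Init(0)
	for ev.Step() { // Step returns false when all runs are complete
		cs := ev.State("CS") // current CS input tensor for this tick
		_ = cs               // apply to network inputs here
	}
}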
func (ev *CondEnv) Config(rmax int, rnm string) {
ev.RunName = rnm
ev.Run.Max = rmax
ev.NYReps = 4
ev.CurStates = make(map[string]*tensor.Float32)
stsh := []int{StimShape[0], StimShape[1], ev.NYReps, 1}
ev.CurStates["CS"] = tensor.NewFloat32(stsh...)
ctsh := []int{ContextShape[0], ContextShape[1], ev.NYReps, 1}
ev.CurStates["ContextIn"] = tensor.NewFloat32(ctsh...)
ustsh := make([]int, 4)
copy(ustsh, USTimeShape)
ustsh[2] = ev.NYReps
ev.CurStates["USTimeIn"] = tensor.NewFloat32(ustsh...)
ev.CurStates["Time"] = tensor.NewFloat32(1, MaxTime, ev.NYReps, 1)
ussh := []int{USShape[0], USShape[1], ev.NYReps, 1}
ev.CurStates["USpos"] = tensor.NewFloat32(ussh...)
ev.CurStates["USneg"] = tensor.NewFloat32(ussh...)
}
func (ev *CondEnv) Label() string { return ev.Name }
func (ev *CondEnv) String() string { return ev.SequenceName }
// Init sets current run index and max
func (ev *CondEnv) Init(ridx int) {
run := AllRuns[ev.RunName]
ev.CurRun = *run
ev.RunDesc = run.Desc
ev.Run.Set(ridx)
ev.Condition.Init()
ev.Condition.Max = run.NConds()
ev.InitCond()
ev.Tick.Cur = -1
}
// InitCond initializes for current condition index
func (ev *CondEnv) InitCond() {
if ev.RunName == "" {
ev.RunName = "PosAcq_A100B50"
}
run := AllRuns[ev.RunName]
run.Name = ev.RunName
cnm, cond := run.Cond(ev.Condition.Cur)
ev.CondName = cnm
ev.CondDesc = cond.Desc
ev.Block.Init()
ev.Block.Max = cond.NBlocks
ev.Sequence.Init()
ev.Sequence.Max = cond.NSequences
ev.Sequences = SequenceReps(cnm)
ev.Tick.Init()
trl := ev.Sequences[0]
ev.Tick.Max = trl.NTicks
}
func (ev *CondEnv) State(element string) tensor.Values {
return ev.CurStates[element]
}
func (ev *CondEnv) Step() bool {
ev.Condition.Same()
ev.Block.Same()
ev.Sequence.Same()
if ev.Tick.Incr() {
if ev.Sequence.Incr() {
if ev.Block.Incr() {
if ev.Condition.Incr() {
if ev.Run.Incr() {
return false
}
}
ev.InitCond()
}
}
trl := ev.Sequences[ev.Sequence.Cur]
ev.Tick.Max = trl.NTicks
}
ev.RenderSequence(ev.Sequence.Cur, ev.Tick.Cur)
return true
}
func (ev *CondEnv) Action(_ string, _ tensor.Values) {
// nop
}
func (ev *CondEnv) RenderSequence(trli, tick int) {
for _, tsr := range ev.CurStates {
tsr.SetZeros()
}
trl := ev.Sequences[trli]
ev.CurTick = *trl
ev.SequenceName = fmt.Sprintf("%s_%d", trl.CS, tick)
ev.SequenceType = ev.CurTick.Name
ev.CurTick.Type = Pre
stim := ev.CurStates["CS"]
ctxt := ev.CurStates["ContextIn"]
ustime := ev.CurStates["USTimeIn"]
time := ev.CurStates["Time"]
SetTime(time, ev.NYReps, tick)
if tick >= trl.CSStart && tick <= trl.CSEnd {
ev.CurTick.CSOn = true
if tick == trl.CSStart {
ev.CurTick.Type = CS
} else {
ev.CurTick.Type = Maint
}
cs := trl.CS[0:1]
stidx := SetStim(stim, ev.NYReps, cs)
SetUSTime(ustime, ev.NYReps, stidx, tick, trl.CSStart, trl.CSEnd)
}
if (len(trl.CS) > 1) && (tick >= trl.CS2Start) && (tick <= trl.CS2End) {
ev.CurTick.CSOn = true
if tick == trl.CS2Start {
ev.CurTick.Type = CS
} else {
ev.CurTick.Type = Maint
}
cs := trl.CS[1:2]
stidx := SetStim(stim, ev.NYReps, cs)
SetUSTime(ustime, ev.NYReps, stidx, tick, trl.CSStart, trl.CSEnd)
}
minStart := trl.CSStart
if trl.CS2Start > 0 {
minStart = min(minStart, trl.CS2Start)
}
maxEnd := max(trl.CSEnd, trl.CS2End)
if tick >= minStart && tick <= maxEnd {
SetContext(ctxt, ev.NYReps, trl.Context)
}
if tick == maxEnd+1 {
// use last stimulus for US off signal
SetUSTime(ustime, ev.NYReps, NStims-1, MaxTime, 0, MaxTime)
}
ev.CurTick.USOn = false
if trl.USOn && (tick >= trl.USStart) && (tick <= trl.USEnd) {
ev.CurTick.USOn = true
if trl.Valence == Pos {
SetUS(ev.CurStates["USpos"], ev.NYReps, trl.US, trl.USMag)
ev.SequenceName += fmt.Sprintf("_Pos%d", trl.US)
}
if trl.Valence == Neg || trl.MixedUS {
SetUS(ev.CurStates["USneg"], ev.NYReps, trl.US, trl.USMag)
ev.SequenceName += fmt.Sprintf("_Neg%d", trl.US)
}
}
if (tick >= trl.USStart) && (tick <= trl.USEnd) {
ev.CurTick.Type = US // even if not on, this is the type
}
if tick > trl.USEnd {
ev.CurTick.Type = Post
}
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cond
import "cogentcore.org/lab/tensor"
var (
NUSs = 4
NStims = 12
MaxTime = DefTicks + 3
USShape = []int{1, NUSs}
StimShape = []int{3, 4}
ContextShape = []int{6, 5}
// USTimeShape is overall shape of USTime
USTimeShape = []int{StimShape[0], StimShape[1], 1, MaxTime}
// USTimeOff is activated when the US goes off
USTimeOff = []int{StimShape[0] - 1, StimShape[1] - 1, 0, 5}
// Stims maps stimuli to indexes for input layer
Stims = map[string]int{
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"U": 6,
"V": 7,
"W": 8,
"X": 9,
"Y": 10,
"Z": 11,
}
// Contexts maps contexts to indexes for input layer
Contexts = map[string]int{
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"U": 6,
"V": 7,
"W": 8,
"X": 9,
"Y": 10,
"Z": 11,
"AB": 12,
"AC": 13,
"AX": 14,
"AY": 15,
"AZ": 16,
"BX": 17,
"BY": 18,
"BZ": 19,
"CX": 20,
"CY": 21,
"CZ": 22,
"DU": 23,
"ED": 24,
"EU": 25,
"EV": 26,
"FV": 27,
"A_B": 28,
}
)
// StimIndex returns index for given stimulus
func StimIndex(stm string) int {
return Stims[stm]
}
// StimYX returns stimulus YX indexes for stimulus number
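// For example, with StimShape = {3, 4}, stimulus index 6 ("U") maps to y = 1, x = 2.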
func StimYX(stidx int) []int {
y := stidx / StimShape[1]
x := stidx % StimShape[1]
return []int{y, x}
}
// SetStim sets stimulus for given input, returns index
func SetStim(tsr *tensor.Float32, nyrep int, stm string) int {
stidx := StimIndex(stm)
xy := StimYX(stidx)
xy = append(xy, 0)
xy = append(xy, 0)
for y := 0; y < nyrep; y++ {
xy[2] = y
tsr.Set(1, xy...)
}
return stidx
}
// ContextIndex returns index for given context
func ContextIndex(ctx string) int {
return Contexts[ctx]
}
// ContextYX returns context YX indexes for context number
func ContextYX(ctidx int) []int {
y := ctidx / ContextShape[1]
x := ctidx % ContextShape[1]
return []int{y, x}
}
// SetContext sets context for given input
func SetContext(tsr *tensor.Float32, nyrep int, ctx string) int {
ctidx := ContextIndex(ctx)
xy := ContextYX(ctidx)
xy = append(xy, 0)
xy = append(xy, 0)
for y := 0; y < nyrep; y++ {
xy[2] = y
tsr.Set(1, xy...)
}
return ctidx
}
// USTimeIndex returns the index for US time based on stimulus, tick, start, and end.
// Returns nil if not active.
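// For example, with start = 2, a tick of 2 gives tm = 0 and returns nil,
// while tick = 3 returns the stimulus YX location with a time index of 0.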
func USTimeIndex(stidx, tick, start, end int) []int {
tm := tick - start
if tm < 1 {
return nil
}
if tick > end {
return nil
}
st := StimYX(stidx)
st = append(st, 0)
st = append(st, tm-1)
return st
}
// SetUSTime sets the USTime input based on the given values.
// Returns false if not set.
func SetUSTime(tsr *tensor.Float32, nyrep, stidx, tick, start, end int) bool {
idx := USTimeIndex(stidx, tick, start, end)
if idx == nil {
return false
}
for y := 0; y < nyrep; y++ {
idx[2] = y
tsr.Set(1, idx...)
}
return true
}
// SetTime sets Time input
func SetTime(tsr *tensor.Float32, nyrep int, tick int) {
if tick < 0 {
tick = 0
}
idx := []int{0, tick, 0, 0}
for y := 0; y < nyrep; y++ {
idx[2] = y
tsr.Set(1, idx...)
}
}
// SetUS sets US input
func SetUS(tsr *tensor.Float32, nyrep int, pv int, mag float32) {
idx := []int{0, pv, 0, 0}
for y := 0; y < nyrep; y++ {
idx[2] = y
tsr.Set(mag, idx...)
}
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cond
// Run is a sequence of Conditions to run in order
type Run struct {
// Name of the run
Name string
// Description
Desc string
// name of condition for weights file to load prior to starting -- allows faster testing but weights may be out of date
Weights string
// name of condition 1
Cond1 string
// name of condition 2
Cond2 string
// name of condition 3
Cond3 string
// name of condition 4
Cond4 string
// name of condition 5
Cond5 string
}
// NConds returns the number of conditions in this Run
func (rn *Run) NConds() int {
switch {
case rn.Cond5 != "":
return 5
case rn.Cond4 != "":
return 4
case rn.Cond3 != "":
return 3
case rn.Cond2 != "":
return 2
default:
return 1
}
}
// Cond returns the condition name and Condition at the given index
func (rn *Run) Cond(cidx int) (string, *Condition) {
cnm := ""
switch cidx {
case 0:
cnm = rn.Cond1
case 1:
cnm = rn.Cond2
case 2:
cnm = rn.Cond3
case 3:
cnm = rn.Cond4
case 4:
cnm = rn.Cond5
}
return cnm, AllConditions[cnm]
}
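// exampleRunConds is an illustrative sketch (not called elsewhere) showing how
// to iterate over the conditions of a named run using NConds and Cond above.
// It assumes the named run exists in AllRuns (see the runs file in this package).
func exampleRunConds() {
	run := AllRuns["PosAcq_A100B50"]
	for ci := 0; ci < run.NConds(); ci++ {
		cnm, cond := run.Cond(ci)
		_ = cnm  // condition name, e.g., "PosAcq_A100B50"
		_ = cond // *Condition with its block, NBlocks, NSequences, etc.
	}
}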
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cond
import "sort"
var RunNames []string
func init() {
RunNames = make([]string, len(AllRuns))
idx := 0
for nm := range AllRuns {
RunNames[idx] = nm
idx++
}
sort.Strings(RunNames)
}
var AllRuns = map[string]*Run{
"PosAcq_A100": {
Desc: "Standard positive valence acquisition: A = 100%",
Cond1: "PosAcq_A100",
},
"PosExt_A0": {
Desc: "extinguish positive valence: A_NR_Pos -- typically use after some amount of PosAcq_A100",
Cond1: "PosExt_A0",
},
"PosAcqExt_A100Wts": {
Desc: "Load weights of acquisition A 100%, go directly to extinguish -- must save weights from PosAcq_A100 first",
Weights: "PosAcq_A100",
Cond1: "PosExt_A0",
},
"PosAcqExt_A100_A0": {
Desc: "Standard positive valence acquisition: A = 100%, then extinction A0",
Cond1: "PosAcq_A100",
Cond2: "PosExt_A0",
},
"PosAcq_A100B50": {
Desc: "Standard positive valence acquisition: A = 100%, B = 50%",
Cond1: "PosAcq_A100B50",
},
"PosAcq_A100B0": {
Desc: "Standard positive valence acquisition: A = 100%, B = 0%",
Cond1: "PosAcq_A100B0",
},
"PosExt_A0B0": {
Desc: "extinguish positive valence: A_NR_Pos, B_NR_Pos",
Cond1: "PosExt_A0B0",
},
"PosAcq_A50": {
Desc: "A = 50%",
Cond1: "PosAcq_A50",
},
"PosAcq_ACycle100_50_0_Blk10": {
Desc: "A transitions: 100%, 50%, 0%, 50%, 100% for 10 blocks each",
Cond1: "PosAcq_A100",
Cond2: "PosAcq_A50_Blk10",
Cond3: "PosExt_A0_Blk10",
Cond4: "PosAcq_A50_Blk10",
Cond5: "PosAcq_A100_Blk10",
},
"PosAcq_ACycle100_50_0_Blk20": {
Desc: "A transitions: 100%, 50%, 0%, 50%, 100% for 20 blocks each",
Cond1: "PosAcq_A100",
Cond2: "PosAcq_A50_Blk20",
Cond3: "PosExt_A0_Blk20",
Cond4: "PosAcq_A50_Blk20",
Cond5: "PosAcq_A100_Blk20",
},
"PosAcqExt_A100B50_A0B0": {
Desc: "positive valence acquisition A=100%, B=50%, then extinguish A, B = 0%",
Cond1: "PosAcq_A100B50",
Cond2: "PosExt_A0B0",
},
"PosAcqExt_A100B50_Wts": {
Desc: "Load weights of acquisition A = 100%, B = 50%, go directly to extinguish -- must save weights from PosAcq_A100B50",
Weights: "PosAcq_A100B50",
Cond1: "PosExt_A0B0",
},
"PosAcqExtAcq_A100B50_A0B0_A100B50": {
Desc: "Full cycle: acq, ext, acq, A=100%, B=50%, then extinguish, then acq again, marked as ReAcq",
Cond1: "PosAcq_A100B50",
Cond2: "PosExt_A0B0",
Cond3: "PosReAcq_A100B50",
},
"PosAcqExtAcq_A100_A0_A100": {
Desc: "Full cycle: acq, ext, acq, A=100%, then extinguish, then acq again, marked as ReAcq",
Cond1: "PosAcq_A100",
Cond2: "PosExt_A0",
Cond3: "PosReAcq_A100",
},
"PosAcqExt_A100B100": {
Desc: "",
Cond1: "PosAcq_A100B100",
Cond2: "PosExt_A0B0",
},
"PosAcq_A100B25": {
Desc: "",
Cond1: "PosAcq_A100B25",
},
"NegAcq_D100": {
Desc: "",
Cond1: "NegAcq_D100",
},
"NegAcq_D100E25": {
Desc: "",
Cond1: "NegAcq_D100E25",
},
"NegAcqMag": {
Desc: "",
Cond1: "NegAcqMag",
},
"PosAcqMag": {
Desc: "",
Cond1: "PosAcqMag",
},
"NegAcqExt_D100": {
Desc: "",
Cond1: "NegAcq_D100",
Cond2: "NegExt_D0",
},
"NegExt_D0": {
Desc: "",
Cond1: "NegExt_D0",
},
"NegExt_D100Wts": {
Desc: "Load weights of negative acquisition D 100%, go directly to extinguish -- must save weights from NegAcq_D100 first",
Weights: "NegAcq_D100",
Cond1: "NegExt_D0",
},
"NegAcqExt_D100E25": {
Desc: "",
Cond1: "NegAcq_D100E25",
Cond2: "NegExt_D0E0",
},
"NegExt_D0E0": {
Desc: "",
Cond1: "NegExt_D0E0",
},
"PosCondInhib": {
Desc: "",
Cond1: "PosAcq_cxA",
Cond2: "PosCondInhib",
Cond3: "PosCondInhib_test",
},
"PosSecondOrderCond": {
Desc: "",
Cond1: "PosAcqPreSecondOrder",
Cond2: "PosSecondOrderCond",
},
"PosBlocking": {
Desc: "",
Cond1: "PosBlocking_A_train",
Cond2: "PosBlocking",
Cond3: "PosBlocking_test",
},
"PosBlocking2": {
Desc: "",
Cond1: "PosBlocking_A_train",
Cond2: "PosBlocking",
Cond3: "PosBlocking2_test",
},
"NegCondInhib": {
Desc: "",
Cond1: "NegAcq_D100E25",
Cond2: "NegCondInh",
Cond3: "NegCondInh_test",
},
"AbaRenewal": {
Desc: "",
Cond1: "PosAcq_cxA",
Cond2: "PosExtinct_cxB",
Cond3: "PosRenewal_cxA",
},
"NegBlocking": {
Desc: "",
Cond1: "NegBlocking_E_train",
Cond2: "NegBlocking",
Cond3: "NegBlocking_test",
},
"PosSum_test": {
Desc: "",
Cond1: "PosSumAcq",
Cond2: "PosSumCondInhib",
Cond3: "PosSum_test",
},
"NegSum_test": {
Desc: "",
Cond1: "NegSumAcq",
Cond2: "NegSumCondInhib",
Cond3: "NegSum_test",
},
"UnblockingValue": {
Desc: "",
Cond1: "Unblocking_train",
Cond2: "UnblockingValue",
Cond3: "UnblockingValue_test",
},
"UnblockingIdentity": {
Desc: "",
Cond1: "Unblocking_trainUS",
Cond2: "UnblockingIdentity",
Cond3: "UnblockingIdentity_test",
},
"Overexpect": {
Desc: "",
Cond1: "Overexpect_train",
Cond2: "OverexpectCompound",
Cond3: "Overexpect_test",
},
"PosMagChange": {
Desc: "",
Cond1: "PosAcqMag",
Cond2: "PosAcqMagChange",
Cond3: "Overexpect_test",
},
"NegMagChange": {
Desc: "",
Cond1: "NegAcqMag",
Cond2: "NegAcqMagChange",
},
"PosNeg": {
Desc: "",
Cond1: "PosOrNegAcq",
},
"PosAcqEarlyUSTest": {
Desc: "",
Cond1: "PosAcq_A100B50",
Cond2: "PosAcqEarlyUS_test",
},
"PosOrNegAcq": {
Desc: "",
Cond1: "PosOrNegAcq",
},
"PosCondInhib_test": {
Desc: "For debugging",
Cond1: "PosCondInhib_test",
},
"US0": {
Desc: "",
Cond1: "US0",
},
}
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cond
import (
"math/rand"
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/randx"
)
// Valence
type Valence int32 //enums:enum
const (
Pos Valence = iota
Neg
)
// TickTypes
type TickTypes int32 //enums:enum
const (
// Pre is before the CS
Pre TickTypes = iota
// CS is CS onset
CS
// Maint is after CS before US
Maint
// US is the US
US
// Post is after the US
Post
)
// Sequence represents a sequence of ticks for one behavioral trial, unfolding over
// NTicks individual time steps, with one or more CS's (conditioned stimuli)
// and a US (unconditioned stimulus -- outcome).
type Sequence struct {
// conventional suffixes: _R = reward, _NR = non-reward; _test = test trial (no learning)
Name string
// true if testing only -- no learning
Test bool
// Percent of all trials for this type
Pct float32
// Positive or negative reward valence
Valence Valence
// Probability of US
USProb float32
// Mixed US set?
MixedUS bool
// US magnitude
USMag float32
// Number of ticks for a sequence
NTicks int
// Conditioned stimulus
CS string
// Tick of CS start
CSStart int
// Tick of CS end
CSEnd int
// Tick of CS2 start: -1 for none
CS2Start int
// Tick of CS2 end: -1 for none
CS2End int
// Unconditioned stimulus
US int
// Tick for start of US presentation
USStart int
// Tick for end of US presentation
USEnd int
// Context -- typically same as CS -- if blank CS will be copied -- different in certain extinguishing contexts
Context string
// for rendered sequence, true if US active
USOn bool
// for rendered sequence, true if CS active
CSOn bool
// for rendered sequence, the tick type
Type TickTypes
}
// Block represents a set of sequence types
type Block []*Sequence
func (cd *Block) Length() int {
return len(*cd)
}
func (cd *Block) Append(seq *Sequence) {
*cd = append(*cd, seq)
}
// SequenceReps generates repetitions of specific sequence types
// for the given condition name, based on each sequence type's Pct
// of the condition's total NSequences, and sets the USOn flag for
// a proportion of sequences based on the USProb probability.
// If Condition.Permute is true, the order of all sequences is permuted.
// The block name is obtained from the condition.
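// For example, with NSequences = 20, a sequence type with Pct = 0.2 yields 4
// repetitions; if USProb = 0.5 and FixedProb is true, exactly 2 of those have
// USOn set, otherwise each is drawn independently with p = 0.5.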
func SequenceReps(condNm string) []*Sequence {
var seqs []*Sequence
cond := AllConditions[condNm]
cond.Name = condNm
block := AllBlocks[cond.Block]
for _, seq := range block {
if seq.Context == "" {
seq.Context = seq.CS
}
nRpt := int(math32.Round(seq.Pct * float32(cond.NSequences)))
if nRpt < 1 {
if seq.Pct > 0.0 {
nRpt = 1
} else {
continue // shouldn't happen
}
}
useIsOnList := false
var usIsOn []bool
if cond.FixedProb {
if seq.USProb != 0.0 && seq.USProb != 1.0 {
useIsOnList = true
pn := int(math32.Round(float32(nRpt) * seq.USProb))
usIsOn = make([]bool, nRpt) // defaults to false
for i := 0; i < pn; i++ {
usIsOn[i] = true
}
rand.Shuffle(len(usIsOn), func(i, j int) {
usIsOn[i], usIsOn[j] = usIsOn[j], usIsOn[i]
})
}
}
for ri := 0; ri < nRpt; ri++ {
trlNm := seq.Name + "_" + seq.Valence.String()
usOn := false
if !useIsOnList {
usOn = randx.BoolP32(seq.USProb)
} else {
usOn = usIsOn[ri]
}
curSeq := &Sequence{}
*curSeq = *seq
curSeq.Name = trlNm
curSeq.USOn = usOn
seqs = append(seqs, curSeq)
}
}
if cond.Permute {
rand.Shuffle(len(seqs), func(i, j int) {
seqs[i], seqs[j] = seqs[j], seqs[i]
})
}
return seqs
}
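// CountUSOn is a small usage sketch, not part of the original API: it expands
// the named condition (assumed to exist in AllConditions) via SequenceReps and
// counts how many of the resulting sequences have the US turned on, which for
// FixedProb conditions should closely track NSequences * USProb.
func CountUSOn(condNm string) int {
n := 0
for _, sq := range SequenceReps(condNm) {
if sq.USOn {
n++
}
}
return n
}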
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pvlv
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
// RunName is the name of the overall environment run configuration to use.
RunName string `default:"PosAcq_A100B50"`
// SetNBlocks overrides the default number of blocks to run conditions with NBlocks.
SetNBlocks bool
// NBlocks is number of blocks to run if SetNBlocks is true.
NBlocks int
}
func (ec *EnvConfig) ShouldDisplay(field string) bool {
switch field {
case "NBlocks":
return ec.SetNBlocks
default:
return true
}
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Rubicon parameters: can set any field/subfield on Net.Rubicon params,
// using standard TOML formatting.
Rubicon map[string]any
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"25" min:"1"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Testing activates testing mode: records detailed data for Go CI tests
// (not the same as running test mode on network, via Looper).
Testing bool
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "PVLV"
cfg.Title = "Primary Value, Learned Value"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/pvlv/README.md"
cfg.Doc = "Simulates the primary value, learned value model of classical conditioning and phasic dopamine in the amygdala, ventral striatum and associated areas."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package pvlv
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4, 5}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 6
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Sequence`: 2, `Block`: 3, `Condition`: 4, `Run`: 5}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Sequence`, 3: `Block`, 4: `Condition`, 5: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pvlv
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: ".InputLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.Decay.Act = 1.0
ly.Acts.Decay.Glong = 1.0
}},
{Sel: "#CS", Doc: "expect act",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.05 // 1 / css
}},
{Sel: "#ContextIn", Doc: "expect act",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.025 // 1 / css
}},
{Sel: ".VSPatchLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.Gi = 0.5 // 0.5 needed for differentiation
ly.Inhib.Layer.Gi = 0.5
ly.Learn.NeuroMod.DipGain = 1 // boa requires balanced..
ly.Learn.TrgAvgAct.GiBaseInit = 0 // 0.5 def; 0 faster
ly.Learn.RLRate.SigmoidMin = 0.05 // 0.05 def
ly.Learn.NeuroMod.AChLRateMod = 0
}},
{Sel: ".VTALayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.VTA.CeMGain = 0.5 // 0.75 def -- controls size of CS burst
ly.VTA.LHbGain = 1.25 // 1.25 def -- controls size of PV DA
ly.VTA.AChThr = 0.5 // prevents non-CS-onset CS DA
}},
{Sel: ".MatrixLayer", Doc: "all mtx",
Set: func(ly *axon.LayerParams) {
ly.Acts.Kir.Gk = 2 // 10 > 5 > 2 -- key for pause
ly.Learn.RLRate.On.SetBool(true) // only used for non-rew trials -- key
ly.Learn.RLRate.Diff.SetBool(false)
}},
{Sel: "#BLAposExtD2", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.8 // 1.8 puts just under water
ly.Inhib.Pool.Gi = 1.0
ly.Learn.NeuroMod.DAModGain = 0 // critical to be 0 -- otherwise prevents CS onset activity!
}},
{Sel: ".PTMaintLayer", Doc: "time integration params",
Set: func(ly *axon.LayerParams) {
ly.Acts.Dend.ModGain = 1.5
// ly.Inhib.Layer.Gi = 3.0
// ly.Inhib.Pool.Gi = 3.6
}},
{Sel: "#OFCposPT", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.SMaint.Ge = 0.4
}},
{Sel: ".PTPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.CT.GeGain = 0.1 // 0.05 orig; stronger ptp
}},
{Sel: ".LDTLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.LDT.MaintInhib = 0.8 // 0.8 def; AChThr = 0.5 typically
}},
{Sel: "#OFCposPTp", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.Gi = 1
ly.Inhib.ActAvg.Nominal = 0.025 // 0.1 -- affects how strongly BLA is driven -- key param
}},
// {Sel: "#OFCposPT", Doc: "",
// Set: func(ly *axon.LayerParams) {
// ly.Acts.SMaint.Gbar = 0.4 // 0.2 def fine
// }},
{Sel: "#SC", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Acts.KNa.Slow.Gk = 0.5 // .5 needed to shut off
}},
{Sel: "#CostP", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Pulvinar.DriveScale = 0.2 // 0.1 def
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: ".VSMatrixPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.DWt.LearnThr = 0.1 // prevents learning below this thr: preserves low act
}},
{Sel: ".SuperToThal", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4.0 // 4 > 2 for gating sooner
}},
{Sel: ".BLAExtPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.05 // 0.05 is fine -- maybe a bit fast
pt.BLA.NegDeltaLRate = 1
pt.PathScale.Abs = 4
}},
// {Sel: ".BLANovelInhib", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 0.2
// }},
{Sel: ".GPiToBGThal", Doc: "inhibition from GPi to MD",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3 // 3 best -- 4 prevents some gating, 2 can sometimes leak
}},
{Sel: ".PTpToBLAExt", Doc: "modulatory so only active with -da, drives extinction learning based on maintained goal rep",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1 // todo: expt
}},
{Sel: "#BLAposAcqD1ToOFCpos", Doc: "strong, high variance",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2 // key param for OFC focusing on current cs -- expt
}},
{Sel: ".CSToBLApos", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.05
}},
{Sel: ".BLAAcqToGo", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 4 // 4 def
}},
{Sel: ".BLAExtToAcq", Doc: "fixed inhibitory",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 1.0 // key param for efficacy of inhibition
}},
{Sel: ".VSPatchPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 3
pt.Learn.DWt.LearnThr = 0
pt.Learn.LRate.Base = 0.02 // 0.02 for vspatch -- essential to drive long enough extinction
}},
// {Sel: ".ToPTp", Doc: "",
// Set: func(pt *axon.PathParams) {
// pt.PathScale.Abs = 2
// }},
},
}
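// The sketch below is purely illustrative and is not used by the sim: it shows
// how an additional sheet could be defined in the same PathSheets style as
// "Base" above. To be selectable via Config.Params.Sheet it would need to be
// added as another key of the PathParams map itself; the value here is a
// placeholder, not a tuned parameter.
var exampleExtraPathSheet = axon.PathSheets{
"FastLearn": {
{Sel: "Path", Doc: "globally faster learning for quick exploratory runs",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.1 // placeholder value, not tuned
}},
},
}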
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// pvlv simulates the primary value, learned value model of classical
// conditioning and phasic dopamine in the amygdala, ventral striatum
// and associated areas.
package pvlv
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/num"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/axon/v2/sims/pvlv/cond"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Sequence
Block
Condition
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
var trn *cond.CondEnv
if len(ss.Envs) == 0 {
trn = &cond.CondEnv{}
} else {
trn = ss.Envs.ByMode(Train).(*cond.CondEnv)
}
trn.Name = Train.String()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(ss.Config.Run.Runs, ss.Config.Env.RunName)
trn.Init(0)
ss.ConfigRubicon()
ss.Envs.Add(trn)
}
func (ss *Sim) ConfigRubicon() {
rp := &ss.Net.Rubicon
rp.SetNUSs(cond.NUSs, 1) // 1=neg
rp.Defaults()
rp.USs.PVposGain = 2
rp.USs.PVnegGain = 1
rp.LHb.VSPatchGain = 4 // 4 def -- needs more for shorter trial count here
rp.LHb.VSPatchNonRewThr = 0.1 // 0.1 def
rp.USs.USnegGains[0] = 2 // big salient input!
// note: costs weights are very low by default..
rp.Urgency.U50 = 50 // no pressure during regular trials
if ss.Config.Params.Rubicon != nil {
reflectx.SetFieldsFromMap(rp, ss.Config.Params.Rubicon)
}
rp.Update()
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(1)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByMode(Train).(*cond.CondEnv)
ny := ev.NYReps
nuBgY := 5
nuBgX := 5
nuCtxY := 6
nuCtxX := 6
popY := 4
popX := 4
space := float32(2)
pone2one := paths.NewPoolOneToOne()
one2one := paths.NewOneToOne()
_ = one2one
full := paths.NewFull()
_ = pone2one
stim := ev.CurStates["CS"]
ctxt := ev.CurStates["ContextIn"]
vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPTp, ofcPosPT, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc := net.AddRubiconOFCus(ny, popY, popX, nuBgY, nuBgX, nuCtxY, nuCtxX, space)
// note: list all above so can copy / paste and validate correct return values
_, _, _, _, _, _ = vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency
_, _, _, _, _, _ = usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP
_, _, _, _ = ilPos, ilPosCT, ilPosPTp, ilPosMD
_, _, _ = ofcNeg, ofcNegCT, ofcNegPTp
_, _, _, _ = ilNeg, ilNegCT, ilNegPTp, ilNegMD
_, _, _, _ = accCost, accCostCT, accCostPTp, accCostMD
_, _, _, _, _ = ofcPosPT, ofcNegPT, ilPosPT, ilNegPT, accCostPT
// todo: connect more of above
time, timeP := net.AddInputPulv4D("Time", 1, cond.MaxTime, ny, 1, space)
cs, csP := net.AddInputPulv4D("CS", stim.DimSize(0), stim.DimSize(1), stim.DimSize(2), stim.DimSize(3), space)
ctxIn := net.AddLayer4D("ContextIn", axon.InputLayer, ctxt.DimSize(0), ctxt.DimSize(1), ctxt.DimSize(2), ctxt.DimSize(3))
//////// CS -> BLA, OFC
net.ConnectToSC1to1(cs, sc)
net.ConnectCSToBLApos(cs, blaPosAcq, blaNov)
net.ConnectToBLAAcq(cs, blaNegAcq, full)
net.ConnectLayers(cs, vSpatchD1, full, axon.ForwardPath) // these are critical for discriminating A vs. B
net.ConnectLayers(cs, vSpatchD2, full, axon.ForwardPath)
// note: context is hippocampus -- key thing is that it comes on with stim
// most of ctxIn is same as CS / CS in this case, but a few key things for extinction
// ptpred input is important to make learning conditional on actual engagement
net.ConnectToBLAExt(ctxIn, blaPosExt, full)
net.ConnectToBLAExt(ctxIn, blaNegExt, full)
// OFCus predicts cs
net.ConnectToPFCBack(cs, csP, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, full, "CSToPFC")
net.ConnectToPFCBack(cs, csP, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, full, "CSToPFC")
//////// OFC predicts time, effort, urgency
// todo: a more dynamic US rep is needed to drive predictions in OFC
net.ConnectToPFCBack(time, timeP, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, full, "TimeToPFC")
net.ConnectToPFCBack(time, timeP, ilPos, ilPosCT, ilPosPT, ilPosPTp, full, "TimeToPFC")
net.ConnectToPFCBack(time, timeP, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, full, "TimeToPFC")
net.ConnectToPFCBack(time, timeP, accCost, accCostCT, accCostPT, accCostPTp, full, "TimeToPFC")
net.ConnectToPFCBack(time, timeP, ilNeg, ilNegCT, ilNegPT, ilNegPTp, full, "TimeToPFC")
//////// position
time.PlaceRightOf(pvPos, space*2)
cs.PlaceRightOf(time, space)
ctxIn.PlaceRightOf(cs, space)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, initializes everything including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // always do -- otherwise env params not reset after run
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
return &ss.TrainUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
cycles := ss.Config.Run.Cycles()
// Note: actual max counters set by env
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Condition, 1).
AddLevel(Block, 50).
AddLevel(Sequence, 8).
AddLevel(Trial, 5).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// UpdateLoopMax gets the latest loop counter Max values from env
func (ss *Sim) UpdateLoopMax() {
ev := ss.Envs.ByMode(Train).(*cond.CondEnv)
trn := ss.Loops.Stacks[Train]
trn.Loops[Condition].Counter.Max = ev.Condition.Max
trn.Loops[Block].Counter.Max = ev.Block.Max
trn.Loops[Sequence].Counter.Max = ev.Sequence.Max
trn.Loops[Trial].Counter.Max = ev.Tick.Max
if ss.Config.Env.SetNBlocks {
trn.Loops[Block].Counter.Max = ss.Config.Env.NBlocks
}
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ss.Net.InitExt()
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*cond.CondEnv)
ev.Step()
ss.UpdateLoopMax()
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
pats := ev.State(ly.Name)
if pats != nil {
ly.ApplyExt(0, pats)
}
switch lnm {
case "CS":
lpi := ly.Params.PoolIndex(0)
axon.PoolsInt.Set(num.FromBool[int32](ev.CurTick.CSOn), int(lpi), 0, int(axon.Clamped))
}
}
curModeDir.StringValue("TrialName", 1).SetString1D(ev.String(), 0)
curModeDir.StringValue("SeqType", 1).SetString1D(ev.SequenceType, 0)
curModeDir.StringValue("Cond", 1).SetString1D(ev.CondName, 0)
curModeDir.StringValue("TickType", 1).SetString1D(fmt.Sprintf("%02d_%s", ev.Tick.Prev, ev.CurTick.Type.String()), 0)
ss.ApplyRubicon(ev, mode, &ev.CurTick)
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs.
func (ss *Sim) ApplyRubicon(ev *cond.CondEnv, mode Modes, seq *cond.Sequence) {
rp := &ss.Net.Rubicon
di := uint32(0) // not doing NData here -- otherwise loop over
rp.NewState(di, &ss.Net.Rand) // first before anything else is updated
rp.SetGoalMaintFromLayer(di, ss.Net, "ILposPT", 0.3)
rp.DecodePVEsts(di, ss.Net)
dist := math32.Abs(float32(3 - ev.Tick.Cur))
rp.SetGoalDistEst(di, dist)
rp.EffortUrgencyUpdate(di, 1)
if seq.USOn {
if seq.Valence == cond.Pos {
rp.SetUS(di, axon.Positive, seq.US, seq.USMag)
} else {
rp.SetUS(di, axon.Negative, seq.US, seq.USMag) // adds to neg us
}
}
drvs := make([]float32, cond.NUSs)
drvs[seq.US] = 1
rp.SetDrives(di, 1, drvs...)
rp.Step(di, &ss.Net.Rand)
}
// InitEnvRun initializes a new environment run, as when the RunName is changed
// or at NewRun()
func (ss *Sim) InitEnvRun() {
ev := ss.Envs.ByMode(Train).(*cond.CondEnv)
ev.RunName = ss.Config.Env.RunName
ev.Init(0)
ss.LoadCondWeights(ev.CurRun.Weights) // only if nonempty
// todo:
// ss.Loops.ResetCountersBelow(Train, Sequence)
// ss.Logs.ResetLog(Train, Trial)
// ss.Logs.ResetLog(Train, Sequence)
}
// LoadRunWeights loads weights specified in current run, if any
func (ss *Sim) LoadRunWeights() {
ev := ss.Envs.ByMode(Train).(*cond.CondEnv)
ss.LoadCondWeights(ev.CurRun.Weights) // only if nonempty
}
// LoadCondWeights loads weights saved after named condition, in wts/cond.wts.gz
func (ss *Sim) LoadCondWeights(cond string) {
if cond == "" {
return
}
wfn := "wts/" + cond + ".wts.gz"
errors.Log(ss.Net.OpenWeightsJSON(core.Filename(wfn)))
}
// SaveCondWeights saves weights based on current condition, in wts/cond.wts.gz
func (ss *Sim) SaveCondWeights() {
ev := ss.Envs.ByMode(Train).(*cond.CondEnv)
cnm, _ := ev.CurRun.Cond(ev.Condition.Cur)
if cnm == "" {
return
}
wfn := "wts/" + cnm + ".wts.gz"
err := errors.Log(ss.Net.SaveWeightsJSON(core.Filename(wfn)))
if err == nil {
fmt.Printf("Saved weights to: %s\n", wfn)
}
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
ss.InitRandSeed(ss.Loops.Loop(Train, Run).Counter.Cur)
ss.InitEnvRun()
ctx.Reset()
ss.Net.InitWeights()
ss.LoadRunWeights()
ss.UpdateLoopMax()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
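// addTickStat is a hypothetical sketch, not part of the original sim, showing
// how a custom per-trial stat could be registered with AddStat, following the
// same Start/Step pattern used in ConfigStats: reset the tensor at Start,
// then append one value per Step.
func (ss *Sim) addTickStat() {
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Trial {
return
}
levelDir := ss.Stats.Dir(mode.String()).Dir(level.String())
tsr := levelDir.Float64("Tick")
if phase == Start {
tsr.SetNumRows(0)
return
}
ev := ss.Envs.ByMode(mode).(*cond.CondEnv)
tsr.AppendRowFloat(float64(ev.Tick.Cur))
})
}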
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level < Trial {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Sequence))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Block))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// note: Trial level is not recorded, only the sequence
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
// todo: add Cond
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
// todo: add SeqType, TickType
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
// trialStats := []string{"Action", "Target", "Correct"}
// ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
// if level != Trial {
// return
// }
// for si, name := range trialStats {
// modeDir := ss.Stats.Dir(mode.String())
// curModeDir := ss.Current.Dir(mode.String())
// levelDir := modeDir.Dir(level.String())
// di := 0 //
// tsr := levelDir.Float64(name)
// if phase == Start {
// tsr.SetNumRows(0)
// plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.Range.SetMin(0).SetMax(1)
// if si >= 2 && si <= 5 {
// s.On = true
// }
// })
// continue
// }
// ev := ss.Envs.ByModeDi(mode, di).(*MotorSeqEnv)
// var stat float32
// switch name {
// case "Action":
// stat = float32(ev.CurAction)
// case "Target":
// stat = float32(ev.Target)
// case "Correct":
// stat = num.FromBool[float32](ev.Correct)
// }
// curModeDir.Float32(name, 1).SetFloat1D(float64(stat), di)
// tsr.AppendRowFloat(float64(stat))
// }
// })
//
// seqStats := []string{"NCorrect", "Rew", "RewPred", "RPE", "RewEpc"}
// ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
// if level <= Trial {
// return
// }
// for _, name := range seqStats {
// modeDir := ss.Stats.Dir(mode.String())
// curModeDir := ss.Current.Dir(mode.String())
// levelDir := modeDir.Dir(level.String())
// subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
// tsr := levelDir.Float64(name)
// ndata := int(ss.Net.Context().NData)
// var stat float64
// if phase == Start {
// tsr.SetNumRows(0)
// plot.SetFirstStyler(tsr, func(s *plot.Style) {
// s.Range.SetMin(0).SetMax(1)
// s.On = true
// })
// continue
// }
// switch level {
// case Trial:
// curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
// tsr.AppendRowFloat(float64(stat))
// default:
// stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
// tsr.AppendRowFloat(stat)
// }
// }
// })
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
strNames := []string{"Cond", "TrialName", "SeqType", "TickType"}
for _, name := range strNames {
counters += fmt.Sprintf(" %s: %s", name, curModeDir.StringValue(name).String1D(di))
}
statNames := []string{"DA", "RewPred"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Value(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.Options.LayerNameSize = 0.02
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.4, 2.6)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/pvlv"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[pvlv.Sim, pvlv.Config]() }
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ra25
import (
"cogentcore.org/core/core"
"cogentcore.org/core/math32/vecint"
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Hidden1Size is the size of hidden 1 layer.
Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Hidden2Size is the size of hidden 2 layer.
Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Is significantly faster for both CPU and GPU. Results in an effective
// mini-batch of learning.
NData int `default:"1" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"32"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`
// TestInterval is how often (in training epochs) to run through all the
// test patterns. Can use 0 or -1 for no testing.
TestInterval int `default:"5"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "RA25"
cfg.Title = "Axon random associator"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/ra25/README.md"
cfg.Doc = "This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package ra25
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ra25
import (
"github.com/emer/axon/v2/axon"
)
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "all defaults",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.05 // 1.05 > 1.1 for short-term; 1.1 better long-run stability
ly.Inhib.Layer.FB = 0.5 // 0.5 > 0.2 > 0.1 > 1.0 -- usu 1.0
ly.Inhib.ActAvg.Nominal = 0.06 // 0.06 > 0.05
ly.Acts.NMDA.MgC = 1.2 // 1.2 > 1.4 here, still..
ly.Acts.VGCC.Ge = 0
ly.Learn.CaSpike.SpikeCaSyn = 8
ly.Learn.Timing.On.SetBool(false)
ly.Learn.Timing.Refractory.SetBool(false)
ly.Learn.Timing.SynCaCycles = 160
ly.Learn.Timing.Cycles = 170
ly.Learn.Timing.TimeDiffTau = 4
// ly.Learn.CaLearn.ETraceTau = 4
// ly.Learn.CaLearn.ETraceScale = 0.1 // 4,0.1 best in sequential
ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false > true here
// ly.Learn.RLRate.Diff.SetBool(false) // false = very bad
}},
{Sel: "#Input", Doc: "critical now to specify the activity level",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Acts.Clamp.Ge = 1.5 // 1.5 > 1.0
ly.Inhib.ActAvg.Nominal = 0.15 // .24 nominal, lower to give higher excitation
}},
{Sel: "#Output", Doc: "output definitely needs lower inhib -- true for smaller layers in general",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.65 // 0.65
ly.Inhib.ActAvg.Nominal = 0.24
ly.Acts.Spikes.Tr = 1 // 1 is new minimum.. > 3
ly.Acts.Clamp.Ge = 0.8 // 0.8 > 0.6
ly.Learn.RLRate.SigmoidMin = 0.05 // sigmoid derivative actually useful here!
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "basic path params",
Set: func(pt *axon.PathParams) {
// pt.Com.MaxDelay = 10 // robust to this
// pt.Com.Delay = 10
pt.Learn.LRate.Base = 0.06 // 0.06
pt.SWts.Adapt.LRate = 0.1 // .1 >= .2,
pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
pt.Learn.DWt.SubMean = 0 // 1 > 0 for long run stability
pt.Learn.DWt.CaPScale = 1 // 1
pt.Learn.DWt.SynCa20.SetBool(false)
pt.Learn.DWt.LearnThr = 0.1
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.3 // 0.3 > 0.2 > 0.1 > 0.5
}},
},
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ra25 runs a simple random-associator four-layer axon network
// that uses the standard supervised learning paradigm to learn
// mappings between 25 random input / output patterns
// defined over 5x5 input / output layers (i.e., 25 units)
package ra25
//go:generate core generate -add-types -add-funcs -gosl
import (
"embed"
"fmt"
"io/fs"
"os"
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
//go:embed random_5x5_25.tsv
var embedfs embed.FS
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params, config.go for config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
// gpu.DebugAdapter = true
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
// ss.ConfigInputs()
ss.OpenInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *env.FixedTable
if len(ss.Envs) == 0 {
trn = &env.FixedTable{}
tst = &env.FixedTable{}
} else {
trn = ss.Envs.ByMode(Train).(*env.FixedTable)
tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
inputs := tensorfs.DirTable(ss.Root.Dir("Inputs/Train"), nil)
// this logic can be used to create train-test splits of a set of patterns:
// n := inputs.NumRows()
// order := rand.Perm(n)
// ntrn := int(0.85 * float64(n))
// trnEnv := table.NewView(inputs)
// tstEnv := table.NewView(inputs)
// trnEnv.Indexes = order[:ntrn]
// tstEnv.Indexes = order[ntrn:]
// note: names must be standard here!
trn.Name = Train.String()
trn.Config(table.NewView(inputs))
trn.Validate()
tst.Name = Test.String()
tst.Config(table.NewView(inputs))
tst.Sequential = true
tst.Validate()
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
inp := net.AddLayer2D("Input", axon.InputLayer, 5, 5)
hid1 := net.AddLayer2D("Hidden1", axon.SuperLayer, ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X)
hid2 := net.AddLayer2D("Hidden2", axon.SuperLayer, ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X)
out := net.AddLayer2D("Output", axon.TargetLayer, 5, 5)
// use this to position layers relative to each other
// hid2.PlaceRightOf(hid1, 2)
// note: see emergent/path module for all the options on how to connect
// NewFull returns a new paths.Full connectivity pattern
full := paths.NewFull()
net.ConnectLayers(inp, hid1, full, axon.ForwardPath)
net.BidirConnectLayers(hid1, hid2, full)
net.BidirConnectLayers(hid2, out, full)
// net.LateralConnectLayerPath(hid1, full, &axon.HebbPath{}).SetType(InhibPath)
// note: if you wanted to change a layer type from e.g., Target to Compare, do this:
// out.Type = axon.CompareLayer
// that would mean that the output layer doesn't reflect target values in plus phase
// and thus removes error-driven learning -- but stats are still computed.
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, initializes everything including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
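// As a rough sketch (assuming defaults like those in the ra25x RunConfig below:
// Runs=5, Epochs=1000, Trials=32, NData=16, 150+50 cycles), the Train stack nests as
// Expt(1) > Run(5) > Epoch(1000) > Trial(32, stepping by 16, i.e., 2 iterations of
// 16 data-parallel trials) > Cycle(200); the Test stack runs a single Epoch over the
// same Trial and Cycle levels.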
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
return curNZero >= stopNz
})
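// The NZeroStop check above ends a Run early once the NZero stat (consecutive
// zero-error epochs, tracked in the Current stats directory) reaches Config.Run.NZero;
// for example, NZero=5 stops after 5 perfect epochs in a row, while NZero <= 0
// disables early stopping so each Run lasts the full number of Epochs.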
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
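// For example, with NData=16 the environment is stepped 16 times per trial, and each
// stepped pattern is applied to a separate data-parallel index di on the Input and
// Target layers; net.ApplyExts then commits the accumulated external inputs
// (presumably syncing them to the GPU when UseGPU is enabled).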
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
for di := range ndata {
ev.Step()
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
}
net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Inputs
func (ss *Sim) ConfigInputs() {
dt := table.New()
metadata.SetName(dt, "Train")
metadata.SetDoc(dt, "Training inputs")
dt.AddStringColumn("Name")
dt.AddFloat32Column("Input", 5, 5)
dt.AddFloat32Column("Output", 5, 5)
dt.SetNumRows(25)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], 6, 1, 0, 3)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[2], 6, 1, 0, 3)
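// The two PermutedBinaryMinDiff calls presumably fill the Input and Output 5x5
// patterns with 6 randomly permuted active units each (value 1, others 0), enforcing
// a minimum difference of 3 bits between any two of the 25 rows, so the patterns
// overlap but remain distinguishable.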
dt.SaveCSV("random_5x5_25_gen.tsv", tensor.Tab, table.Headers)
tensorfs.DirFromTable(ss.Root.Dir("Inputs/Train"), dt)
}
// OpenTable opens a [table.Table] from embedded content, storing
// the data in the given tensorfs directory.
func (ss *Sim) OpenTable(dir *tensorfs.Node, fsys fs.FS, fnm, name, docs string) (*table.Table, error) {
dt := table.New()
metadata.SetName(dt, name)
metadata.SetDoc(dt, docs)
err := dt.OpenFS(fsys, fnm, tensor.Tab)
if errors.Log(err) != nil {
return dt, err
}
tensorfs.DirFromTable(dir.Dir(name), dt)
return dt, err
}
func (ss *Sim) OpenInputs() {
dir := ss.Root.Dir("Inputs")
ss.OpenTable(dir, embedfs, "random_5x5_25.tsv", "Train", "Training inputs")
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
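// For example, at the start of each Epoch this calls RunStats(mode, Trial, Start),
// and the Start phase of the stat functions resets the Trial-level tensors
// (SetNumRows(0)) so the new epoch accumulates a fresh set of trial rows.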
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
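// For example, at the end of each Trial this calls RunStats(mode, Trial, Step), which
// appends one row per data-parallel item to the Trial-level tensors, and then writes
// the updated Trial-level table to its log file (if one was opened via OpenLogFiles).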
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
if level == Run {
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train RunAll Plot")
}
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(ss.Stats.Dir("Train/RunAll"))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
statDocs := map[string]string{
"CorSim": "The correlation-based similarity of the neural activity patterns between the minus and plus phase (1 = patterns are effectively identical). For target layers, this is good continuous, normalized measure of learning performance, which can be more sensitive than thresholded SSE measures.",
"UnitErr": "Normalized proportion of neurons with activities on the wrong side of 0.5 relative to the target values. This is a good normalized error measure.",
"Err": "At the trial level this indicates the presence of an error (i.e., UnitErr > 0), and at higher levels, it is the proportion of errors across the epoch. Thus, when this is zero, the network is performing perfectly (with respect to target outputs).",
"NZero": "The number of zero-error epochs in a row.",
"FirstZero": "The first epoch when there were no errors according to Err stat.",
"LastZero": "The epoch when training was stopped because NZero got above the threshold for number of perfect epochs in a row",
}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
metadata.SetDoc(tsr, statDocs[name])
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("Output")
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
stat = 1.0
if uniterr == 0 {
stat = 0
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
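// Worked example of the zero-error stats above: an Epoch whose summed Trial Err is 0
// increments NZero, otherwise NZero resets to 0; FirstZero records the current epoch
// the first time NZero reaches 1 (the first error-free epoch), and LastZero records
// the epoch when NZero reaches the Config.Run.NZero stopping threshold.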
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "Output")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
runAllFunc := axon.StatLevelAll(ss.Stats, Train, Run, func(s *plot.Style, cl tensor.Values) {
name := metadata.Name(cl)
switch name {
case "FirstZero", "LastZero":
s.On = true
s.Range.SetMin(0)
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runAllFunc(mode, level, phase == Start)
})
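// StatLevelAll presumably accumulates the Run-level stats across the entire job into
// the Train/RunAll directory, which is what the "Train RunAll Plot" tab (updated in
// RunStats and plotted in StatsInit) displays; the styler turns on FirstZero and
// LastZero there so learning speed can be compared across runs.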
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/ra25"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[ra25.Sim, ra25.Config]() }
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ra25x
import (
"cogentcore.org/core/core"
"cogentcore.org/core/math32/vecint"
"github.com/emer/emergent/v2/egui"
)
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Hidden1Size is the size of hidden 1 layer.
Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Hidden2Size is the size of hidden 2 layer.
Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// This is significantly faster for both CPU and GPU, and results in an effective
// mini-batch for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"1000"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"32"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"` // note: > 0 bad for long-term stability
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"0"`
// TestInterval is how often (in training epochs) to run through all the test
// patterns. Use 0 or -1 for no testing.
TestInterval int `default:"5"`
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"10"`
// StartWeights is the name of weights file to load at start of first run.
StartWeights string
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
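// For example, with the defaults above (ISICycles=0, MinusCycles=150, PlusCycles=50),
// Cycles() returns 0 + 150 + 50 = 200 cycles per trial.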
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "RA25x"
cfg.Title = "Axon random associator: experimental version"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/ra25x/README.md"
cfg.Doc = "This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package ra25x
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3, `Expt`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`, 4: `Expt`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ra25x
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "all defaults",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.06 // 0.06 > 0.05
ly.Inhib.Layer.Gi = 1.1 // 1.1 > 1.05
ly.Inhib.Layer.SS = 30 // 30 > others
ly.Inhib.Layer.FS0 = 0.1
ly.Inhib.Layer.FSTau = 6
ly.Inhib.Layer.FB = 0.5 // 0.5 > 0.2 > 0.1 > 1.0
ly.Inhib.Layer.SSfTau = 20 // 20 > 30 > 15
ly.Inhib.Layer.SSiTau = 50 // 50 > 40 -- try 40, 60 @ gi= 1.1?
ly.Inhib.ActAvg.AdaptRate = 0.1 // 0.1 seems good
ly.Inhib.ActAvg.LoTol = 0.8
ly.Inhib.ActAvg.HiTol = 0.0
ly.Acts.Dend.SSGi = 2.0 // 2.0 > 1.5 more reliable
ly.Acts.Decay.Act = 0.2 // 0.2 def
ly.Acts.Decay.Glong = 0.6 // 0.6 def
ly.Acts.NMDA.Ge = 0.006 // 0.006 def
ly.Acts.NMDA.MgC = 1.2 // 1.2 > 1.4 here
ly.Acts.NMDA.Voff = 0 // 5 == 0 for trace
ly.Acts.NMDA.Tau = 100 // 100 def -- 50 is sig worse
ly.Acts.Mahp.Gk = 0.05 // 0.05 works..
ly.Acts.Sahp.Gk = 0.05 //
ly.Acts.Sahp.Off = 0.8 //
ly.Acts.Sahp.Slope = 0.02 //
ly.Acts.Sahp.CaTau = 5 //
ly.Acts.GabaB.Gk = 0.015 // 0.015 > lower
ly.Acts.KNa.On.SetBool(false)
ly.Acts.AK.Gk = 0.1 // 0.05 to 0.1 likely good per urakubo, but 1.0 needed to prevent vgcc blowup
ly.Acts.VGCC.Ge = 0.02 // 0.12 per urakubo / etc models, but produces too much high-burst plateau -- even 0.05 with AK = .1 blows up
ly.Acts.VGCC.Ca = 25 // 25 / 10tau default
ly.Learn.CaLearn.Norm = 80 // 80 works
ly.Learn.CaLearn.SpikeVGCC.SetBool(true) // sig better..
ly.Learn.CaLearn.SpikeVgccCa = 35 // 70 / 5 or 35 / 10 both work
ly.Learn.CaLearn.VgccTau = 10 // 10 > 5 ?
ly.Learn.CaLearn.Dt.MTau = 2 // 2 > 1 ?
ly.Learn.CaSpike.SpikeCaM = 8 // 8 produces reasonable 0-1 norm CaSpk levels?
ly.Learn.CaSpike.CaSynTau = 30 // 30 > 20, 40
ly.Learn.CaSpike.Dt.MTau = 5 // 5 > 10?
ly.Learn.LearnNMDA.MgC = 1.4 // 1.2 for unified Act params, else 1.4
ly.Learn.LearnNMDA.Voff = 0 // 0 for unified Act params, else 5
ly.Learn.LearnNMDA.Ge = 0.006
ly.Learn.LearnNMDA.Tau = 100 // 100 def
ly.Learn.TrgAvgAct.RescaleOn.SetBool(true) // true > false even with adapt gi
ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0 essential
ly.Learn.TrgAvgAct.SynScaleRate = 0.0002 // 0.0002 > others; 0.005 not as good
ly.Learn.RLRate.On.SetBool(true) // beneficial for trace
ly.Learn.RLRate.SigmoidMin = 0.05 // 0.05 > .1 > .02
ly.Learn.RLRate.Diff.SetBool(true)
ly.Learn.RLRate.DiffThr = 0.02 // 0.02 def - todo
ly.Learn.RLRate.SpikeThr = 0.1 // 0.1 def
ly.Learn.RLRate.Min = 0.001
ly.Learn.Timing.On.SetBool(false)
ly.Learn.Timing.Refractory.SetBool(false)
ly.Learn.Timing.SynCaCycles = 160
ly.Learn.Timing.Cycles = 170
ly.Learn.Timing.TimeDiffTau = 4
}},
{Sel: "#Input", Doc: "critical now to specify the activity level",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.9 // 0.9 > 1.0
ly.Acts.Clamp.Ge = 1.5 // 1.5 matches old fffb for gex (v13) > 1.0
ly.Inhib.ActAvg.Nominal = 0.15 // .24 nominal, lower to give higher excitation
ly.Acts.VGCC.Ca = 1 // otherwise dominates display
ly.Acts.Decay.Act = 1 // this is subtly beneficial
ly.Acts.Decay.Glong = 1
}},
{Sel: ".SuperLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 1.1 // 1.1 > others
ly.Inhib.ActAvg.Nominal = 0.06 // 0.06 > 0.05
ly.Inhib.ActAvg.AdaptGi.SetBool(true)
}},
{Sel: "#Output", Doc: "output definitely needs lower inhib -- true for smaller layers in general",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.24 // 0.24 > 0.3
ly.Inhib.ActAvg.AdaptGi.SetBool(true)
ly.Inhib.Layer.Gi = 0.65 // 0.65 FB0.5 best
ly.Inhib.Layer.SS = 30 // 30 > others
ly.Inhib.Layer.FB = 0.5 // 0 > 1 here in output
ly.Acts.Spikes.Tr = 1 // 1 is new minimum.. > 3
ly.Acts.Clamp.Ge = 0.8 // 0.8 > 0.7 > 1.0 > 0.6
ly.Acts.VGCC.Ca = 1 // otherwise dominates display
ly.Learn.RLRate.On.SetBool(true) // beneficial for trace
ly.Learn.RLRate.SigmoidMin = 0.05 // sigmoid derivative actually useful here!
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: "Path", Doc: "basic path params",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.06 // .06 from ra25
pt.SWts.Adapt.LRate = 0.1 // .1 >= .2,
pt.SWts.Adapt.SubMean = 1 // key for stability
pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
pt.Learn.DWt.SubMean = 1 // 1 > 0 for long-term stability
pt.Learn.DWt.LearnThr = .1
}},
{Sel: ".ToTarget", Doc: "",
Set: func(pt *axon.PathParams) {
pt.Learn.LRate.Base = 0.03 // don't need diff lrate here
pt.SWts.Adapt.SigGain = 6 // 6 def; 1 does not work
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
Set: func(pt *axon.PathParams) {
pt.PathScale.Rel = 0.3 // 0.3 > 0.2 > 0.1 > 0.5
}},
},
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ra25x runs an experimental version of the simple random-associator four-layer axon network
// that uses the standard supervised learning paradigm to learn
// mappings between 25 random input / output patterns
// defined over 5x5 input / output layers (i.e., 25 units)
package ra25x
//go:generate core generate -add-types -add-funcs -gosl
import (
"embed"
"fmt"
"io/fs"
"os"
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
//go:embed random_5x5_25.tsv
var embedfs embed.FS
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
Expt
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields, which provide hints for how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start performs initialization at the
// start of the given level, including all configuration (also called during Init).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
// ss.ConfigInputs()
ss.OpenInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn, tst *env.FixedTable
if len(ss.Envs) == 0 {
trn = &env.FixedTable{}
tst = &env.FixedTable{}
} else {
trn = ss.Envs.ByMode(Train).(*env.FixedTable)
tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
inputs := tensorfs.DirTable(ss.Root.Dir("Inputs/Train"), nil)
// this logic can be used to create train-test splits of a set of patterns:
// n := inputs.NumRows()
// order := rand.Perm(n)
// ntrn := int(0.85 * float64(n))
// trnEnv := table.NewView(inputs)
// tstEnv := table.NewView(inputs)
// trnEnv.Indexes = order[:ntrn]
// tstEnv.Indexes = order[ntrn:]
// note: names must be standard here!
trn.Name = Train.String()
trn.Config(table.NewView(inputs))
trn.Validate()
tst.Name = Test.String()
tst.Config(table.NewView(inputs))
tst.Sequential = true
tst.Validate()
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
inp := net.AddLayer2D("Input", axon.InputLayer, 5, 5)
hid1 := net.AddLayer2D("Hidden1", axon.SuperLayer, ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X)
hid2 := net.AddLayer2D("Hidden2", axon.SuperLayer, ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X)
out := net.AddLayer2D("Output", axon.TargetLayer, 5, 5)
// use this to position layers relative to each other
// hid2.PlaceRightOf(hid1, 2)
// note: see emergent/path module for all the options on how to connect
// NewFull returns a new paths.Full connectivity pattern
full := paths.NewFull()
net.ConnectLayers(inp, hid1, full, axon.ForwardPath)
net.BidirConnectLayers(hid1, hid2, full)
net.BidirConnectLayers(hid2, out, full)
// net.LateralConnectLayerPath(hid1, full, &axon.HebbPath{}).SetType(InhibPath)
// note: if you wanted to change a layer type from e.g., Target to Compare, do this:
// out.Type = axon.CompareLayer
// that would mean that the output layer doesn't reflect target values in plus phase
// and thus removes error-driven learning -- but stats are still computed.
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run, initializes everything including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Expt, 1).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
return false
}
curModeDir := ss.Current.Dir(Train.String())
curNZero := int(curModeDir.Value("NZero").Float1D(-1))
return curNZero >= stopNz
})
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
ss.TestAll()
}
})
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
for di := range ndata {
ev.Step()
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
}
net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ss.Envs.ByMode(Test).Init(run)
ctx.Reset()
ss.Net.InitWeights()
if ss.Config.Run.StartWeights != "" {
ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
}
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
ss.Envs.ByMode(Test).Init(0)
ss.Loops.ResetAndRun(Test)
ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
//////// Inputs
func (ss *Sim) ConfigInputs() {
dt := table.New()
metadata.SetName(dt, "Train")
metadata.SetDoc(dt, "Training inputs")
dt.AddStringColumn("Name")
dt.AddFloat32Column("Input", 5, 5)
dt.AddFloat32Column("Output", 5, 5)
dt.SetNumRows(25)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], 6, 1, 0, 3)
patterns.PermutedBinaryMinDiff(dt.Columns.Values[2], 6, 1, 0, 3)
dt.SaveCSV("random_5x5_25_gen.tsv", tensor.Tab, table.Headers)
tensorfs.DirFromTable(ss.Root.Dir("Inputs/Train"), dt)
}
// OpenTable opens a [table.Table] from embedded content, storing
// the data in the given tensorfs directory.
func (ss *Sim) OpenTable(dir *tensorfs.Node, fsys fs.FS, fnm, name, docs string) (*table.Table, error) {
dt := table.New()
metadata.SetName(dt, name)
metadata.SetDoc(dt, docs)
err := dt.OpenFS(fsys, fnm, tensor.Tab)
if errors.Log(err) != nil {
return dt, err
}
tensorfs.DirFromTable(dir.Dir(name), dt)
return dt, err
}
func (ss *Sim) OpenInputs() {
dir := ss.Root.Dir("Inputs")
ss.OpenTable(dir, embedfs, "random_5x5_25.tsv", "Train", "Training inputs")
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
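// This inline stat records the per-trial TrialName strings (saved in ApplyInputs into
// the Current directory) into the Trial-level log; it is presumably equivalent to the
// axon.StatTrialName helper used in ra25, written out here as a template for custom
// per-trial string stats.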
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Trial {
return
}
name := "TrialName"
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(ss.Net.Context().NData)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.On = false
})
return
}
for di := range ndata {
// saved in apply inputs
trlNm := curModeDir.StringValue(name, ndata).String1D(di)
tsr.AppendRowString(trlNm)
}
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
statDocs := map[string]string{
"CorSim": "The correlation-based similarity of the neural activity patterns between the minus and plus phase (1 = patterns are effectively identical). For target layers, this is good continuous, normalized measure of learning performance, which can be more sensitive than thresholded SSE measures.",
"UnitErr": "Normalized proportion of neurons with activities on the wrong side of 0.5 relative to the target values. This is a good normalized error measure.",
"Err": "At the trial level this indicates the presence of an error (i.e., UnitErr > 0), and at higher levels, it is the proportion of errors across the epoch. Thus, when this is zero, the network is performing perfectly (with respect to target outputs).",
"NZero": "The number of zero-error epochs in a row.",
"FirstZero": "The first epoch when there were no errors according to Err stat.",
"LastZero": "The epoch when training was stopped because NZero got above the threshold for number of perfect epochs in a row",
}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
return
}
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
case "NZero":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
s.On = false
}
}
})
metadata.SetDoc(tsr, statDocs[name])
switch name {
case "NZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
}
switch level {
case Trial:
out := ss.Net.LayerByName("Output")
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
stat = 1.0
if uniterr == 0 {
stat = 0
}
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
default: // Expt
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
actGeFunc(mode, level, phase == Start)
})
pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
pcaFunc(mode, level, phase == Start, trnEpc)
})
stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "Output")
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
stateFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/ra25x"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[ra25x.Sim, ra25x.Config]() }
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rl
import (
"fmt"
"math/rand"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/env"
)
// OnOff represents stimulus On / Off timing
type OnOff struct {
// is this stimulus active -- use it?
Act bool
// when stimulus turns on
On int
// when stimulus turns off
Off int
// probability of being active on any given trial
P float32
// variability in onset timing (max number of trials before/after On that it could start)
OnVar int
// variability in offset timing (max number of trials before/after Off that it could end)
OffVar int
// current active status based on P probability
CurAct bool `display:"-"`
// current on / off values using Var variability
CurOn, CurOff int `display:"-"`
}
func (oo *OnOff) Set(act bool, on, off int) {
oo.Act = act
oo.On = on
oo.Off = off
oo.P = 1 // default
}
// TrialUpdate updates Cur state at start of trial
func (oo *OnOff) TrialUpdate() {
if !oo.Act {
return
}
oo.CurAct = randx.BoolP32(oo.P)
oo.CurOn = oo.On - oo.OnVar + 2*rand.Intn(oo.OnVar+1)
oo.CurOff = oo.Off - oo.OffVar + 2*rand.Intn(oo.OffVar+1)
}
// IsOn returns true if the stimulus should be on at the given time tm,
// based on the current trial's CurAct, CurOn, and CurOff values.
func (oo *OnOff) IsOn(tm int) bool {
return oo.Act && oo.CurAct && tm >= oo.CurOn && tm < oo.CurOff
}
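// onOffSketch is a minimal usage sketch (not part of the original source),
// illustrating the OnOff timing logic: Set configures the nominal On / Off
// window (with P defaulting to 1), TrialUpdate draws this trial's CurAct,
// CurOn, and CurOff (varying around On / Off by up to OnVar / OffVar), and
// IsOn reports whether the stimulus is active at a given time step.
func onOffSketch() {
	cs := &OnOff{}
	cs.Set(true, 10, 16) // active, nominal on at 10, off at 16
	cs.OnVar = 1
	cs.TrialUpdate() // draw CurAct, CurOn, CurOff for this trial
	for tm := 0; tm < 20; tm++ {
		if cs.IsOn(tm) {
			fmt.Println("CS on at time", tm)
		}
	}
}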
// CondEnv simulates a basic Pavlovian conditioning environment, where
// conditioned stimuli (CSA, CSB, CSC) turn on and off at specified times
// within a trial, and an unconditioned stimulus (US, reward) can follow.
type CondEnv struct {
// name of this environment
Name string
// total time for trial
TotTime int
// Conditioned stimulus A (e.g., Tone)
CSA OnOff `display:"inline"`
// Conditioned stimulus B (e.g., Light)
CSB OnOff `display:"inline"`
// Conditioned stimulus C
CSC OnOff `display:"inline"`
// Unconditioned stimulus -- reward
US OnOff `display:"inline"`
// value for reward
RewVal float32
// value for non-reward
NoRewVal float32
// one-hot input state: one row per CS (A, B, C) across TotTime time steps
Input tensor.Float64
// single reward value
Reward tensor.Float64
// true if a US reward value was set
HasRew bool
// one trial is a pass through all TotTime Events
Trial env.Counter `display:"inline"`
// event is one time step within Trial -- e.g., CS turning on, etc
Event env.Counter `display:"inline"`
}
func (ev *CondEnv) Label() string { return ev.Name }
func (ev *CondEnv) Defaults() {
ev.TotTime = 20
ev.CSA.Set(true, 10, 16) // 10, 16
ev.CSB.Set(false, 2, 10)
ev.CSC.Set(false, 2, 5)
ev.US.Set(true, 15, 16) // 15, 16
}
func (ev *CondEnv) Validate() error {
if ev.TotTime == 0 {
ev.Defaults()
}
return nil
}
func (ev *CondEnv) State(element string) tensor.Values {
switch element {
case "Input":
return &ev.Input
case "Reward":
return &ev.Reward
}
return nil
}
// String returns the current state as a string
func (ev *CondEnv) String() string {
return fmt.Sprintf("S_%d_%g", ev.Event.Cur, ev.Reward.Values[0])
}
func (ev *CondEnv) Init(run int) {
ev.Input.SetShapeSizes(3, ev.TotTime)
ev.Reward.SetShapeSizes(1)
ev.Trial.Init()
ev.Event.Init()
ev.Event.Max = ev.TotTime
ev.Event.Cur = -1 // init state -- key so that first Step() = 0
ev.TrialUpdate()
}
// TrialUpdate updates all random vars at start of trial
func (ev *CondEnv) TrialUpdate() {
ev.CSA.TrialUpdate()
ev.CSB.TrialUpdate()
ev.CSC.TrialUpdate()
ev.US.TrialUpdate()
}
// SetInput sets the input state
func (ev *CondEnv) SetInput() {
ev.Input.SetZeros()
tm := ev.Event.Cur
if ev.CSA.IsOn(tm) {
ev.Input.Values[tm] = 1
}
if ev.CSB.IsOn(tm) {
ev.Input.Values[ev.TotTime+tm] = 1
}
if ev.CSC.IsOn(tm) {
ev.Input.Values[2*ev.TotTime+tm] = 1
}
}
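// csInputIndex is a hypothetical helper (not part of the original source)
// that makes the flat index layout used by SetInput explicit: Input has
// shape (3, TotTime), so CS row cs (0=A, 1=B, 2=C) at time tm maps to the
// flat index cs*TotTime + tm in Input.Values.
func (ev *CondEnv) csInputIndex(cs, tm int) int {
	return cs*ev.TotTime + tm
}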
// SetReward sets the reward based on whether the US is on at the current event time; returns true if rewarded
func (ev *CondEnv) SetReward() bool {
tm := ev.Event.Cur
rw := ev.US.IsOn(tm)
if rw {
ev.HasRew = true
ev.Reward.Values[0] = float64(ev.RewVal)
} else {
ev.HasRew = false
ev.Reward.Values[0] = float64(ev.NoRewVal)
}
return rw
}
func (ev *CondEnv) Step() bool {
ev.Trial.Same() // this ensures that they only report changed when actually changed
incr := ev.Event.Incr()
ev.SetInput()
ev.SetReward()
if incr {
ev.TrialUpdate()
ev.Trial.Incr()
}
return true
}
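// condEnvSketch is a minimal usage sketch (not part of the original source)
// of the standard environment cycle: Init once, then Step once per event;
// each pass through TotTime events is one trial, after which TrialUpdate
// re-draws the CS / US timing and activity.
func condEnvSketch() {
	ev := &CondEnv{Name: "Sketch"}
	ev.Defaults()
	ev.RewVal = 1
	ev.Init(0)
	for i := 0; i < ev.TotTime; i++ {
		ev.Step()
		fmt.Println(ev.String(), "hasRew:", ev.HasRew)
	}
}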
func (ev *CondEnv) Action(element string, input tensor.Values) {
// nop
}
// Compile-time check that CondEnv implements the env.Env interface
var _ env.Env = (*CondEnv)(nil)
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rl
import (
"cogentcore.org/core/core"
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
func (pc *ParamConfig) FieldWidget(field string) core.Value {
return egui.ScriptFieldWidget(field)
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Larger values are significantly faster for both CPU and GPU, and result
// in an effective mini-batch size for learning.
NData int `default:"1" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"20"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
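// cyclesSketch (not part of the original source) works through the default
// cycle arithmetic as an illustration: 0 ISI + 150 minus + 50 plus = 200
// cycles per trial, which is also what drives the netview record sizes.
func cyclesSketch() int {
	rc := RunConfig{ISICycles: 0, MinusCycles: 150, PlusCycles: 50}
	return rc.Cycles() // 0 + 150 + 50 = 200
}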
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// RW, if true, uses the Rescorla-Wagner algorithm instead of TD;
// the network must be rebuilt for a change to take effect.
RW bool
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "RL"
cfg.Title = "Reinforcement learning"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/rl/README.md"
cfg.Doc = "rl explores the temporal differences (TD) reinforcement learning algorithm under some basic Pavlovian conditioning environments."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package rl
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 1
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0}
var _ModesDescMap = map[Modes]string{0: ``}
var _ModesMap = map[Modes]string{0: `Train`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 4
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rl
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "#Input", Doc: "input fixed act",
Set: func(ly *axon.LayerParams) {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
ly.Inhib.ActAvg.Nominal = 0.05
}},
{Sel: "#Rew", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.2
ly.Inhib.ActAvg.Nominal = 1
}},
},
"RW": {
{Sel: ".RWPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.2
ly.Inhib.ActAvg.Nominal = 1
ly.Acts.Dt.GeTau = 40
}},
},
"TD": {
{Sel: ".TDPredLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.2
ly.Inhib.ActAvg.Nominal = 1
ly.Acts.Dt.GeTau = 40
}},
{Sel: ".TDIntegLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Layer.Gi = 0.2
ly.Inhib.ActAvg.Nominal = 1
ly.TDInteg.Discount = 0.9
ly.TDInteg.PredGain = 1.0
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {},
"RW": {
{Sel: ".RWPath", Doc: "RW pred",
Set: func(pt *axon.PathParams) {
pt.SWts.Init.Mean = 0
pt.SWts.Init.Var = 0
pt.SWts.Init.Sym.SetBool(false)
pt.Learn.LRate.Base = 0.1
pt.RLPred.OppSignLRate = 1.0
pt.RLPred.DaTol = 0.0
}},
},
"TD": {
{Sel: "#InputToRewPred", Doc: "input to rewpred",
Set: func(pt *axon.PathParams) {
pt.SWts.Init.Mean = 0
pt.SWts.Init.Var = 0
pt.SWts.Init.Sym.SetBool(false)
pt.Learn.LRate.Base = 0.1
pt.RLPred.OppSignLRate = 1.0
}},
},
}
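// availableSheetsSketch is a minimal sketch (not part of the original source)
// of how these sheets are organized: LayerParams and PathParams are maps keyed
// by sheet name, "Base" is always applied, and Config.Params.Sheet can name
// "RW" or "TD" to layer those selectors on top of Base.
func availableSheetsSketch() []string {
	sheets := make([]string, 0, len(LayerParams))
	for nm := range LayerParams {
		sheets = append(sheets, nm) // "Base", "RW", "TD" (map iteration order not guaranteed)
	}
	return sheets
}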
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// rl explores the temporal differences (TD) and Rescorla-Wagner
// reinforcement learning algorithms under some basic
// Pavlovian conditioning environments.
package rl
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"os"
"reflect"
"cogentcore.org/core/base/metadata"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
"github.com/emer/emergent/v2/relpos"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
var trn *CondEnv
if len(ss.Envs) == 0 {
trn = &CondEnv{}
trn.Name = Train.String()
trn.Defaults()
trn.RewVal = 1 // -1
trn.NoRewVal = 0
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Validate()
} else {
trn = ss.Envs.ByMode(Train).(*CondEnv)
}
trn.Init(0)
ss.Envs.Add(trn)
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
space := float32(4)
full := paths.NewFull()
var rp, rplay, rew *axon.Layer
var ptype axon.PathTypes
if ss.Config.RW {
rew, rp, _ = net.AddRWLayers("", relpos.RightOf, space)
rplay = rp
ptype = axon.RWPath
} else {
rew, rp, _, _ = net.AddTDLayers("", relpos.RightOf, space)
rplay = rp
ptype = axon.TDPredPath
}
ldt := net.AddLDTLayer("")
ldt.Name = "ACh"
ldt.PlaceBehind(rew, 1)
inp := net.AddLayer2D("Input", axon.InputLayer, 3, 20)
inp.PlaceAbove(rew)
net.ConnectLayers(inp, rplay, full, ptype)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
// if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
// }
}
// ConfigLoops configures the control loops: Training.
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Trial, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) { ss.ApplyInputs(mode.(Modes)) },
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
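// loopSizesSketch (not part of the original source) spells out the loop
// nesting configured above with the default RunConfig values, as an
// illustration: Run(1) > Epoch(100) > Trial(20, advancing by NData per step)
// > Cycle(200). IntMultipleGE rounds Trials up to an even multiple of NData.
func loopSizesSketch() {
	trials := int(math32.IntMultipleGE(float32(20), float32(1))) // 20 with defaults
	fmt.Println("trial loop max:", trials, "cycles per trial:", 0+150+50)
}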
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
ev := ss.Envs.ByMode(mode).(*CondEnv)
lays := net.LayersByType(axon.InputLayer, axon.TargetLayer)
net.InitExt()
for di := range ndata {
ev.Step()
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
axon.GlobalSetRew(uint32(di), float32(ev.Reward.Values[0]), ev.HasRew)
}
net.ApplyExts()
}
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
ss.Envs.ByMode(Train).Init(run)
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Trial {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train TrialAll Plot")
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(ss.Stats.Dir("Train/TrialAll"))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures functions to do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
if level != Trial {
return
}
name := "TrialName"
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(ss.Net.Context().NData)
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.On = false
})
return
}
for di := range ndata {
// saved in apply inputs
trlNm := curModeDir.StringValue(name, ndata).String1D(di)
tsr.AppendRowString(trlNm)
}
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"DA_Act", "ACh_Act", "RewPred_Act", "RewInteg_Act"}
tdLayerNames := []string{"TD", "ACh", "RewPred", "RewInteg"}
rwLayerNames := []string{"DA", "ACh", "RWPred", "RWPred"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for si, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if si == 0 {
s.On = true
}
})
continue
}
switch level {
case Trial:
lnm := ""
if ss.Config.RW {
lnm = rwLayerNames[si]
} else {
lnm = tdLayerNames[si]
}
ly := ss.Net.LayerByName(lnm)
vidx, _ := ly.UnitVarIndex("Act")
for di := range ndata {
stat := float64(ly.UnitValue1D(vidx, 0, di))
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
case Run:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
trlAllFunc := axon.StatLevelAll(ss.Stats, Train, Trial, func(s *plot.Style, cl tensor.Values) {
s.Range.SetMin(0).SetMax(1)
name := metadata.Name(cl)
switch name {
case "DA_Act":
s.On = true
}
})
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trlAllFunc(mode, level, phase == Start)
})
}
// StatCounters returns counters string to show at bottom of netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"DA_Act"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
// nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
// nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "Reset Trial Plot",
Icon: icons.Add,
Tooltip: "Reset the trial plot.",
Active: egui.ActiveAlways,
Func: func() {
name := "Train/TrialAll"
levelDir := ss.Stats.Dir(name)
cols := levelDir.ValuesFunc(nil)
for _, cl := range cols {
cl.(tensor.Values).SetNumRows(0)
}
ss.GUI.Tabs.AsLab().UpdatePlot(name + " Plot")
},
})
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/rl"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[rl.Sim, rl.Config]() }
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vspatch
import (
"github.com/emer/emergent/v2/egui"
)
// EnvConfig has config params for environment.
type EnvConfig struct {
// Env parameters: can set any field/subfield on Env struct,
// using standard TOML formatting.
Env map[string]any
}
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
// Tweak means to perform automated parameter tweaking for
// parameters marked Hypers Tweak = log,incr, or [vals].
Tweak bool
// Baseline for Tweak, if true, first run a baseline with current default params.
Baseline bool
// DryRun for Tweak, if true, only print what would be done, don't run.
DryRun bool
// Script is an interpreted script that is run to set parameters in Layer and Path
// sheets, by default using the "Script" set name.
Script string `new-window:"+" width:"100"`
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`
// Good is for SaveAll, save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim.
type RunConfig struct {
// GPUDevice selects the gpu device to use.
GPUDevice int
// NData is the number of data-parallel items to process in parallel per trial.
// Larger values are significantly faster for both CPU and GPU, and result
// in an effective mini-batch size for learning.
NData int `default:"16" min:"1"`
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0" flag:"run"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"1" min:"1"`
// CondEpochs is the number of epochs to run per condition.
// Every this interval new reward values are picked.
CondEpochs int `default:"20"`
// Epochs is the total number of epochs per run.
Epochs int `default:"100"`
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"256"`
// ISICycles is the number of no-input inter-stimulus interval
// cycles at the start of the trial.
ISICycles int `default:"0"`
// MinusCycles is the number of cycles in the minus phase per trial.
MinusCycles int `default:"150"`
// PlusCycles is the number of cycles in the plus phase per trial.
PlusCycles int `default:"50"`
}
// Cycles returns the total number of cycles per trial: ISI + Minus + Plus.
func (rc *RunConfig) Cycles() int {
return rc.ISICycles + rc.MinusCycles + rc.PlusCycles
}
// LogConfig has config parameters related to logging data.
type LogConfig struct {
// stats to aggregate at higher levels
AggStats []string `default:"['Rew', 'RewPred', 'DA', 'RewPred_NR', 'DA_NR']"`
// SaveWeights will save final weights after each run.
SaveWeights bool
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}
// Config has the overall Sim configuration options.
type Config struct {
egui.BaseConfig
// Env has environment configuration options.
Env EnvConfig `display:"add-fields"`
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
func (cfg *Config) Defaults() {
cfg.Name = "VSPatch"
cfg.Title = "Ventral Striatum Patch"
cfg.URL = "https://github.com/emer/axon/blob/main/sims/vspatch/README.md"
cfg.Doc = "This project simulates the Ventral Striatum (VS) Patch (striosome) neurons that predict reward to generate an RPE (reward prediction error). It is a testbed for learning the quantitative value representations needed for this."
}
// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
package vspatch
import (
"cogentcore.org/core/enums"
)
var _ModesValues = []Modes{0, 1}
// ModesN is the highest valid value for type Modes, plus one.
//
//gosl:start
const ModesN Modes = 2
//gosl:end
var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
// String returns the string representation of this Modes value.
func (i Modes) String() string { return enums.String(i, _ModesMap) }
// SetString sets the Modes value from its string representation,
// and returns an error if the string is invalid.
func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
// Int64 returns the Modes value as an int64.
func (i Modes) Int64() int64 { return int64(i) }
// SetInt64 sets the Modes value from an int64.
func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
// Desc returns the description of the Modes value.
func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
// ModesValues returns all possible values for the type Modes.
func ModesValues() []Modes { return _ModesValues }
// Values returns all possible values for the type Modes.
func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
var _LevelsValues = []Levels{0, 1, 2, 3, 4}
// LevelsN is the highest valid value for type Levels, plus one.
//
//gosl:start
const LevelsN Levels = 5
//gosl:end
var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Theta`: 1, `Trial`: 2, `Epoch`: 3, `Run`: 4}
var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``}
var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Theta`, 2: `Trial`, 3: `Epoch`, 4: `Run`}
// String returns the string representation of this Levels value.
func (i Levels) String() string { return enums.String(i, _LevelsMap) }
// SetString sets the Levels value from its string representation,
// and returns an error if the string is invalid.
func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
// Int64 returns the Levels value as an int64.
func (i Levels) Int64() int64 { return int64(i) }
// SetInt64 sets the Levels value from an int64.
func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
// Desc returns the description of the Levels value.
func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
// LevelsValues returns all possible values for the type Levels.
func LevelsValues() []Levels { return _LevelsValues }
// Values returns all possible values for the type Levels.
func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
var _StatsPhaseValues = []StatsPhase{0, 1}
// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
//
//gosl:start
const StatsPhaseN StatsPhase = 2
//gosl:end
var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
// String returns the string representation of this StatsPhase value.
func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
// SetString sets the StatsPhase value from its string representation,
// and returns an error if the string is invalid.
func (i *StatsPhase) SetString(s string) error {
return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
}
// Int64 returns the StatsPhase value as an int64.
func (i StatsPhase) Int64() int64 { return int64(i) }
// SetInt64 sets the StatsPhase value from an int64.
func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
// Desc returns the description of the StatsPhase value.
func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
// StatsPhaseValues returns all possible values for the type StatsPhase.
func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
// Values returns all possible values for the type StatsPhase.
func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
// MarshalText implements the [encoding.TextMarshaler] interface.
func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (i *StatsPhase) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "StatsPhase")
}
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vspatch
import "github.com/emer/axon/v2/axon"
// LayerParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "clamp gain makes big diff on overall excitation, gating propensity",
Set: func(ly *axon.LayerParams) {
ly.Acts.Clamp.Ge = 1.0 // 1.5 is def, was 0.6 (too low)
}},
{Sel: "#State", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.ActAvg.Nominal = 0.2
}},
{Sel: ".VSPatchLayer", Doc: "",
Set: func(ly *axon.LayerParams) {
ly.Inhib.Pool.On.SetBool(false)
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5 // 0.5 needed for differentiated reps
ly.Learn.NeuroMod.DipGain = 1 // boa requires balanced..
ly.Learn.TrgAvgAct.GiBaseInit = 0 // 0.5 default; 0 better
ly.Learn.RLRate.SigmoidMin = 1 // 0.05 def; 1 causes positive DA bias
ly.Learn.NeuroMod.AChLRateMod = 0
ly.Learn.NeuroMod.DAModGain = 0 // this is actual perf mod
}},
},
}
// PathParams sets the minimal non-default params.
// Base is always applied, and others can be optionally selected to apply on top of that.
var PathParams = axon.PathSheets{
"Base": {
{Sel: ".VSPatchPath", Doc: "",
Set: func(pt *axon.PathParams) {
pt.PathScale.Abs = 2
pt.Learn.DWt.LearnThr = 0
pt.Learn.LRate.Base = 0.02 // 0.02 necc to fit closely; no bene for 0.01
pt.SWts.Init.Mean = 0.5
pt.SWts.Init.Var = 0.25
}},
},
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// vspatch simulates the Ventral Striatum (VS) Patch (striosome) neurons
// that learn to predict graded reward values, generating a reward
// prediction error (RPE) signal via the dopamine system.
package vspatch
//go:generate core generate -add-types -add-funcs -gosl
import (
"fmt"
"math/rand/v2"
"os"
"reflect"
"cogentcore.org/core/base/reflectx"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/plot"
"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/paths"
)
// Modes are the looping modes (Stacks) for running and statistics.
type Modes int32 //enums:enum
const (
Train Modes = iota
Test
)
// Levels are the looping levels for running and statistics.
type Levels int32 //enums:enum
const (
Cycle Levels = iota
Theta
Trial
Epoch
Run
)
// StatsPhase is the phase of stats processing for given mode, level.
// Accumulated values are reset at Start, added each Step.
type StatsPhase int32 //enums:enum
const (
Start StatsPhase = iota
Step
)
// see params.go for params
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
// as arguments to methods, and provides the core GUI interface (note the view tags
// for the fields which provide hints to how things should be displayed).
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
Config *Config `new-window:"+"`
// Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
// Params manages network parameter setting.
Params axon.Params `display:"inline"`
// Loops are the control loops for running the sim, in different Modes
// across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
// Envs provides mode-string based storage of environments.
Envs env.Envs `new-window:"+" display:"no-inline"`
// TrainUpdate has Train mode netview update parameters.
TrainUpdate axon.NetViewUpdate `display:"inline"`
// TestUpdate has Test mode netview update parameters.
TestUpdate axon.NetViewUpdate `display:"inline"`
// Root is the root tensorfs directory, where all stats and other misc sim data goes.
Root *tensorfs.Node `display:"-"`
// Stats has the stats directory within Root.
Stats *tensorfs.Node `display:"-"`
// Current has the current stats values within Stats.
Current *tensorfs.Node `display:"-"`
// StatFuncs are statistics functions called at given mode and level,
// to perform all stats computations. phase = Start does init at start of given level,
// and all initialization / configuration (called during Init too).
StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
// GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
// RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
func (ss *Sim) SetConfig(cfg *Config) { ss.Config = cfg }
func (ss *Sim) Body() *core.Body { return ss.GUI.Body }
func (ss *Sim) ConfigSim() {
ss.Root, _ = tensorfs.NewDir("Root")
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.GPU {
gpu.SelectAdapter = ss.Config.Run.GPUDevice
axon.GPUInit()
axon.UseGPU = true
}
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigLoops()
ss.ConfigStats()
// if ss.Config.GPU {
// fmt.Println(axon.GPUSystem.Vars().StringDoc())
// }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
func (ss *Sim) ConfigEnv() {
// Can be called multiple times -- don't re-create
newEnv := (len(ss.Envs) == 0)
var trn0 *VSPatchEnv
for di := 0; di < ss.Config.Run.NData; di++ {
var trn, tst *VSPatchEnv
if newEnv {
trn = &VSPatchEnv{}
tst = &VSPatchEnv{}
} else {
trn = ss.Envs.ByModeDi(etime.Train, di).(*VSPatchEnv)
tst = ss.Envs.ByModeDi(etime.Test, di).(*VSPatchEnv)
}
// note: names must be standard here!
trn.Name = env.ModeDi(etime.Train, di)
trn.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(trn, ss.Config.Env.Env)
}
trn.Config(etime.Train, di, 73+int64(di)*73)
if di == 0 {
trn.ConfigPats()
trn0 = trn
} else {
trn.Pats = trn0.Pats
}
tst.Name = env.ModeDi(etime.Test, di)
tst.Defaults()
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
}
tst.Config(etime.Test, di, 181+int64(di)*181)
tst.Pats = trn0.Pats
trn.Init(0)
tst.Init(0)
// note: names must be in place when adding
ss.Envs.Add(trn, tst)
if di == 0 {
ss.ConfigRubicon(trn)
}
}
}
func (ss *Sim) ConfigRubicon(trn *VSPatchEnv) {
pv := &ss.Net.Rubicon
pv.SetNUSs(1, 1)
pv.Defaults()
pv.Urgency.U50 = 20 // 20 def
pv.LHb.VSPatchGain = 3
pv.LHb.VSPatchNonRewThr = 0.1
pv.USs.PVposGain = 10
}
func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.Context().SetISICycles(int32(ss.Config.Run.ISICycles)).
SetMinusCycles(int32(ss.Config.Run.MinusCycles)).
SetPlusCycles(int32(ss.Config.Run.PlusCycles)).Update()
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
ev := ss.Envs.ByModeDi(Train, 0).(*VSPatchEnv)
space := float32(2)
full := paths.NewFull()
// mtxRandPath := paths.NewPoolUniformRand()
// mtxRandPath.PCon = 0.5
// _ = mtxRandPath
in := net.AddLayer2D("State", axon.InputLayer, ev.NUnitsY, ev.NUnitsX)
vSpatchD1, vSpatchD2 := net.AddVSPatchLayers("", 1, 6, 6, space)
net.ConnectToVSPatch(in, vSpatchD1, vSpatchD2, full)
net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
ss.Params.Script = ss.Config.Params.Script
ss.Params.ApplyAll(ss.Net)
}
//////// Init, utils
// Init restarts the run and initializes everything, including network weights,
// and resets the epoch log table.
func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.SetRunName()
ss.InitRandSeed(0)
ss.ConfigEnv() // always do -- otherwise env params are not reset after a run
ss.ApplyParams()
ss.StatsInit()
ss.NewRun()
ss.TrainUpdate.RecordSyns()
ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run)
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
// NetViewUpdater returns the NetViewUpdate for given mode.
func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
if mode.Int64() == Train.Int64() {
return &ss.TrainUpdate
}
return &ss.TestUpdate
}
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
ev := ss.Envs.ByModeDi(Train, 0).(*VSPatchEnv)
trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
cycles := ss.Config.Run.Cycles()
ls.AddStack(Train, Trial).
AddLevel(Run, ss.Config.Run.Runs).
AddLevel(Epoch, ss.Config.Run.Epochs).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Theta, ev.Thetas).
AddLevel(Cycle, cycles)
ls.AddStack(Test, Trial).
AddLevel(Epoch, 1).
AddLevelIncr(Trial, trials, ss.Config.Run.NData).
AddLevel(Theta, ev.Thetas).
AddLevel(Cycle, cycles)
axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, Cycle, Theta, Train,
func(mode enums.Enum) { ss.Net.ClearInputs() },
func(mode enums.Enum) {
trial := ls.Stacks[mode].Loops[Trial].Counter.Cur
theta := ls.Stacks[mode].Loops[Theta].Counter.Cur
ss.ApplyInputs(mode.(Modes), trial, theta)
},
)
ls.Stacks[Train].OnInit.Add("Init", ss.Init)
ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
ls.AddOnStartToAll("StatsStart", ss.StatsStart)
ls.AddOnEndToAll("StatsStep", ss.StatsStep)
trainEpoch := ls.Loop(Train, Epoch)
trainEpoch.OnEnd.Add("NewConds", func() {
trnEpc := trainEpoch.Counter.Cur
if trnEpc > 1 && trnEpc%ss.Config.Run.CondEpochs == 0 {
ord := rand.Perm(ev.NConds)
for di := 0; di < ss.Config.Run.NData; di++ {
ev := ss.Envs.ByModeDi(etime.Train, di).(*VSPatchEnv)
ev.SetCondValuesPermute(ord)
}
}
})
ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
if ss.Config.GUI {
axon.LooperUpdateNetView(ls, Cycle, Theta, ss.NetViewUpdater)
ls.Stacks[Train].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
ls.Stacks[Test].OnInit.Add("GUI-Init", ss.GUI.UpdateWindow)
}
if ss.Config.Debug {
mpi.Println(ls.DocString())
}
ss.Loops = ls
}
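// vsLoopSizesSketch (not part of the original source) illustrates the extra
// Theta level used in this sim and the NData rounding: each Trial contains
// ev.Thetas Theta phases of Cycles() cycles each, and the Trial loop advances
// NData (default 16) data-parallel items per step. If Trials is not an even
// multiple of NData, IntMultipleGE rounds it up, e.g. 250 -> 256 for NData=16.
func vsLoopSizesSketch() {
	trials := int(math32.IntMultipleGE(float32(250), float32(16)))
	fmt.Println("rounded trial loop max:", trials) // 256
}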
// ApplyInputs applies input patterns from given environment for given mode.
// Any other start-of-trial logic can also be put here.
func (ss *Sim) ApplyInputs(mode Modes, trial, theta int) {
net := ss.Net
ndata := int(net.Context().NData)
curModeDir := ss.Current.Dir(mode.String())
lays := []string{"State"}
net.InitExt()
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*VSPatchEnv)
ev.Step()
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
if st != nil {
ly.ApplyExt(uint32(di), st)
}
}
curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
ss.ApplyRubicon(ev, mode, theta, uint32(di))
}
net.ApplyExts()
}
// ApplyRubicon applies Rubicon reward inputs
func (ss *Sim) ApplyRubicon(ev *VSPatchEnv, mode Modes, theta int, di uint32) {
pv := &ss.Net.Rubicon
pv.NewState(di, &ss.Net.Rand) // first before anything else is updated
pv.EffortUrgencyUpdate(di, 1)
// if mode == Test {
pv.Urgency.Reset(di)
// }
if theta == ev.Thetas-1 {
axon.GlobalScalars.Set(1, int(axon.GvACh), int(di))
ss.ApplyRew(di, ev.Rew)
} else {
ss.ApplyRew(di, 0)
axon.GlobalScalars.Set(0, int(axon.GvACh), int(di))
}
}
// ApplyRew applies the given reward value as a positive or negative US input via Rubicon.
func (ss *Sim) ApplyRew(di uint32, rew float32) {
pv := &ss.Net.Rubicon
if rew > 0 {
pv.SetUS(di, axon.Positive, 0, rew)
} else if rew < 0 {
pv.SetUS(di, axon.Negative, 0, -rew)
}
pv.SetDrives(di, 1, 1)
pv.Step(di, &ss.Net.Rand)
// normally set by VTA layer, including CS:
lhbDA := axon.GlobalScalars.Value(int(axon.GvLHbPVDA), int(di))
axon.GlobalScalars.Set(lhbDA, int(axon.GvDA), int(di))
}
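// applyRewSketch (not part of the original source) shows the intended calling
// pattern for ApplyRew: a positive value becomes a positive US, a negative
// value a negative US, and zero leaves both unset; in every case dopamine
// (GvDA) is then driven directly from the LHb PV dopamine value, since there
// is no VTA layer in this simple network.
func (ss *Sim) applyRewSketch(di uint32) {
	ss.ApplyRew(di, 0.5)  // positive US with magnitude 0.5
	ss.ApplyRew(di, -0.5) // negative US with magnitude 0.5
	ss.ApplyRew(di, 0)    // no US on this theta cycle
}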
// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
ctx := ss.Net.Context()
run := ss.Loops.Loop(Train, Run).Counter.Cur
ss.InitRandSeed(run)
for di := 0; di < int(ctx.NData); di++ {
ss.Envs.ByModeDi(Train, di).Init(run)
ss.Envs.ByModeDi(Test, di).Init(run)
}
ctx.Reset()
ss.Net.InitWeights()
}
//////// Stats
// AddStat adds a stat compute function.
func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
ss.StatFuncs = append(ss.StatFuncs, f)
}
// StatsStart is called by Looper at the start of given level, for each iteration.
// It needs to call RunStats Start at the next level down.
// e.g., each Epoch is the start of the full set of Trial Steps.
func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level <= Theta {
return
}
ss.RunStats(mode, level-1, Start)
}
// StatsStep is called by Looper at each step of iteration,
// where it accumulates the stat results.
func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
mode := lmd.(Modes)
level := ltm.(Levels)
if level == Cycle {
return
}
ss.RunStats(mode, level, Step)
tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
// RunStats runs the StatFuncs for given mode, level and phase.
func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
for _, sf := range ss.StatFuncs {
sf(mode, level, phase)
}
if phase == Step && ss.GUI.Tabs != nil {
nm := mode.String() + " " + level.String() + " Plot"
ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
ss.GUI.Tabs.AsLab().GoUpdatePlot("Train TrialAll Plot")
}
}
// SetRunName sets the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
return ss.Current.StringValue("RunName", 1).String1D(0)
}
// StatsInit initializes all the stats by calling Start across all modes and levels.
func (ss *Sim) StatsInit() {
for md, st := range ss.Loops.Stacks {
mode := md.(Modes)
for _, lev := range st.Order {
level := lev.(Levels)
if level == Cycle {
continue
}
ss.RunStats(mode, level, Start)
}
}
if ss.GUI.Tabs != nil {
tbs := ss.GUI.Tabs.AsLab()
_, idx := tbs.CurrentTab()
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
tbs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Epoch))
tbs.SelectTabIndex(idx)
}
}
// ConfigStats configures the functions that do all stats computation
// in the tensorfs system.
func (ss *Sim) ConfigStats() {
net := ss.Net
ss.Stats = ss.Root.Dir("Stats")
ss.Current = ss.Stats.Dir("Current")
ss.SetRunName()
// last arg(s) are levels to exclude
counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
counterFunc(mode, level, phase == Start)
})
runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
runNameFunc(mode, level, phase == Start)
})
trialNameFunc := axon.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
trialNameFunc(mode, level, phase == Start)
})
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"Cond", "CondRew", "Rew", "RewPred", "DA", "RewPred_NR", "DA_NR"}
numStats := len(statNames)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for si, name := range statNames {
modeDir := ss.Stats.Dir(mode.String())
curModeDir := ss.Current.Dir(mode.String())
levelDir := modeDir.Dir(level.String())
subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
plot.SetFirstStyler(tsr, func(s *plot.Style) {
s.Range.SetMin(0).SetMax(1)
if si >= 2 && si <= 5 {
s.On = true
}
})
if si == numStats-1 && level == Epoch {
all, _ := levelDir.Values()
for _, ts := range all {
ts.(tensor.Values).SetNumRows(0)
}
}
continue
}
switch level {
case Trial, Theta:
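// per-trial / per-theta stats: read values from the env and global scalars
// for each data-parallel index, save to Current, and append one row per di.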
for di := range ndata {
ev := ss.Envs.ByModeDi(mode, di).(*VSPatchEnv)
theta := ev.Theta.Cur
var stat float32
switch name {
case "Cond":
stat = float32(ev.Cond)
case "CondRew":
stat = ev.CondRew
case "Rew":
stat = axon.GlobalScalars.Value(int(axon.GvRew), di)
case "RewPred":
stat = axon.GlobalScalars.Value(int(axon.GvRewPred), di)
ev.RewPred = stat
if level == Theta && theta == ev.Thetas-1 {
ev.RewPred_NR = stat
}
case "DA":
stat = axon.GlobalScalars.Value(int(axon.GvDA), di)
ev.DA = stat
if level == Theta && theta == ev.Thetas-1 {
ev.DA_NR = stat
}
case "RewPred_NR":
stat = ev.RewPred_NR
case "DA_NR":
stat = ev.DA_NR
}
curModeDir.Float32(name, ndata).SetFloat1D(float64(stat), di)
tsr.AppendRowFloat(float64(stat))
}
case Epoch:
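// epoch stats: mean over the lower-level rows; the first stat (Cond) just
// establishes the grouping by condition, and the rest add per-condition group means.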
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
if si == 0 {
stats.Groups(curModeDir, subDir.Value("Cond"))
break
} else {
stats.GroupStats(curModeDir, stats.StatMean, subDir.Value(name))
}
// note: results go under Group name: Cond
gp := curModeDir.Dir("Stats/Cond/" + name).Value("Mean")
plot.SetFirstStyler(gp, func(s *plot.Style) {
s.Range.SetMin(0)
if si >= 2 && si <= 4 {
s.On = true
}
})
if si == numStats-1 {
nrows := gp.DimSize(0)
row := curModeDir.Dir("Stats").Int("Row", nrows)
for i := range nrows {
row.Set(i, i)
}
_, idx := ss.GUI.Tabs.AsLab().CurrentTab()
gpst := curModeDir.Dir("Stats/Cond").Value("Cond")
for j := range gpst.DimSize(0) {
val := gpst.String1D(j)
for si, name := range statNames {
if si == 0 {
continue
}
svals := curModeDir.Dir("Stats/Cond/" + name).Value("Mean")
snm := "Cond_" + val + "_" + name
tsr := levelDir.Float64(snm)
tsr.AppendRowFloat(svals.Float1D(j))
}
}
ss.GUI.Tabs.AsLab().PlotTensorFS(curModeDir.Dir("Stats"))
ss.GUI.Tabs.AsLab().SelectTabIndex(idx)
}
case Run:
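// run stats: record the final epoch value.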
stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
tsr.AppendRowFloat(stat)
}
}
})
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
}
// StatCounters returns the counters string to show at the bottom of the netview.
func (ss *Sim) StatCounters(mode, level enums.Enum) string {
counters := ss.Loops.Stacks[mode].CountersString()
vu := ss.NetViewUpdater(mode)
if vu == nil || vu.View == nil {
return counters
}
di := vu.View.Di
counters += fmt.Sprintf(" Di: %d", di)
curModeDir := ss.Current.Dir(mode.String())
if curModeDir.Node("TrialName") == nil {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"Rew", "RewPred", "DA", "RewPred_NR", "DA_NR"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Value(name).Float1D(di))
}
return counters
}
//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI(b tree.Node) {
ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles()
nv.Options.Raster.Max = ss.Config.Run.Cycles()
nv.SetNet(ss.Net)
ss.TrainUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.TestUpdate.Config(nv, axon.Theta, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.15, 2.45)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
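// MakeToolbar adds the looper controls and other toolbar items to the GUI toolbar.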
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
tree.Add(p, func(w *core.Separator) {})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "New seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
Func: func() {
ss.RandSeeds.NewSeeds()
},
})
ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
core.TheApp.OpenURL(ss.Config.URL)
},
})
}
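// RunNoGUI runs the simulation without a GUI, using the Config settings
// for logging, number of runs, and starting run number.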
func (ss *Sim) RunNoGUI() {
ss.Init()
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
runName := ss.SetRunName()
netName := ss.Net.Name
cfg := &ss.Config.Log
axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
ss.Loops.Run(Train)
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/axon/v2/sims/vspatch"
"github.com/emer/emergent/v2/egui"
)
func main() { egui.Run[vspatch.Sim, vspatch.Config]() }
// Copyright (c) 2022, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vspatch
import (
"fmt"
"cogentcore.org/lab/base/randx"
"cogentcore.org/lab/patterns"
"cogentcore.org/lab/table"
"cogentcore.org/lab/tensor"
"cogentcore.org/lab/tensorfs"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
)
// VSPatchEnv implements a simple training environment for VSPatch reward
// prediction learning, with a given number of theta steps within
// an overall trial, that lead up to a reward outcome delivery at the end.
// There is a fixed progression of patterns for each theta step, generated from
// a prototypical pattern for each condition. Successive trials iterate through
// the different conditions, each of which has a different reward level.
// Rewards can be graded or probabilistic.
type VSPatchEnv struct {
// name of environment -- Train or Test
Name string
// training or testing env?
Mode etime.Modes
// Di is the data parallel index for this env, which determines
// the starting offset for the condition so that it matches
// what would happen sequentially.
Di int
// Trial is the outer-loop counter over thetas, iterating over conditions up to NConds
Trial env.Counter `display:"inline"`
// Theta is the counter for the theta step within the current trial
Theta env.Counter `display:"inline"`
// current condition index
Cond int
// current condition target reward
CondRew float32
// if true, reward value is a probability of getting a 1 reward
Probs bool
// number of conditions, each of which can have a different reward value
NConds int
// number of theta steps per trial
Thetas int
// current reward value for each condition
CondValues []float32
// state rep, number of units, Y
NUnitsY int `display:"-"`
// state rep, number of units, X
NUnitsX int `display:"-"`
// total number of units
NUnits int `display:"-"`
// condition, time-step patterns
Pats *table.Table
// pattern similarity matrix
// PatSimMat simat.SimMat
// random number generator for the env -- all random calls must use this
Rand randx.SysRand `display:"-"`
// random seed
RandSeed int64 `edit:"-"`
// named state tensors: State
States map[string]*tensor.Float32
// current reward value -- is 0 until final theta step.
Rew float32 `edit:"-"`
// reward prediction from model for current theta step.
RewPred float32 `edit:"-"`
// DA = reward prediction error on current theta step: Rew - RewPred
DA float32 `edit:"-"`
// non-reward prediction from model, from theta step just before reward.
RewPred_NR float32 `edit:"-"`
// DA = non-reward prediction error: Rew - RewPred_NR
DA_NR float32 `edit:"-"`
}
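// Label returns the name of the environment.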
func (ev *VSPatchEnv) Label() string { return ev.Name }
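// Defaults sets default parameter values.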
func (ev *VSPatchEnv) Defaults() {
ev.Probs = true
ev.NConds = 4
ev.Thetas = 3
ev.NUnitsY = 5
ev.NUnitsX = 5
ev.NUnits = ev.NUnitsY * ev.NUnitsX
}
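// String returns a string describing the current condition and reward,
// which is used as the TrialName in the stats.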
func (ev *VSPatchEnv) String() string {
return fmt.Sprintf("Cond_%d_Rew_%g", ev.Cond, ev.CondRew)
}
// SetCondValues sets the value of each condition, incrementing upward:
// value[i] = (i+1) / (NConds+1)
func (ev *VSPatchEnv) SetCondValues() {
pc := float32(1) / (float32(ev.NConds) + 1)
for i := 0; i < ev.NConds; i++ {
ev.CondValues[i] = float32(1+i) * pc
}
}
// SetCondValuesPermute sets the condition values in the given permuted order ord
func (ev *VSPatchEnv) SetCondValuesPermute(ord []int) {
pc := float32(1) / (float32(ev.NConds) + 1)
for i := 0; i < ev.NConds; i++ {
ev.CondValues[ord[i]] = float32(1+i) * pc
}
}
// Config configures the world
func (ev *VSPatchEnv) Config(mode etime.Modes, di int, rndseed int64) {
ev.Mode = mode
ev.Di = di
ev.RandSeed = rndseed
ev.Rand.NewRand(ev.RandSeed)
ev.States = make(map[string]*tensor.Float32)
ev.States["State"] = tensor.NewFloat32(ev.NUnitsY, ev.NUnitsX)
ev.CondValues = make([]float32, ev.NConds)
ev.Trial.Max = ev.NConds
ev.Theta.Max = ev.Thetas
ev.SetCondValues()
}
// ConfigPats configures patterns -- only done on the first env
func (ev *VSPatchEnv) ConfigPats() {
dir, _ := tensorfs.NewDir("pats")
pctAct := float32(0.2)
minDiff := float32(0.5)
flipPct := float32(0.2)
nUn := ev.NUnitsY * ev.NUnitsX
nOn := patterns.NFromPct(float64(pctAct), nUn)
flipBits := patterns.NFromPct(float64(flipPct), nOn)
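// prototype patterns: one per condition, each with nOn active units and
// at least minDiff * nOn bits different between any two prototypes.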
protos := dir.Float32("Protos", ev.NConds, ev.NUnitsY, ev.NUnitsX)
patterns.PermutedBinaryMinDiff(protos, nOn, 1, 0, int(float32(nOn)*minDiff))
npats := ev.NConds * ev.Thetas
ev.Pats = table.New()
ev.Pats.AddStringColumn("Name")
ev.Pats.AddFloat32Column("Input", ev.NUnitsY, ev.NUnitsX)
ev.Pats.SetNumRows(npats)
idx := 0
for i := 0; i < ev.NConds; i++ {
condNm := fmt.Sprintf("cond%d", i)
cond := dir.Float32(condNm)
patterns.ReplicateRows(cond, protos.SubSpace(i), ev.Thetas)
// tsr, _ := patterns.AddVocabRepeat(ev.PatVocab, condNm, ev.Thetas, "Protos", i)
patterns.FlipBitsRows(cond, flipBits, flipBits, 1, 0)
for j := 0; j < ev.Thetas; j++ {
ev.Pats.Column("Input").SetRowTensor(cond.SubSpace(j), idx+j)
ev.Pats.Column("Name").SetStringRow(fmt.Sprintf("Cond%d_Theta%d", i, j), idx+j, 0)
}
idx += ev.Thetas
}
// ev.PatSimMat.TableColumn(table.NewIndexView(ev.Pats), "Input", "Name", true, metric.Correlation64)
}
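// Init initializes the environment for a run, resetting the Trial and Theta counters.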
func (ev *VSPatchEnv) Init(run int) {
ev.Trial.Init()
ev.Theta.Init()
}
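// State returns the named state tensor (nil if not found).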
func (ev *VSPatchEnv) State(el string) tensor.Values {
return ev.States[el]
}
// RenderState renders the state pattern for the given condition and theta step
func (ev *VSPatchEnv) RenderState(cond, trial int) {
st := ev.States["State"]
idx := cond*ev.Thetas + trial
st.CopyFrom(ev.Pats.Column("Input").RowTensor(idx))
}
// Step does one step -- must set Theta.Cur first if doing testing
func (ev *VSPatchEnv) Step() bool {
cond := (ev.Di + ev.Trial.Cur) % ev.NConds
ev.Cond = cond
ev.RenderState(cond, ev.Theta.Cur)
ev.Rew = 0
rv := ev.CondValues[cond]
ev.CondRew = rv
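// reward is only delivered on the final theta step; if Probs, the condition
// value is the probability of a 1 reward, with 0.001 marking a non-rewarded outcome.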
if ev.Theta.Cur == ev.Thetas-1 {
if ev.Probs {
if randx.BoolP32(rv, &ev.Rand) {
ev.Rew = 1
} else {
ev.Rew = 0.001
}
} else {
ev.Rew = rv
}
}
ev.Trial.Same()
if ev.Theta.Incr() {
ev.Trial.Incr()
}
return true
}
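// Action is a no-op: this environment does not use agent actions.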
func (ev *VSPatchEnv) Action(action string, nop tensor.Values) {
}
// Code generated by 'yaegi extract github.com/emer/axon/v2/axon'. DO NOT EDIT.
package yaegiaxon
import (
"github.com/emer/axon/v2/axon"
"go/constant"
"go/token"
"reflect"
)
func init() {
Symbols["github.com/emer/axon/v2/axon/axon"] = map[string]reflect.Value{
// function, constant and variable definitions
"AMAct": reflect.ValueOf(axon.AMAct),
"AMAvgDif": reflect.ValueOf(axon.AMAvgDif),
"AMCaD": reflect.ValueOf(axon.AMCaD),
"AMCaP": reflect.ValueOf(axon.AMCaP),
"AMCaPMax": reflect.ValueOf(axon.AMCaPMax),
"AMCycle": reflect.ValueOf(axon.AMCycle),
"AMGeInt": reflect.ValueOf(axon.AMGeInt),
"AMGiInt": reflect.ValueOf(axon.AMGiInt),
"AMMinus": reflect.ValueOf(axon.AMMinus),
"AMPlus": reflect.ValueOf(axon.AMPlus),
"AMPrev": reflect.ValueOf(axon.AMPrev),
"Act": reflect.ValueOf(axon.Act),
"ActAvg": reflect.ValueOf(axon.ActAvg),
"ActInt": reflect.ValueOf(axon.ActInt),
"ActM": reflect.ValueOf(axon.ActM),
"ActP": reflect.ValueOf(axon.ActP),
"AdaptGiLayer": reflect.ValueOf(axon.AdaptGiLayer),
"Alpha": reflect.ValueOf(axon.Alpha),
"ApplyExtsNeuron": reflect.ValueOf(axon.ApplyExtsNeuron),
"ApplyLayerSearch": reflect.ValueOf(axon.ApplyLayerSearch),
"ApplyLayerSheet": reflect.ValueOf(axon.ApplyLayerSheet),
"ApplyParamSheets": reflect.ValueOf(axon.ApplyParamSheets),
"ApplyPathSearch": reflect.ValueOf(axon.ApplyPathSearch),
"ApplyPathSheet": reflect.ValueOf(axon.ApplyPathSheet),
"Avg": reflect.ValueOf(axon.Avg),
"AvgDif": reflect.ValueOf(axon.AvgDif),
"AvgMaxIntVarIndex": reflect.ValueOf(axon.AvgMaxIntVarIndex),
"AvgMaxN": reflect.ValueOf(axon.AvgMaxN),
"AvgMaxPhasesN": reflect.ValueOf(axon.AvgMaxPhasesN),
"AvgMaxPhasesValues": reflect.ValueOf(axon.AvgMaxPhasesValues),
"AvgMaxValues": reflect.ValueOf(axon.AvgMaxValues),
"AvgMaxVarIndex": reflect.ValueOf(axon.AvgMaxVarIndex),
"AvgMaxVarsN": reflect.ValueOf(axon.AvgMaxVarsN),
"AvgMaxVarsValues": reflect.ValueOf(axon.AvgMaxVarsValues),
"AvgPct": reflect.ValueOf(axon.AvgPct),
"BGThalLayer": reflect.ValueOf(axon.BGThalLayer),
"BLALayer": reflect.ValueOf(axon.BLALayer),
"BLAPath": reflect.ValueOf(axon.BLAPath),
"BackPath": reflect.ValueOf(axon.BackPath),
"Beta": reflect.ValueOf(axon.Beta),
"Beta1": reflect.ValueOf(axon.Beta1),
"Beta1Neuron": reflect.ValueOf(axon.Beta1Neuron),
"Beta2": reflect.ValueOf(axon.Beta2),
"Beta2Neuron": reflect.ValueOf(axon.Beta2Neuron),
"BetweenGi": reflect.ValueOf(axon.BetweenGi),
"BorrowedGPU": reflect.ValueOf(&axon.BorrowedGPU).Elem(),
"Burst": reflect.ValueOf(axon.Burst),
"BurstPrv": reflect.ValueOf(axon.BurstPrv),
"CNIOPath": reflect.ValueOf(axon.CNIOPath),
"CNeLayer": reflect.ValueOf(axon.CNeLayer),
"CNeUpPath": reflect.ValueOf(axon.CNeUpPath),
"CNiIOLayer": reflect.ValueOf(axon.CNiIOLayer),
"CNiUpLayer": reflect.ValueOf(axon.CNiUpLayer),
"CTCtxtPath": reflect.ValueOf(axon.CTCtxtPath),
"CTLayer": reflect.ValueOf(axon.CTLayer),
"CaBinCycles": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"CaBinForCycle": reflect.ValueOf(axon.CaBinForCycle),
"CaBinIncrement": reflect.ValueOf(axon.CaBinIncrement),
"CaBins": reflect.ValueOf(axon.CaBins),
"CaD": reflect.ValueOf(axon.CaD),
"CaDPrev": reflect.ValueOf(axon.CaDPrev),
"CaDiff": reflect.ValueOf(axon.CaDiff),
"CaM": reflect.ValueOf(axon.CaM),
"CaP": reflect.ValueOf(axon.CaP),
"CaPMax": reflect.ValueOf(axon.CaPMax),
"CaPMaxCa": reflect.ValueOf(axon.CaPMaxCa),
"CaSyn": reflect.ValueOf(axon.CaSyn),
"CeMLayer": reflect.ValueOf(axon.CeMLayer),
"Clamped": reflect.ValueOf(axon.Clamped),
"CloseLogFiles": reflect.ValueOf(axon.CloseLogFiles),
"CompareLayer": reflect.ValueOf(axon.CompareLayer),
"ComputeGPU": reflect.ValueOf(&axon.ComputeGPU).Elem(),
"ContextG": reflect.ValueOf(axon.ContextG),
"Cost": reflect.ValueOf(axon.Cost),
"Ctx": reflect.ValueOf(&axon.Ctx).Elem(),
"CtxVar": reflect.ValueOf(axon.CtxVar),
"CtxtGe": reflect.ValueOf(axon.CtxtGe),
"CtxtGeOrig": reflect.ValueOf(axon.CtxtGeOrig),
"CtxtGeRaw": reflect.ValueOf(axon.CtxtGeRaw),
"CurrentNetwork": reflect.ValueOf(&axon.CurrentNetwork).Elem(),
"Cycle": reflect.ValueOf(axon.Cycle),
"CycleInc": reflect.ValueOf(axon.CycleInc),
"CycleNeuron": reflect.ValueOf(axon.CycleNeuron),
"CyclePost": reflect.ValueOf(axon.CyclePost),
"D1AbsMod": reflect.ValueOf(axon.D1AbsMod),
"D1Mod": reflect.ValueOf(axon.D1Mod),
"D2Mod": reflect.ValueOf(axon.D2Mod),
"DAModTypesN": reflect.ValueOf(axon.DAModTypesN),
"DAModTypesValues": reflect.ValueOf(axon.DAModTypesValues),
"DSMatrixLayer": reflect.ValueOf(axon.DSMatrixLayer),
"DSMatrixPath": reflect.ValueOf(axon.DSMatrixPath),
"DSPatchLayer": reflect.ValueOf(axon.DSPatchLayer),
"DSPatchPath": reflect.ValueOf(axon.DSPatchPath),
"DSWt": reflect.ValueOf(axon.DSWt),
"DTr": reflect.ValueOf(axon.DTr),
"DTrgAvg": reflect.ValueOf(axon.DTrgAvg),
"DWt": reflect.ValueOf(axon.DWt),
"DWtFromDiSyn": reflect.ValueOf(axon.DWtFromDiSyn),
"DWtSubMeanNeuron": reflect.ValueOf(axon.DWtSubMeanNeuron),
"DWtSyn": reflect.ValueOf(axon.DWtSyn),
"DiDWt": reflect.ValueOf(axon.DiDWt),
"DrivesLayer": reflect.ValueOf(axon.DrivesLayer),
"ETrLearn": reflect.ValueOf(axon.ETrLearn),
"ETrace": reflect.ValueOf(axon.ETrace),
"ExcitatoryG": reflect.ValueOf(axon.ExcitatoryG),
"Ext": reflect.ValueOf(axon.Ext),
"Exts": reflect.ValueOf(&axon.Exts).Elem(),
"ExtsVar": reflect.ValueOf(axon.ExtsVar),
"FBsRawInt": reflect.ValueOf(axon.FBsRawInt),
"FFsRawInt": reflect.ValueOf(axon.FFsRawInt),
"FastSpike": reflect.ValueOf(axon.FastSpike),
"ForwardPath": reflect.ValueOf(axon.ForwardPath),
"GMaintRaw": reflect.ValueOf(axon.GMaintRaw),
"GMaintSyn": reflect.ValueOf(axon.GMaintSyn),
"GModRaw": reflect.ValueOf(axon.GModRaw),
"GModSyn": reflect.ValueOf(axon.GModSyn),
"GPLayer": reflect.ValueOf(axon.GPLayer),
"GPLayerTypesN": reflect.ValueOf(axon.GPLayerTypesN),
"GPLayerTypesValues": reflect.ValueOf(axon.GPLayerTypesValues),
"GPUInit": reflect.ValueOf(axon.GPUInit),
"GPUInitialized": reflect.ValueOf(&axon.GPUInitialized).Elem(),
"GPURelease": reflect.ValueOf(axon.GPURelease),
"GPUSystem": reflect.ValueOf(&axon.GPUSystem).Elem(),
"GPUTestWrite": reflect.ValueOf(axon.GPUTestWrite),
"GPUVarsN": reflect.ValueOf(axon.GPUVarsN),
"GPUVarsValues": reflect.ValueOf(axon.GPUVarsValues),
"GPeAk": reflect.ValueOf(axon.GPeAk),
"GPeAkDoc": reflect.ValueOf(&axon.GPeAkDoc).Elem(),
"GPePr": reflect.ValueOf(axon.GPePr),
"GPePrDoc": reflect.ValueOf(&axon.GPePrDoc).Elem(),
"GPi": reflect.ValueOf(axon.GPi),
"GPiDoc": reflect.ValueOf(&axon.GPiDoc).Elem(),
"GaD": reflect.ValueOf(axon.GaD),
"GaM": reflect.ValueOf(axon.GaM),
"GaP": reflect.ValueOf(axon.GaP),
"GababM": reflect.ValueOf(axon.GababM),
"GababX": reflect.ValueOf(axon.GababX),
"Gak": reflect.ValueOf(axon.Gak),
"Gamma": reflect.ValueOf(axon.Gamma),
"GatedRT": reflect.ValueOf(axon.GatedRT),
"GatherSpikes": reflect.ValueOf(axon.GatherSpikes),
"Ge": reflect.ValueOf(axon.Ge),
"GeBase": reflect.ValueOf(axon.GeBase),
"GeExt": reflect.ValueOf(axon.GeExt),
"GeExtRawInt": reflect.ValueOf(axon.GeExtRawInt),
"GeInt": reflect.ValueOf(axon.GeInt),
"GeIntNorm": reflect.ValueOf(axon.GeIntNorm),
"GeNoise": reflect.ValueOf(axon.GeNoise),
"GeNoiseP": reflect.ValueOf(axon.GeNoiseP),
"GeRaw": reflect.ValueOf(axon.GeRaw),
"GeSyn": reflect.ValueOf(axon.GeSyn),
"GetCtx": reflect.ValueOf(axon.GetCtx),
"GetLayers": reflect.ValueOf(axon.GetLayers),
"GetNetworkIxs": reflect.ValueOf(axon.GetNetworkIxs),
"GetPaths": reflect.ValueOf(axon.GetPaths),
"GetRandomNumber": reflect.ValueOf(axon.GetRandomNumber),
"GgabaB": reflect.ValueOf(axon.GgabaB),
"Gi": reflect.ValueOf(axon.Gi),
"GiBase": reflect.ValueOf(axon.GiBase),
"GiInt": reflect.ValueOf(axon.GiInt),
"GiNoise": reflect.ValueOf(axon.GiNoise),
"GiNoiseP": reflect.ValueOf(axon.GiNoiseP),
"GiRaw": reflect.ValueOf(axon.GiRaw),
"GiSyn": reflect.ValueOf(axon.GiSyn),
"Gk": reflect.ValueOf(axon.Gk),
"Gkir": reflect.ValueOf(axon.Gkir),
"GknaMed": reflect.ValueOf(axon.GknaMed),
"GknaSlow": reflect.ValueOf(axon.GknaSlow),
"GlobalScalarVarsN": reflect.ValueOf(axon.GlobalScalarVarsN),
"GlobalScalarVarsValues": reflect.ValueOf(axon.GlobalScalarVarsValues),
"GlobalScalars": reflect.ValueOf(&axon.GlobalScalars).Elem(),
"GlobalScalarsVar": reflect.ValueOf(axon.GlobalScalarsVar),
"GlobalSetRew": reflect.ValueOf(axon.GlobalSetRew),
"GlobalVectorVarsN": reflect.ValueOf(axon.GlobalVectorVarsN),
"GlobalVectorVarsValues": reflect.ValueOf(axon.GlobalVectorVarsValues),
"GlobalVectors": reflect.ValueOf(&axon.GlobalVectors).Elem(),
"GlobalVectorsVar": reflect.ValueOf(axon.GlobalVectorsVar),
"GlobalsReset": reflect.ValueOf(axon.GlobalsReset),
"Gmahp": reflect.ValueOf(axon.Gmahp),
"Gnmda": reflect.ValueOf(axon.Gnmda),
"GnmdaLrn": reflect.ValueOf(axon.GnmdaLrn),
"GnmdaMaint": reflect.ValueOf(axon.GnmdaMaint),
"GnmdaSyn": reflect.ValueOf(axon.GnmdaSyn),
"Gsahp": reflect.ValueOf(axon.Gsahp),
"Gsk": reflect.ValueOf(axon.Gsk),
"GvACh": reflect.ValueOf(axon.GvACh),
"GvAChRaw": reflect.ValueOf(axon.GvAChRaw),
"GvCaBinWts": reflect.ValueOf(axon.GvCaBinWts),
"GvCeMneg": reflect.ValueOf(axon.GvCeMneg),
"GvCeMpos": reflect.ValueOf(axon.GvCeMpos),
"GvContProgress": reflect.ValueOf(axon.GvContProgress),
"GvContSum": reflect.ValueOf(axon.GvContSum),
"GvContTiming": reflect.ValueOf(axon.GvContTiming),
"GvContUtility": reflect.ValueOf(axon.GvContUtility),
"GvCost": reflect.ValueOf(axon.GvCost),
"GvCostRaw": reflect.ValueOf(axon.GvCostRaw),
"GvCuriosityPoolGated": reflect.ValueOf(axon.GvCuriosityPoolGated),
"GvDA": reflect.ValueOf(axon.GvDA),
"GvDAtonic": reflect.ValueOf(axon.GvDAtonic),
"GvDrives": reflect.ValueOf(axon.GvDrives),
"GvEffort": reflect.ValueOf(axon.GvEffort),
"GvGaveUp": reflect.ValueOf(axon.GvGaveUp),
"GvGiveUp": reflect.ValueOf(axon.GvGiveUp),
"GvGiveUpProb": reflect.ValueOf(axon.GvGiveUpProb),
"GvGiveUpProgress": reflect.ValueOf(axon.GvGiveUpProgress),
"GvGiveUpSum": reflect.ValueOf(axon.GvGiveUpSum),
"GvGiveUpTiming": reflect.ValueOf(axon.GvGiveUpTiming),
"GvGiveUpUtility": reflect.ValueOf(axon.GvGiveUpUtility),
"GvGoalDistEst": reflect.ValueOf(axon.GvGoalDistEst),
"GvGoalDistPrev": reflect.ValueOf(axon.GvGoalDistPrev),
"GvGoalMaint": reflect.ValueOf(axon.GvGoalMaint),
"GvHadNegUSOutcome": reflect.ValueOf(axon.GvHadNegUSOutcome),
"GvHadPosUS": reflect.ValueOf(axon.GvHadPosUS),
"GvHadRew": reflect.ValueOf(axon.GvHadRew),
"GvHasPosUS": reflect.ValueOf(axon.GvHasPosUS),
"GvHasRew": reflect.ValueOf(axon.GvHasRew),
"GvLHbBurst": reflect.ValueOf(axon.GvLHbBurst),
"GvLHbDip": reflect.ValueOf(axon.GvLHbDip),
"GvLHbPVDA": reflect.ValueOf(axon.GvLHbPVDA),
"GvNE": reflect.ValueOf(axon.GvNE),
"GvNegUSOutcome": reflect.ValueOf(axon.GvNegUSOutcome),
"GvOFCposPTMaint": reflect.ValueOf(axon.GvOFCposPTMaint),
"GvPVneg": reflect.ValueOf(axon.GvPVneg),
"GvPVnegEst": reflect.ValueOf(axon.GvPVnegEst),
"GvPVnegSum": reflect.ValueOf(axon.GvPVnegSum),
"GvPVnegVar": reflect.ValueOf(axon.GvPVnegVar),
"GvPVpos": reflect.ValueOf(axon.GvPVpos),
"GvPVposEst": reflect.ValueOf(axon.GvPVposEst),
"GvPVposSum": reflect.ValueOf(axon.GvPVposSum),
"GvPVposVar": reflect.ValueOf(axon.GvPVposVar),
"GvPrevPred": reflect.ValueOf(axon.GvPrevPred),
"GvProgressRate": reflect.ValueOf(axon.GvProgressRate),
"GvRew": reflect.ValueOf(axon.GvRew),
"GvRewPred": reflect.ValueOf(axon.GvRewPred),
"GvSer": reflect.ValueOf(axon.GvSer),
"GvTime": reflect.ValueOf(axon.GvTime),
"GvUSneg": reflect.ValueOf(axon.GvUSneg),
"GvUSnegRaw": reflect.ValueOf(axon.GvUSnegRaw),
"GvUSpos": reflect.ValueOf(axon.GvUSpos),
"GvUrgency": reflect.ValueOf(axon.GvUrgency),
"GvUrgencyRaw": reflect.ValueOf(axon.GvUrgencyRaw),
"GvVSMatrixHasGated": reflect.ValueOf(axon.GvVSMatrixHasGated),
"GvVSMatrixJustGated": reflect.ValueOf(axon.GvVSMatrixJustGated),
"GvVSMatrixPoolGated": reflect.ValueOf(axon.GvVSMatrixPoolGated),
"GvVSPatchD1": reflect.ValueOf(axon.GvVSPatchD1),
"GvVSPatchD2": reflect.ValueOf(axon.GvVSPatchD2),
"GvVSPatchPos": reflect.ValueOf(axon.GvVSPatchPos),
"GvVSPatchPosPrev": reflect.ValueOf(axon.GvVSPatchPosPrev),
"GvVSPatchPosRPE": reflect.ValueOf(axon.GvVSPatchPosRPE),
"GvVSPatchPosSum": reflect.ValueOf(axon.GvVSPatchPosSum),
"GvVSPatchPosThr": reflect.ValueOf(axon.GvVSPatchPosThr),
"GvVSPatchPosVar": reflect.ValueOf(axon.GvVSPatchPosVar),
"GvVtaDA": reflect.ValueOf(axon.GvVtaDA),
"Gvgcc": reflect.ValueOf(axon.Gvgcc),
"HashEncodeSlice": reflect.ValueOf(axon.HashEncodeSlice),
"HipPath": reflect.ValueOf(axon.HipPath),
"IOLayer": reflect.ValueOf(axon.IOLayer),
"ISI": reflect.ValueOf(axon.ISI),
"ISIAvg": reflect.ValueOf(axon.ISIAvg),
"IndexToAvgMaxIntVar": reflect.ValueOf(axon.IndexToAvgMaxIntVar),
"IndexToAvgMaxVar": reflect.ValueOf(axon.IndexToAvgMaxVar),
"Inet": reflect.ValueOf(axon.Inet),
"InhibPath": reflect.ValueOf(axon.InhibPath),
"InhibitoryG": reflect.ValueOf(axon.InhibitoryG),
"InitGBuffsPath": reflect.ValueOf(axon.InitGBuffsPath),
"InputLayer": reflect.ValueOf(axon.InputLayer),
"IsExtLayerType": reflect.ValueOf(axon.IsExtLayerType),
"JsonToParams": reflect.ValueOf(axon.JsonToParams),
"KirM": reflect.ValueOf(axon.KirM),
"LDTLayer": reflect.ValueOf(axon.LDTLayer),
"LHbLayer": reflect.ValueOf(axon.LHbLayer),
"LWt": reflect.ValueOf(axon.LWt),
"LateralPath": reflect.ValueOf(axon.LateralPath),
"LayerActMAvg": reflect.ValueOf(axon.LayerActMAvg),
"LayerActPAvg": reflect.ValueOf(axon.LayerActPAvg),
"LayerAvgMaxGeM": reflect.ValueOf(axon.LayerAvgMaxGeM),
"LayerAvgMaxGiM": reflect.ValueOf(axon.LayerAvgMaxGiM),
"LayerGi": reflect.ValueOf(axon.LayerGi),
"LayerGiMult": reflect.ValueOf(axon.LayerGiMult),
"LayerPhaseDiff": reflect.ValueOf(axon.LayerPhaseDiff),
"LayerPhaseDiffAvg": reflect.ValueOf(axon.LayerPhaseDiffAvg),
"LayerPhaseDiffVar": reflect.ValueOf(axon.LayerPhaseDiffVar),
"LayerRT": reflect.ValueOf(axon.LayerRT),
"LayerRewPredNeg": reflect.ValueOf(axon.LayerRewPredNeg),
"LayerRewPredPos": reflect.ValueOf(axon.LayerRewPredPos),
"LayerStates": reflect.ValueOf(&axon.LayerStates).Elem(),
"LayerStatesVar": reflect.ValueOf(axon.LayerStatesVar),
"LayerTypesN": reflect.ValueOf(axon.LayerTypesN),
"LayerTypesValues": reflect.ValueOf(axon.LayerTypesValues),
"LayerVarsN": reflect.ValueOf(axon.LayerVarsN),
"LayerVarsValues": reflect.ValueOf(axon.LayerVarsValues),
"Layers": reflect.ValueOf(&axon.Layers).Elem(),
"LayersVar": reflect.ValueOf(axon.LayersVar),
"LearnCa": reflect.ValueOf(axon.LearnCa),
"LearnCaD": reflect.ValueOf(axon.LearnCaD),
"LearnCaM": reflect.ValueOf(axon.LearnCaM),
"LearnCaP": reflect.ValueOf(axon.LearnCaP),
"LearnDiff": reflect.ValueOf(axon.LearnDiff),
"LearnNow": reflect.ValueOf(axon.LearnNow),
"LogFilename": reflect.ValueOf(axon.LogFilename),
"LooperCycleStartFunc": reflect.ValueOf(axon.LooperCycleStartFunc),
"LooperStandard": reflect.ValueOf(axon.LooperStandard),
"LooperUpdateNetView": reflect.ValueOf(axon.LooperUpdateNetView),
"LooperUpdateWeightsFunc": reflect.ValueOf(axon.LooperUpdateWeightsFunc),
"MahpN": reflect.ValueOf(axon.MahpN),
"MaintG": reflect.ValueOf(axon.MaintG),
"Max": reflect.ValueOf(axon.Max),
"MaxGlobalVecN": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"MinusPhaseNeuron": reflect.ValueOf(axon.MinusPhaseNeuron),
"MinusPhasePool": reflect.ValueOf(axon.MinusPhasePool),
"MinusPhasePost": reflect.ValueOf(axon.MinusPhasePost),
"ModulatoryG": reflect.ValueOf(axon.ModulatoryG),
"NNeuronCaBins": reflect.ValueOf(&axon.NNeuronCaBins).Elem(),
"NNeuronLayerVars": reflect.ValueOf(&axon.NNeuronLayerVars).Elem(),
"Negative": reflect.ValueOf(axon.Negative),
"NetworkIxs": reflect.ValueOf(&axon.NetworkIxs).Elem(),
"NetworkIxsVar": reflect.ValueOf(axon.NetworkIxsVar),
"NeurFlags": reflect.ValueOf(axon.NeurFlags),
"NeuronAvgVarsN": reflect.ValueOf(axon.NeuronAvgVarsN),
"NeuronAvgVarsValues": reflect.ValueOf(axon.NeuronAvgVarsValues),
"NeuronAvgs": reflect.ValueOf(&axon.NeuronAvgs).Elem(),
"NeuronAvgsVar": reflect.ValueOf(axon.NeuronAvgsVar),
"NeuronClearFlag": reflect.ValueOf(axon.NeuronClearFlag),
"NeuronFlagsN": reflect.ValueOf(axon.NeuronFlagsN),
"NeuronFlagsValues": reflect.ValueOf(axon.NeuronFlagsValues),
"NeuronHasCmpr": reflect.ValueOf(axon.NeuronHasCmpr),
"NeuronHasExt": reflect.ValueOf(axon.NeuronHasExt),
"NeuronHasFlag": reflect.ValueOf(axon.NeuronHasFlag),
"NeuronHasTarg": reflect.ValueOf(axon.NeuronHasTarg),
"NeuronIndexVarsN": reflect.ValueOf(axon.NeuronIndexVarsN),
"NeuronIndexVarsValues": reflect.ValueOf(axon.NeuronIndexVarsValues),
"NeuronIsOff": reflect.ValueOf(axon.NeuronIsOff),
"NeuronIxs": reflect.ValueOf(&axon.NeuronIxs).Elem(),
"NeuronIxsVar": reflect.ValueOf(axon.NeuronIxsVar),
"NeuronLayerVars": reflect.ValueOf(&axon.NeuronLayerVars).Elem(),
"NeuronOff": reflect.ValueOf(axon.NeuronOff),
"NeuronSetFlag": reflect.ValueOf(axon.NeuronSetFlag),
"NeuronVarIndexByName": reflect.ValueOf(axon.NeuronVarIndexByName),
"NeuronVarNames": reflect.ValueOf(&axon.NeuronVarNames).Elem(),
"NeuronVarProps": reflect.ValueOf(&axon.NeuronVarProps).Elem(),
"NeuronVarsMap": reflect.ValueOf(&axon.NeuronVarsMap).Elem(),
"NeuronVarsN": reflect.ValueOf(axon.NeuronVarsN),
"NeuronVarsValues": reflect.ValueOf(axon.NeuronVarsValues),
"Neurons": reflect.ValueOf(&axon.Neurons).Elem(),
"NeuronsVar": reflect.ValueOf(axon.NeuronsVar),
"NewBLANovelPath": reflect.ValueOf(axon.NewBLANovelPath),
"NewContext": reflect.ValueOf(axon.NewContext),
"NewNetwork": reflect.ValueOf(axon.NewNetwork),
"NewStateLayer": reflect.ValueOf(axon.NewStateLayer),
"NewStateNeuron": reflect.ValueOf(axon.NewStateNeuron),
"Nitems": reflect.ValueOf(axon.Nitems),
"NmdaCa": reflect.ValueOf(axon.NmdaCa),
"NoDAMod": reflect.ValueOf(axon.NoDAMod),
"NrnLayIndex": reflect.ValueOf(axon.NrnLayIndex),
"NrnNeurIndex": reflect.ValueOf(axon.NrnNeurIndex),
"NrnSubPool": reflect.ValueOf(axon.NrnSubPool),
"OpenLogFile": reflect.ValueOf(axon.OpenLogFile),
"OpenLogFiles": reflect.ValueOf(axon.OpenLogFiles),
"PCAStrongThr": reflect.ValueOf(&axon.PCAStrongThr).Elem(),
"PTMaintLayer": reflect.ValueOf(axon.PTMaintLayer),
"PTPredLayer": reflect.ValueOf(axon.PTPredLayer),
"PVLayer": reflect.ValueOf(axon.PVLayer),
"PathGBuf": reflect.ValueOf(&axon.PathGBuf).Elem(),
"PathGBufVar": reflect.ValueOf(axon.PathGBufVar),
"PathGSyns": reflect.ValueOf(&axon.PathGSyns).Elem(),
"PathGSynsVar": reflect.ValueOf(axon.PathGSynsVar),
"PathGTypesN": reflect.ValueOf(axon.PathGTypesN),
"PathGTypesValues": reflect.ValueOf(axon.PathGTypesValues),
"PathRecvCon": reflect.ValueOf(&axon.PathRecvCon).Elem(),
"PathRecvConVar": reflect.ValueOf(axon.PathRecvConVar),
"PathSendCon": reflect.ValueOf(&axon.PathSendCon).Elem(),
"PathSendConVar": reflect.ValueOf(axon.PathSendConVar),
"PathTypesN": reflect.ValueOf(axon.PathTypesN),
"PathTypesValues": reflect.ValueOf(axon.PathTypesValues),
"Paths": reflect.ValueOf(&axon.Paths).Elem(),
"PathsVar": reflect.ValueOf(axon.PathsVar),
"Phase": reflect.ValueOf(axon.Phase),
"PlusPhaseEndNeuron": reflect.ValueOf(axon.PlusPhaseEndNeuron),
"PlusPhaseEndPool": reflect.ValueOf(axon.PlusPhaseEndPool),
"PlusPhaseEndPost": reflect.ValueOf(axon.PlusPhaseEndPost),
"PlusPhaseStartContext": reflect.ValueOf(axon.PlusPhaseStartContext),
"PlusPhaseStartNeuron": reflect.ValueOf(axon.PlusPhaseStartNeuron),
"PoolAvgDifCalc": reflect.ValueOf(axon.PoolAvgDifCalc),
"PoolAvgDifInit": reflect.ValueOf(axon.PoolAvgDifInit),
"PoolAvgDifUpdate": reflect.ValueOf(axon.PoolAvgDifUpdate),
"PoolAvgMax": reflect.ValueOf(axon.PoolAvgMax),
"PoolAvgMaxCalc": reflect.ValueOf(axon.PoolAvgMaxCalc),
"PoolAvgMaxCalcVar": reflect.ValueOf(axon.PoolAvgMaxCalcVar),
"PoolAvgMaxInit": reflect.ValueOf(axon.PoolAvgMaxInit),
"PoolAvgMaxUpdate": reflect.ValueOf(axon.PoolAvgMaxUpdate),
"PoolAvgMaxUpdateVar": reflect.ValueOf(axon.PoolAvgMaxUpdateVar),
"PoolAvgMaxUpdateVarNonAtomic": reflect.ValueOf(axon.PoolAvgMaxUpdateVarNonAtomic),
"PoolAvgMaxZero": reflect.ValueOf(axon.PoolAvgMaxZero),
"PoolCycleToMinus": reflect.ValueOf(axon.PoolCycleToMinus),
"PoolCycleToPlus": reflect.ValueOf(axon.PoolCycleToPlus),
"PoolGated": reflect.ValueOf(axon.PoolGated),
"PoolGi": reflect.ValueOf(axon.PoolGi),
"PoolIndexVarsN": reflect.ValueOf(axon.PoolIndexVarsN),
"PoolIndexVarsValues": reflect.ValueOf(axon.PoolIndexVarsValues),
"PoolInhib": reflect.ValueOf(axon.PoolInhib),
"PoolInhibDecay": reflect.ValueOf(axon.PoolInhibDecay),
"PoolInhibInit": reflect.ValueOf(axon.PoolInhibInit),
"PoolInhibInitRaw": reflect.ValueOf(axon.PoolInhibInitRaw),
"PoolInhibIntToRaw": reflect.ValueOf(axon.PoolInhibIntToRaw),
"PoolInhibLayerMax": reflect.ValueOf(axon.PoolInhibLayerMax),
"PoolInhibPoolMax": reflect.ValueOf(axon.PoolInhibPoolMax),
"PoolInhibRawIncrInt": reflect.ValueOf(axon.PoolInhibRawIncrInt),
"PoolInhibSpikesFromRaw": reflect.ValueOf(axon.PoolInhibSpikesFromRaw),
"PoolInhibZero": reflect.ValueOf(axon.PoolInhibZero),
"PoolInit": reflect.ValueOf(axon.PoolInit),
"PoolIntAvgMaxStart": reflect.ValueOf(axon.PoolIntAvgMaxStart),
"PoolIntVarName": reflect.ValueOf(axon.PoolIntVarName),
"PoolIntVarsN": reflect.ValueOf(axon.PoolIntVarsN),
"PoolIntVarsTot": reflect.ValueOf(axon.PoolIntVarsTot),
"PoolIntVarsValues": reflect.ValueOf(axon.PoolIntVarsValues),
"PoolIsLayer": reflect.ValueOf(axon.PoolIsLayer),
"PoolIxs": reflect.ValueOf(&axon.PoolIxs).Elem(),
"PoolIxsVar": reflect.ValueOf(axon.PoolIxsVar),
"PoolLayerIdx": reflect.ValueOf(axon.PoolLayerIdx),
"PoolNNeurons": reflect.ValueOf(axon.PoolNNeurons),
"PoolNeurEd": reflect.ValueOf(axon.PoolNeurEd),
"PoolNeurSt": reflect.ValueOf(axon.PoolNeurSt),
"PoolPoolGi": reflect.ValueOf(axon.PoolPoolGi),
"PoolTestValues": reflect.ValueOf(axon.PoolTestValues),
"PoolVarName": reflect.ValueOf(axon.PoolVarName),
"PoolVarsTotal": reflect.ValueOf(axon.PoolVarsTotal),
"Pools": reflect.ValueOf(&axon.Pools).Elem(),
"PoolsInt": reflect.ValueOf(&axon.PoolsInt).Elem(),
"PoolsIntVar": reflect.ValueOf(axon.PoolsIntVar),
"PoolsVar": reflect.ValueOf(axon.PoolsVar),
"Positive": reflect.ValueOf(axon.Positive),
"PulvinarLayer": reflect.ValueOf(axon.PulvinarLayer),
"RLRate": reflect.ValueOf(axon.RLRate),
"RWDaLayer": reflect.ValueOf(axon.RWDaLayer),
"RWPath": reflect.ValueOf(axon.RWPath),
"RWPredLayer": reflect.ValueOf(axon.RWPredLayer),
"RandFunActPGe": reflect.ValueOf(axon.RandFunActPGe),
"RandFunActPGi": reflect.ValueOf(axon.RandFunActPGi),
"RandFunActSMaintP": reflect.ValueOf(axon.RandFunActSMaintP),
"RandFunIndexN": reflect.ValueOf(axon.RandFunIndexN),
"ReadFromGPU": reflect.ValueOf(axon.ReadFromGPU),
"RecvPathIxs": reflect.ValueOf(&axon.RecvPathIxs).Elem(),
"RecvPathIxsVar": reflect.ValueOf(axon.RecvPathIxsVar),
"RecvSynIxs": reflect.ValueOf(&axon.RecvSynIxs).Elem(),
"RecvSynIxsVar": reflect.ValueOf(axon.RecvSynIxsVar),
"RewLayer": reflect.ValueOf(axon.RewLayer),
"RubiconNormFun": reflect.ValueOf(axon.RubiconNormFun),
"RubiconUSStimValue": reflect.ValueOf(axon.RubiconUSStimValue),
"RunAdaptGiLayer": reflect.ValueOf(axon.RunAdaptGiLayer),
"RunAdaptGiLayerCPU": reflect.ValueOf(axon.RunAdaptGiLayerCPU),
"RunAdaptGiLayerGPU": reflect.ValueOf(axon.RunAdaptGiLayerGPU),
"RunApplyExtsNeuron": reflect.ValueOf(axon.RunApplyExtsNeuron),
"RunApplyExtsNeuronCPU": reflect.ValueOf(axon.RunApplyExtsNeuronCPU),
"RunApplyExtsNeuronGPU": reflect.ValueOf(axon.RunApplyExtsNeuronGPU),
"RunBeta1Neuron": reflect.ValueOf(axon.RunBeta1Neuron),
"RunBeta1NeuronCPU": reflect.ValueOf(axon.RunBeta1NeuronCPU),
"RunBeta1NeuronGPU": reflect.ValueOf(axon.RunBeta1NeuronGPU),
"RunBeta2Neuron": reflect.ValueOf(axon.RunBeta2Neuron),
"RunBeta2NeuronCPU": reflect.ValueOf(axon.RunBeta2NeuronCPU),
"RunBeta2NeuronGPU": reflect.ValueOf(axon.RunBeta2NeuronGPU),
"RunBetweenGi": reflect.ValueOf(axon.RunBetweenGi),
"RunBetweenGiCPU": reflect.ValueOf(axon.RunBetweenGiCPU),
"RunBetweenGiGPU": reflect.ValueOf(axon.RunBetweenGiGPU),
"RunCycleInc": reflect.ValueOf(axon.RunCycleInc),
"RunCycleIncCPU": reflect.ValueOf(axon.RunCycleIncCPU),
"RunCycleIncGPU": reflect.ValueOf(axon.RunCycleIncGPU),
"RunCycleNeuron": reflect.ValueOf(axon.RunCycleNeuron),
"RunCycleNeuronCPU": reflect.ValueOf(axon.RunCycleNeuronCPU),
"RunCycleNeuronGPU": reflect.ValueOf(axon.RunCycleNeuronGPU),
"RunCyclePost": reflect.ValueOf(axon.RunCyclePost),
"RunCyclePostCPU": reflect.ValueOf(axon.RunCyclePostCPU),
"RunCyclePostGPU": reflect.ValueOf(axon.RunCyclePostGPU),
"RunDWtFromDiSyn": reflect.ValueOf(axon.RunDWtFromDiSyn),
"RunDWtFromDiSynCPU": reflect.ValueOf(axon.RunDWtFromDiSynCPU),
"RunDWtFromDiSynGPU": reflect.ValueOf(axon.RunDWtFromDiSynGPU),
"RunDWtSubMeanNeuron": reflect.ValueOf(axon.RunDWtSubMeanNeuron),
"RunDWtSubMeanNeuronCPU": reflect.ValueOf(axon.RunDWtSubMeanNeuronCPU),
"RunDWtSubMeanNeuronGPU": reflect.ValueOf(axon.RunDWtSubMeanNeuronGPU),
"RunDWtSyn": reflect.ValueOf(axon.RunDWtSyn),
"RunDWtSynCPU": reflect.ValueOf(axon.RunDWtSynCPU),
"RunDWtSynGPU": reflect.ValueOf(axon.RunDWtSynGPU),
"RunDone": reflect.ValueOf(axon.RunDone),
"RunDoneContext": reflect.ValueOf(axon.RunDoneContext),
"RunDoneLayers": reflect.ValueOf(axon.RunDoneLayers),
"RunDoneLayersNeurons": reflect.ValueOf(axon.RunDoneLayersNeurons),
"RunDoneLayersSynapses": reflect.ValueOf(axon.RunDoneLayersSynapses),
"RunDoneSynapses": reflect.ValueOf(axon.RunDoneSynapses),
"RunDoneSynapsesTrace": reflect.ValueOf(axon.RunDoneSynapsesTrace),
"RunGPUSync": reflect.ValueOf(axon.RunGPUSync),
"RunGPUTestWrite": reflect.ValueOf(axon.RunGPUTestWrite),
"RunGPUTestWriteCPU": reflect.ValueOf(axon.RunGPUTestWriteCPU),
"RunGPUTestWriteGPU": reflect.ValueOf(axon.RunGPUTestWriteGPU),
"RunGatherSpikes": reflect.ValueOf(axon.RunGatherSpikes),
"RunGatherSpikesCPU": reflect.ValueOf(axon.RunGatherSpikesCPU),
"RunGatherSpikesGPU": reflect.ValueOf(axon.RunGatherSpikesGPU),
"RunInitGBuffsPath": reflect.ValueOf(axon.RunInitGBuffsPath),
"RunInitGBuffsPathCPU": reflect.ValueOf(axon.RunInitGBuffsPathCPU),
"RunInitGBuffsPathGPU": reflect.ValueOf(axon.RunInitGBuffsPathGPU),
"RunLayerGi": reflect.ValueOf(axon.RunLayerGi),
"RunLayerGiCPU": reflect.ValueOf(axon.RunLayerGiCPU),
"RunLayerGiGPU": reflect.ValueOf(axon.RunLayerGiGPU),
"RunMinusPhaseNeuron": reflect.ValueOf(axon.RunMinusPhaseNeuron),
"RunMinusPhaseNeuronCPU": reflect.ValueOf(axon.RunMinusPhaseNeuronCPU),
"RunMinusPhaseNeuronGPU": reflect.ValueOf(axon.RunMinusPhaseNeuronGPU),
"RunMinusPhasePool": reflect.ValueOf(axon.RunMinusPhasePool),
"RunMinusPhasePoolCPU": reflect.ValueOf(axon.RunMinusPhasePoolCPU),
"RunMinusPhasePoolGPU": reflect.ValueOf(axon.RunMinusPhasePoolGPU),
"RunMinusPhasePost": reflect.ValueOf(axon.RunMinusPhasePost),
"RunMinusPhasePostCPU": reflect.ValueOf(axon.RunMinusPhasePostCPU),
"RunMinusPhasePostGPU": reflect.ValueOf(axon.RunMinusPhasePostGPU),
"RunNewStateLayer": reflect.ValueOf(axon.RunNewStateLayer),
"RunNewStateLayerCPU": reflect.ValueOf(axon.RunNewStateLayerCPU),
"RunNewStateLayerGPU": reflect.ValueOf(axon.RunNewStateLayerGPU),
"RunNewStateNeuron": reflect.ValueOf(axon.RunNewStateNeuron),
"RunNewStateNeuronCPU": reflect.ValueOf(axon.RunNewStateNeuronCPU),
"RunNewStateNeuronGPU": reflect.ValueOf(axon.RunNewStateNeuronGPU),
"RunOneAdaptGiLayer": reflect.ValueOf(axon.RunOneAdaptGiLayer),
"RunOneApplyExtsNeuron": reflect.ValueOf(axon.RunOneApplyExtsNeuron),
"RunOneBeta1Neuron": reflect.ValueOf(axon.RunOneBeta1Neuron),
"RunOneBeta2Neuron": reflect.ValueOf(axon.RunOneBeta2Neuron),
"RunOneBetweenGi": reflect.ValueOf(axon.RunOneBetweenGi),
"RunOneCycleInc": reflect.ValueOf(axon.RunOneCycleInc),
"RunOneCycleNeuron": reflect.ValueOf(axon.RunOneCycleNeuron),
"RunOneCyclePost": reflect.ValueOf(axon.RunOneCyclePost),
"RunOneDWtFromDiSyn": reflect.ValueOf(axon.RunOneDWtFromDiSyn),
"RunOneDWtSubMeanNeuron": reflect.ValueOf(axon.RunOneDWtSubMeanNeuron),
"RunOneDWtSyn": reflect.ValueOf(axon.RunOneDWtSyn),
"RunOneGPUTestWrite": reflect.ValueOf(axon.RunOneGPUTestWrite),
"RunOneGatherSpikes": reflect.ValueOf(axon.RunOneGatherSpikes),
"RunOneInitGBuffsPath": reflect.ValueOf(axon.RunOneInitGBuffsPath),
"RunOneLayerGi": reflect.ValueOf(axon.RunOneLayerGi),
"RunOneMinusPhaseNeuron": reflect.ValueOf(axon.RunOneMinusPhaseNeuron),
"RunOneMinusPhasePool": reflect.ValueOf(axon.RunOneMinusPhasePool),
"RunOneMinusPhasePost": reflect.ValueOf(axon.RunOneMinusPhasePost),
"RunOneNewStateLayer": reflect.ValueOf(axon.RunOneNewStateLayer),
"RunOneNewStateNeuron": reflect.ValueOf(axon.RunOneNewStateNeuron),
"RunOnePlusPhaseEndNeuron": reflect.ValueOf(axon.RunOnePlusPhaseEndNeuron),
"RunOnePlusPhaseEndPool": reflect.ValueOf(axon.RunOnePlusPhaseEndPool),
"RunOnePlusPhaseEndPost": reflect.ValueOf(axon.RunOnePlusPhaseEndPost),
"RunOnePlusPhaseStartContext": reflect.ValueOf(axon.RunOnePlusPhaseStartContext),
"RunOnePlusPhaseStartNeuron": reflect.ValueOf(axon.RunOnePlusPhaseStartNeuron),
"RunOnePoolGi": reflect.ValueOf(axon.RunOnePoolGi),
"RunOneSendSpike": reflect.ValueOf(axon.RunOneSendSpike),
"RunOneSlowAdaptLayer": reflect.ValueOf(axon.RunOneSlowAdaptLayer),
"RunOneSlowAdaptNeuron": reflect.ValueOf(axon.RunOneSlowAdaptNeuron),
"RunOneWtFromDWtLayer": reflect.ValueOf(axon.RunOneWtFromDWtLayer),
"RunOneWtFromDWtSyn": reflect.ValueOf(axon.RunOneWtFromDWtSyn),
"RunPlusPhaseEndNeuron": reflect.ValueOf(axon.RunPlusPhaseEndNeuron),
"RunPlusPhaseEndNeuronCPU": reflect.ValueOf(axon.RunPlusPhaseEndNeuronCPU),
"RunPlusPhaseEndNeuronGPU": reflect.ValueOf(axon.RunPlusPhaseEndNeuronGPU),
"RunPlusPhaseEndPool": reflect.ValueOf(axon.RunPlusPhaseEndPool),
"RunPlusPhaseEndPoolCPU": reflect.ValueOf(axon.RunPlusPhaseEndPoolCPU),
"RunPlusPhaseEndPoolGPU": reflect.ValueOf(axon.RunPlusPhaseEndPoolGPU),
"RunPlusPhaseEndPost": reflect.ValueOf(axon.RunPlusPhaseEndPost),
"RunPlusPhaseEndPostCPU": reflect.ValueOf(axon.RunPlusPhaseEndPostCPU),
"RunPlusPhaseEndPostGPU": reflect.ValueOf(axon.RunPlusPhaseEndPostGPU),
"RunPlusPhaseStartContext": reflect.ValueOf(axon.RunPlusPhaseStartContext),
"RunPlusPhaseStartContextCPU": reflect.ValueOf(axon.RunPlusPhaseStartContextCPU),
"RunPlusPhaseStartContextGPU": reflect.ValueOf(axon.RunPlusPhaseStartContextGPU),
"RunPlusPhaseStartNeuron": reflect.ValueOf(axon.RunPlusPhaseStartNeuron),
"RunPlusPhaseStartNeuronCPU": reflect.ValueOf(axon.RunPlusPhaseStartNeuronCPU),
"RunPlusPhaseStartNeuronGPU": reflect.ValueOf(axon.RunPlusPhaseStartNeuronGPU),
"RunPoolGi": reflect.ValueOf(axon.RunPoolGi),
"RunPoolGiCPU": reflect.ValueOf(axon.RunPoolGiCPU),
"RunPoolGiGPU": reflect.ValueOf(axon.RunPoolGiGPU),
"RunSendSpike": reflect.ValueOf(axon.RunSendSpike),
"RunSendSpikeCPU": reflect.ValueOf(axon.RunSendSpikeCPU),
"RunSendSpikeGPU": reflect.ValueOf(axon.RunSendSpikeGPU),
"RunSlowAdaptLayer": reflect.ValueOf(axon.RunSlowAdaptLayer),
"RunSlowAdaptLayerCPU": reflect.ValueOf(axon.RunSlowAdaptLayerCPU),
"RunSlowAdaptLayerGPU": reflect.ValueOf(axon.RunSlowAdaptLayerGPU),
"RunSlowAdaptNeuron": reflect.ValueOf(axon.RunSlowAdaptNeuron),
"RunSlowAdaptNeuronCPU": reflect.ValueOf(axon.RunSlowAdaptNeuronCPU),
"RunSlowAdaptNeuronGPU": reflect.ValueOf(axon.RunSlowAdaptNeuronGPU),
"RunWtFromDWtLayer": reflect.ValueOf(axon.RunWtFromDWtLayer),
"RunWtFromDWtLayerCPU": reflect.ValueOf(axon.RunWtFromDWtLayerCPU),
"RunWtFromDWtLayerGPU": reflect.ValueOf(axon.RunWtFromDWtLayerGPU),
"RunWtFromDWtSyn": reflect.ValueOf(axon.RunWtFromDWtSyn),
"RunWtFromDWtSynCPU": reflect.ValueOf(axon.RunWtFromDWtSynCPU),
"RunWtFromDWtSynGPU": reflect.ValueOf(axon.RunWtFromDWtSynGPU),
"SKCaIn": reflect.ValueOf(axon.SKCaIn),
"SKCaM": reflect.ValueOf(axon.SKCaM),
"SKCaR": reflect.ValueOf(axon.SKCaR),
"SMaintP": reflect.ValueOf(axon.SMaintP),
"SSGiDend": reflect.ValueOf(axon.SSGiDend),
"STNLayer": reflect.ValueOf(axon.STNLayer),
"SWt": reflect.ValueOf(axon.SWt),
"SahpCa": reflect.ValueOf(axon.SahpCa),
"SahpN": reflect.ValueOf(axon.SahpN),
"SaveWeights": reflect.ValueOf(axon.SaveWeights),
"SaveWeightsIfConfigSet": reflect.ValueOf(axon.SaveWeightsIfConfigSet),
"ScriptParams": reflect.ValueOf(&axon.ScriptParams).Elem(),
"SendSpike": reflect.ValueOf(axon.SendSpike),
"SetNeuronExtPosNeg": reflect.ValueOf(axon.SetNeuronExtPosNeg),
"SigFun": reflect.ValueOf(axon.SigFun),
"SigFun61": reflect.ValueOf(axon.SigFun61),
"SigInvFun": reflect.ValueOf(axon.SigInvFun),
"SigInvFun61": reflect.ValueOf(axon.SigInvFun61),
"SigmoidFun": reflect.ValueOf(axon.SigmoidFun),
"SlowAdaptLayer": reflect.ValueOf(axon.SlowAdaptLayer),
"SlowAdaptNeuron": reflect.ValueOf(axon.SlowAdaptNeuron),
"Spike": reflect.ValueOf(axon.Spike),
"Spiked": reflect.ValueOf(axon.Spiked),
"StartNN": reflect.ValueOf(axon.StartNN),
"StartOff": reflect.ValueOf(axon.StartOff),
"StatCorSim": reflect.ValueOf(axon.StatCorSim),
"StatExcludeLevel": reflect.ValueOf(axon.StatExcludeLevel),
"StatLayerActGe": reflect.ValueOf(axon.StatLayerActGe),
"StatLayerGiMult": reflect.ValueOf(axon.StatLayerGiMult),
"StatLayerState": reflect.ValueOf(axon.StatLayerState),
"StatLearnNow": reflect.ValueOf(axon.StatLearnNow),
"StatLevelAll": reflect.ValueOf(axon.StatLevelAll),
"StatLoopCounters": reflect.ValueOf(axon.StatLoopCounters),
"StatPCA": reflect.ValueOf(axon.StatPCA),
"StatPerTrialMSec": reflect.ValueOf(axon.StatPerTrialMSec),
"StatPrevCorSim": reflect.ValueOf(axon.StatPrevCorSim),
"StatRunName": reflect.ValueOf(axon.StatRunName),
"StatTrialName": reflect.ValueOf(axon.StatTrialName),
"StatsLayerValues": reflect.ValueOf(axon.StatsLayerValues),
"StatsNode": reflect.ValueOf(axon.StatsNode),
"StructValues": reflect.ValueOf(axon.StructValues),
"SuperLayer": reflect.ValueOf(axon.SuperLayer),
"SynPathIndex": reflect.ValueOf(axon.SynPathIndex),
"SynRecvIndex": reflect.ValueOf(axon.SynRecvIndex),
"SynSendIndex": reflect.ValueOf(axon.SynSendIndex),
"SynapseIndexVarsN": reflect.ValueOf(axon.SynapseIndexVarsN),
"SynapseIndexVarsValues": reflect.ValueOf(axon.SynapseIndexVarsValues),
"SynapseIxs": reflect.ValueOf(&axon.SynapseIxs).Elem(),
"SynapseIxsVar": reflect.ValueOf(axon.SynapseIxsVar),
"SynapseTraceVarsN": reflect.ValueOf(axon.SynapseTraceVarsN),
"SynapseTraceVarsValues": reflect.ValueOf(axon.SynapseTraceVarsValues),
"SynapseTraces": reflect.ValueOf(&axon.SynapseTraces).Elem(),
"SynapseTracesVar": reflect.ValueOf(axon.SynapseTracesVar),
"SynapseVarByName": reflect.ValueOf(axon.SynapseVarByName),
"SynapseVarNames": reflect.ValueOf(&axon.SynapseVarNames).Elem(),
"SynapseVarProps": reflect.ValueOf(&axon.SynapseVarProps).Elem(),
"SynapseVarsMap": reflect.ValueOf(&axon.SynapseVarsMap).Elem(),
"SynapseVarsN": reflect.ValueOf(axon.SynapseVarsN),
"SynapseVarsValues": reflect.ValueOf(axon.SynapseVarsValues),
"Synapses": reflect.ValueOf(&axon.Synapses).Elem(),
"SynapsesVar": reflect.ValueOf(axon.SynapsesVar),
"SyncFromGPU": reflect.ValueOf(axon.SyncFromGPU),
"TDDaLayer": reflect.ValueOf(axon.TDDaLayer),
"TDIntegLayer": reflect.ValueOf(axon.TDIntegLayer),
"TDPredLayer": reflect.ValueOf(axon.TDPredLayer),
"TDPredPath": reflect.ValueOf(axon.TDPredPath),
"TRNLayer": reflect.ValueOf(axon.TRNLayer),
"Target": reflect.ValueOf(axon.Target),
"TargetLayer": reflect.ValueOf(axon.TargetLayer),
"TensorStrides": reflect.ValueOf(&axon.TensorStrides).Elem(),
"Theta": reflect.ValueOf(axon.Theta),
"TimeCycle": reflect.ValueOf(axon.TimeCycle),
"TimeDiff": reflect.ValueOf(axon.TimeDiff),
"TimePeak": reflect.ValueOf(axon.TimePeak),
"ToGPU": reflect.ValueOf(axon.ToGPU),
"ToGPUAll": reflect.ValueOf(axon.ToGPUAll),
"ToGPUCtx": reflect.ValueOf(axon.ToGPUCtx),
"ToGPUCtxGlobal": reflect.ValueOf(axon.ToGPUCtxGlobal),
"ToGPUExts": reflect.ValueOf(axon.ToGPUExts),
"ToGPUIndexes": reflect.ValueOf(axon.ToGPUIndexes),
"ToGPULayers": reflect.ValueOf(axon.ToGPULayers),
"ToGPULayersNeurons": reflect.ValueOf(axon.ToGPULayersNeurons),
"ToGPULayersSynapses": reflect.ValueOf(axon.ToGPULayersSynapses),
"ToGPUNeurons": reflect.ValueOf(axon.ToGPUNeurons),
"ToGPUParams": reflect.ValueOf(axon.ToGPUParams),
"ToGPUSynapses": reflect.ValueOf(axon.ToGPUSynapses),
"ToGPUTensorStrides": reflect.ValueOf(axon.ToGPUTensorStrides),
"ToggleLayersOff": reflect.ValueOf(axon.ToggleLayersOff),
"Tr": reflect.ValueOf(axon.Tr),
"TrgAvg": reflect.ValueOf(axon.TrgAvg),
"USLayer": reflect.ValueOf(axon.USLayer),
"UrgencyLayer": reflect.ValueOf(axon.UrgencyLayer),
"UseGPU": reflect.ValueOf(&axon.UseGPU).Elem(),
"VSGatedLayer": reflect.ValueOf(axon.VSGatedLayer),
"VSMatrixLayer": reflect.ValueOf(axon.VSMatrixLayer),
"VSMatrixPath": reflect.ValueOf(axon.VSMatrixPath),
"VSPatchLayer": reflect.ValueOf(axon.VSPatchLayer),
"VSPatchPath": reflect.ValueOf(axon.VSPatchPath),
"VTALayer": reflect.ValueOf(axon.VTALayer),
"ValenceTypesN": reflect.ValueOf(axon.ValenceTypesN),
"ValenceTypesValues": reflect.ValueOf(axon.ValenceTypesValues),
"VarCategories": reflect.ValueOf(&axon.VarCategories).Elem(),
"VgccCa": reflect.ValueOf(axon.VgccCa),
"VgccCaInt": reflect.ValueOf(axon.VgccCaInt),
"VgccH": reflect.ValueOf(axon.VgccH),
"VgccM": reflect.ValueOf(axon.VgccM),
"ViewTimeCycles": reflect.ValueOf(&axon.ViewTimeCycles).Elem(),
"ViewTimesN": reflect.ValueOf(axon.ViewTimesN),
"ViewTimesValues": reflect.ValueOf(axon.ViewTimesValues),
"Vm": reflect.ValueOf(axon.Vm),
"VmDend": reflect.ValueOf(axon.VmDend),
"WalkFields": reflect.ValueOf(axon.WalkFields),
"WeightsFilename": reflect.ValueOf(axon.WeightsFilename),
"Wt": reflect.ValueOf(axon.Wt),
"WtFromDWtLayer": reflect.ValueOf(axon.WtFromDWtLayer),
"WtFromDWtSyn": reflect.ValueOf(axon.WtFromDWtSyn),
// type definitions
"ActAvgParams": reflect.ValueOf((*axon.ActAvgParams)(nil)),
"ActInitParams": reflect.ValueOf((*axon.ActInitParams)(nil)),
"ActParams": reflect.ValueOf((*axon.ActParams)(nil)),
"AvgMax": reflect.ValueOf((*axon.AvgMax)(nil)),
"AvgMaxPhases": reflect.ValueOf((*axon.AvgMaxPhases)(nil)),
"AvgMaxVars": reflect.ValueOf((*axon.AvgMaxVars)(nil)),
"BLANovelPath": reflect.ValueOf((*axon.BLANovelPath)(nil)),
"BLAPathParams": reflect.ValueOf((*axon.BLAPathParams)(nil)),
"BurstParams": reflect.ValueOf((*axon.BurstParams)(nil)),
"CTParams": reflect.ValueOf((*axon.CTParams)(nil)),
"ClampParams": reflect.ValueOf((*axon.ClampParams)(nil)),
"Context": reflect.ValueOf((*axon.Context)(nil)),
"DAModTypes": reflect.ValueOf((*axon.DAModTypes)(nil)),
"DSMatrixParams": reflect.ValueOf((*axon.DSMatrixParams)(nil)),
"DSMatrixPathParams": reflect.ValueOf((*axon.DSMatrixPathParams)(nil)),
"DWtParams": reflect.ValueOf((*axon.DWtParams)(nil)),
"DecayParams": reflect.ValueOf((*axon.DecayParams)(nil)),
"DendParams": reflect.ValueOf((*axon.DendParams)(nil)),
"DriveParams": reflect.ValueOf((*axon.DriveParams)(nil)),
"DtParams": reflect.ValueOf((*axon.DtParams)(nil)),
"FieldValue": reflect.ValueOf((*axon.FieldValue)(nil)),
"GPLayerTypes": reflect.ValueOf((*axon.GPLayerTypes)(nil)),
"GPParams": reflect.ValueOf((*axon.GPParams)(nil)),
"GPUVars": reflect.ValueOf((*axon.GPUVars)(nil)),
"GScaleValues": reflect.ValueOf((*axon.GScaleValues)(nil)),
"GiveUpParams": reflect.ValueOf((*axon.GiveUpParams)(nil)),
"GlobalScalarVars": reflect.ValueOf((*axon.GlobalScalarVars)(nil)),
"GlobalVectorVars": reflect.ValueOf((*axon.GlobalVectorVars)(nil)),
"HebbParams": reflect.ValueOf((*axon.HebbParams)(nil)),
"HipConfig": reflect.ValueOf((*axon.HipConfig)(nil)),
"HipPathParams": reflect.ValueOf((*axon.HipPathParams)(nil)),
"IOParams": reflect.ValueOf((*axon.IOParams)(nil)),
"InhibParams": reflect.ValueOf((*axon.InhibParams)(nil)),
"LDTParams": reflect.ValueOf((*axon.LDTParams)(nil)),
"LHbParams": reflect.ValueOf((*axon.LHbParams)(nil)),
"LRateMod": reflect.ValueOf((*axon.LRateMod)(nil)),
"LRateParams": reflect.ValueOf((*axon.LRateParams)(nil)),
"Layer": reflect.ValueOf((*axon.Layer)(nil)),
"LayerIndexes": reflect.ValueOf((*axon.LayerIndexes)(nil)),
"LayerInhibIndexes": reflect.ValueOf((*axon.LayerInhibIndexes)(nil)),
"LayerParams": reflect.ValueOf((*axon.LayerParams)(nil)),
"LayerSearches": reflect.ValueOf((*axon.LayerSearches)(nil)),
"LayerSel": reflect.ValueOf((*axon.LayerSel)(nil)),
"LayerSheet": reflect.ValueOf((*axon.LayerSheet)(nil)),
"LayerSheets": reflect.ValueOf((*axon.LayerSheets)(nil)),
"LayerTypes": reflect.ValueOf((*axon.LayerTypes)(nil)),
"LayerVars": reflect.ValueOf((*axon.LayerVars)(nil)),
"LearnCaParams": reflect.ValueOf((*axon.LearnCaParams)(nil)),
"LearnNeuronParams": reflect.ValueOf((*axon.LearnNeuronParams)(nil)),
"LearnSynParams": reflect.ValueOf((*axon.LearnSynParams)(nil)),
"LearnTimingParams": reflect.ValueOf((*axon.LearnTimingParams)(nil)),
"NetViewUpdate": reflect.ValueOf((*axon.NetViewUpdate)(nil)),
"Network": reflect.ValueOf((*axon.Network)(nil)),
"NetworkIndexes": reflect.ValueOf((*axon.NetworkIndexes)(nil)),
"NeuroModParams": reflect.ValueOf((*axon.NeuroModParams)(nil)),
"NeuronAvgVars": reflect.ValueOf((*axon.NeuronAvgVars)(nil)),
"NeuronFlags": reflect.ValueOf((*axon.NeuronFlags)(nil)),
"NeuronIndexVars": reflect.ValueOf((*axon.NeuronIndexVars)(nil)),
"NeuronVars": reflect.ValueOf((*axon.NeuronVars)(nil)),
"NuclearParams": reflect.ValueOf((*axon.NuclearParams)(nil)),
"Params": reflect.ValueOf((*axon.Params)(nil)),
"Path": reflect.ValueOf((*axon.Path)(nil)),
"PathGTypes": reflect.ValueOf((*axon.PathGTypes)(nil)),
"PathIndexes": reflect.ValueOf((*axon.PathIndexes)(nil)),
"PathParams": reflect.ValueOf((*axon.PathParams)(nil)),
"PathScaleParams": reflect.ValueOf((*axon.PathScaleParams)(nil)),
"PathSearches": reflect.ValueOf((*axon.PathSearches)(nil)),
"PathSel": reflect.ValueOf((*axon.PathSel)(nil)),
"PathSheet": reflect.ValueOf((*axon.PathSheet)(nil)),
"PathSheets": reflect.ValueOf((*axon.PathSheets)(nil)),
"PathTypes": reflect.ValueOf((*axon.PathTypes)(nil)),
"PoolIndexVars": reflect.ValueOf((*axon.PoolIndexVars)(nil)),
"PoolIntVars": reflect.ValueOf((*axon.PoolIntVars)(nil)),
"PopCodeParams": reflect.ValueOf((*axon.PopCodeParams)(nil)),
"PulvinarParams": reflect.ValueOf((*axon.PulvinarParams)(nil)),
"RLPredPathParams": reflect.ValueOf((*axon.RLPredPathParams)(nil)),
"RLRateParams": reflect.ValueOf((*axon.RLRateParams)(nil)),
"RWDaParams": reflect.ValueOf((*axon.RWDaParams)(nil)),
"RWPredParams": reflect.ValueOf((*axon.RWPredParams)(nil)),
"RandFunIndex": reflect.ValueOf((*axon.RandFunIndex)(nil)),
"Rubicon": reflect.ValueOf((*axon.Rubicon)(nil)),
"SMaintParams": reflect.ValueOf((*axon.SMaintParams)(nil)),
"SWtAdaptParams": reflect.ValueOf((*axon.SWtAdaptParams)(nil)),
"SWtInitParams": reflect.ValueOf((*axon.SWtInitParams)(nil)),
"SWtParams": reflect.ValueOf((*axon.SWtParams)(nil)),
"SpikeNoiseParams": reflect.ValueOf((*axon.SpikeNoiseParams)(nil)),
"SpikeParams": reflect.ValueOf((*axon.SpikeParams)(nil)),
"StartN": reflect.ValueOf((*axon.StartN)(nil)),
"StriatumParams": reflect.ValueOf((*axon.StriatumParams)(nil)),
"SynComParams": reflect.ValueOf((*axon.SynComParams)(nil)),
"SynapseIndexVars": reflect.ValueOf((*axon.SynapseIndexVars)(nil)),
"SynapseTraceVars": reflect.ValueOf((*axon.SynapseTraceVars)(nil)),
"SynapseVars": reflect.ValueOf((*axon.SynapseVars)(nil)),
"TDDaParams": reflect.ValueOf((*axon.TDDaParams)(nil)),
"TDIntegParams": reflect.ValueOf((*axon.TDIntegParams)(nil)),
"TrgAvgActParams": reflect.ValueOf((*axon.TrgAvgActParams)(nil)),
"USParams": reflect.ValueOf((*axon.USParams)(nil)),
"UrgencyParams": reflect.ValueOf((*axon.UrgencyParams)(nil)),
"VSMatrixPathParams": reflect.ValueOf((*axon.VSMatrixPathParams)(nil)),
"VTAParams": reflect.ValueOf((*axon.VTAParams)(nil)),
"ValenceTypes": reflect.ValueOf((*axon.ValenceTypes)(nil)),
"ViewTimes": reflect.ValueOf((*axon.ViewTimes)(nil)),
}
}
// Code generated by 'yaegi extract github.com/emer/axon/v2/chans/chanplots'. DO NOT EDIT.
package yaegiaxon
import (
"github.com/emer/axon/v2/chans/chanplots"
"reflect"
)
func init() {
Symbols["github.com/emer/axon/v2/chans/chanplots/chanplots"] = map[string]reflect.Value{
// type definitions
"AKParams": reflect.ValueOf((*chanplots.AKParams)(nil)),
"AKPlot": reflect.ValueOf((*chanplots.AKPlot)(nil)),
"GABABPlot": reflect.ValueOf((*chanplots.GABABPlot)(nil)),
"KirPlot": reflect.ValueOf((*chanplots.KirPlot)(nil)),
"MahpPlot": reflect.ValueOf((*chanplots.MahpPlot)(nil)),
"NMDAPlot": reflect.ValueOf((*chanplots.NMDAPlot)(nil)),
"SKCaPlot": reflect.ValueOf((*chanplots.SKCaPlot)(nil)),
"SahpPlot": reflect.ValueOf((*chanplots.SahpPlot)(nil)),
"SynCaPlot": reflect.ValueOf((*chanplots.SynCaPlot)(nil)),
"VGCCPlot": reflect.ValueOf((*chanplots.VGCCPlot)(nil)),
}
}
// Code generated by 'yaegi extract github.com/emer/axon/v2/chans'. DO NOT EDIT.
package yaegiaxon
import (
"github.com/emer/axon/v2/chans"
"reflect"
)
func init() {
Symbols["github.com/emer/axon/v2/chans/chans"] = map[string]reflect.Value{
// type definitions
"AKsParams": reflect.ValueOf((*chans.AKsParams)(nil)),
"Chans": reflect.ValueOf((*chans.Chans)(nil)),
"GABABParams": reflect.ValueOf((*chans.GABABParams)(nil)),
"KNaMedSlow": reflect.ValueOf((*chans.KNaMedSlow)(nil)),
"KNaParams": reflect.ValueOf((*chans.KNaParams)(nil)),
"KirParams": reflect.ValueOf((*chans.KirParams)(nil)),
"MahpParams": reflect.ValueOf((*chans.MahpParams)(nil)),
"NMDAParams": reflect.ValueOf((*chans.NMDAParams)(nil)),
"SKCaParams": reflect.ValueOf((*chans.SKCaParams)(nil)),
"SahpParams": reflect.ValueOf((*chans.SahpParams)(nil)),
"VGCCParams": reflect.ValueOf((*chans.VGCCParams)(nil)),
}
}
// Code generated by 'yaegi extract github.com/emer/axon/v2/fsfffb'. DO NOT EDIT.
package yaegiaxon
import (
"github.com/emer/axon/v2/fsfffb"
"reflect"
)
func init() {
Symbols["github.com/emer/axon/v2/fsfffb/fsfffb"] = map[string]reflect.Value{
// function, constant and variable definitions
"DAD1": reflect.ValueOf(fsfffb.DAD1),
"DAD2": reflect.ValueOf(fsfffb.DAD2),
"FBs": reflect.ValueOf(fsfffb.FBs),
"FBsRaw": reflect.ValueOf(fsfffb.FBsRaw),
"FFAvg": reflect.ValueOf(fsfffb.FFAvg),
"FFAvgPrv": reflect.ValueOf(fsfffb.FFAvgPrv),
"FFs": reflect.ValueOf(fsfffb.FFs),
"FFsRaw": reflect.ValueOf(fsfffb.FFsRaw),
"FSGi": reflect.ValueOf(fsfffb.FSGi),
"FSi": reflect.ValueOf(fsfffb.FSi),
"GeExtRaw": reflect.ValueOf(fsfffb.GeExtRaw),
"GeExts": reflect.ValueOf(fsfffb.GeExts),
"GiOrig": reflect.ValueOf(fsfffb.GiOrig),
"InhibVarsN": reflect.ValueOf(fsfffb.InhibVarsN),
"InhibVarsValues": reflect.ValueOf(fsfffb.InhibVarsValues),
"LayGi": reflect.ValueOf(fsfffb.LayGi),
"ModAct": reflect.ValueOf(fsfffb.ModAct),
"SSGi": reflect.ValueOf(fsfffb.SSGi),
"SSf": reflect.ValueOf(fsfffb.SSf),
"SSi": reflect.ValueOf(fsfffb.SSi),
"TotalGi": reflect.ValueOf(fsfffb.TotalGi),
// type definitions
"GiParams": reflect.ValueOf((*fsfffb.GiParams)(nil)),
"InhibVars": reflect.ValueOf((*fsfffb.InhibVars)(nil)),
}
}
// Code generated by 'yaegi extract github.com/emer/axon/v2/kinase'. DO NOT EDIT.
package yaegiaxon
import (
"github.com/emer/axon/v2/kinase"
"reflect"
)
func init() {
Symbols["github.com/emer/axon/v2/kinase/kinase"] = map[string]reflect.Value{
// function, constant and variable definitions
"CaBinWts": reflect.ValueOf(kinase.CaBinWts),
// type definitions
"CaDtParams": reflect.ValueOf((*kinase.CaDtParams)(nil)),
"CaSpikeParams": reflect.ValueOf((*kinase.CaSpikeParams)(nil)),
}
}
// Copyright (c) 2025, Cogent Core. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package yaegiaxon exports axon packages to the yaegi interpreter
// and provides functions for connecting with Cogent Lab.
package yaegiaxon
import (
"reflect"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/yaegicore"
"cogentcore.org/lab/goal/interpreter"
"cogentcore.org/lab/yaegilab/labsymbols"
"cogentcore.org/lab/yaegilab/tensorsymbols"
"github.com/cogentcore/yaegi/interp"
"github.com/emer/emergent/v2/yaegiemergent"
)
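// init registers the Goal [Interpreter] constructor as the interpreter
// used for the "Goal" language in yaegicore.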
func init() {
yaegicore.Interpreters["Goal"] = func(options interp.Options) yaegicore.Interpreter {
return NewInterpreter(options)
}
}
// Interpreter implements [yaegicore.Interpreter] using the [interpreter.Interpreter] for Goal.
type Interpreter struct {
*interpreter.Interpreter
}
// NewInterpreter returns a new [Interpreter] initialized with the given options.
func NewInterpreter(options interp.Options) *Interpreter {
return &Interpreter{interpreter.NewInterpreter(options)}
}
// Use registers the given symbol exports with the underlying yaegi interpreter.
func (in *Interpreter) Use(values interp.Exports) error {
return in.Interp.Use(values)
}
// ImportUsed loads the tensor, lab, emergent, and axon symbol exports
// into the interpreter and then configures it.
func (in *Interpreter) ImportUsed() {
errors.Log(in.Use(tensorsymbols.Symbols))
errors.Log(in.Use(labsymbols.Symbols))
errors.Log(in.Use(yaegiemergent.Symbols))
errors.Log(in.Use(Symbols))
in.Config()
}
// Eval evaluates the given source code string in the interpreter,
// returning the resulting value and any error.
func (in *Interpreter) Eval(src string) (res reflect.Value, err error) {
res, _, err = in.Interpreter.Eval(src)
return
}
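// exampleGoalEval is a minimal usage sketch added for illustration; it is not
// part of the generated exports, and the evaluated expression plus the empty
// [interp.Options] are assumptions. It shows the intended flow: construct the
// Goal [Interpreter], load the exported symbol tables via ImportUsed, then
// Eval source code that can reference the axon packages registered above.
func exampleGoalEval() {
// construct a Goal interpreter with default (assumed empty) options
it := NewInterpreter(interp.Options{})
// load tensorsymbols, labsymbols, yaegiemergent, and the axon Symbols
it.ImportUsed()
// evaluate an arbitrary expression; res is a reflect.Value with the result
res, err := it.Eval("1 + 2")
if err != nil {
errors.Log(err)
return
}
_ = res
}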