package ecs
import (
"fmt"
)
// This is the identifier for entities in the world
//
//cod:struct
type Id uint32

// archetypeId indexes archEngine.lookup; one id per unique component layout.
type archetypeId uint32

// entLoc pinpoints where an entity's data lives: which archetype, and which
// row (index) inside that archetype's parallel slices.
type entLoc struct {
	archId archetypeId
	index  uint32
}
// Provides generic storage for all archetypes
type archEngine struct {
	// Incremented in newArchetypeId; starts at 1 so a zero-valued cached
	// generation elsewhere can never match (presumably used to invalidate
	// cached filters — confirm at call sites of getGeneration).
	generation int

	lookup      []*lookupList // Indexed by archetypeId
	compStorage []storage     // Indexed by componentId
	dcr         *componentRegistry

	// TODO: Optimization: Hook loops can be improved by tracking a slice of CompId for each type of hook. Then when I Track components on that finalizeSlice, I can just loop over the list of CompId which will only be as long as the number of hooks that the user has added
	onAddHooks    []Handler // A list of hooks to execute for onAdd events. Indexed by componentId
	finalizeOnAdd []CompId  // The temporary list of components to run the onAdd hooks

	// TODO: You could unify hooks with observers by making initial ranges of EventId
	// [0, maxComponent) -> Add event per component
	// [maxComponent, 2*maxComponent) -> Remove event per component
	// etc...
}
// newArchEngine constructs an empty archetype engine with preallocated
// component-storage and hook slots.
func newArchEngine() *archEngine {
	engine := &archEngine{
		// Start at 1 so that anyone holding the default int value will
		// always realize they are in the wrong generation.
		generation:  1,
		lookup:      make([]*lookupList, 0, DefaultAllocation),
		compStorage: make([]storage, maxComponentId+1),
		dcr:         newComponentRegistry(),
		onAddHooks:  make([]Handler, maxComponentId+1),
	}
	return engine
}
// print dumps every lookup list and component storage to stdout (debugging aid).
func (e *archEngine) print() {
	for _, l := range e.lookup {
		fmt.Printf(" id: %+v\n", l.id)
		fmt.Printf(" holes: %+v\n", l.holes)
		fmt.Printf(" mask: %+v\n", l.mask)
		fmt.Printf(" components: %+v\n", l.components)
		fmt.Printf("--------------------------------------------------------------------------------\n")
	}
	for idx, cs := range e.compStorage {
		fmt.Printf("css: %d: %+v\n", idx, cs)
	}
}
// newArchetypeId registers a new archetype for the supplied mask and component
// list, bumps the generation, and returns the new archetype's id.
func (e *archEngine) newArchetypeId(archMask archetypeMask, components []CompId) archetypeId {
	e.generation++

	newList := &lookupList{
		id:         make([]Id, 0, DefaultAllocation),
		holes:      make([]int, 0, DefaultAllocation),
		mask:       archMask,
		components: components,
	}
	e.lookup = append(e.lookup, newList)

	// The new archetype occupies the last slot of the lookup slice.
	return archetypeId(len(e.lookup) - 1)
}
// getGeneration reports the engine's current generation counter.
func (e *archEngine) getGeneration() int { return e.generation }
// count returns the number of live entities that hold every one of the
// supplied components. Holes left by deleted entities are excluded.
func (e *archEngine) count(anything ...any) int {
	comps := make([]CompId, 0, len(anything))
	for _, c := range anything {
		comps = append(comps, name(c))
	}

	archIds := e.FilterList(make([]archetypeId, 0), comps)

	total := 0
	for _, archId := range archIds {
		lookup := e.lookup[archId]
		if lookup == nil {
			panic(fmt.Sprintf("Couldnt find archId in archEngine lookup table: %d", archId))
		}
		// Live entities = allocated slots minus deletion holes.
		total += len(lookup.id) - len(lookup.holes)
	}
	return total
}
// getArchetypeId resolves (or registers) the archetype id for the given mask
// by delegating to the component registry.
func (e *archEngine) getArchetypeId(mask archetypeMask) archetypeId {
	return e.dcr.getArchetypeId(e, mask)
}
// FilterList replaces the contents of archIds with the ids of every archetype
// matching the compId list, and returns the updated slice.
func (e *archEngine) FilterList(archIds []archetypeId, comp []CompId) []archetypeId {
	// Idea 3: Loop through every registered archMask to see if it matches
	// Problem - Forces you to check every arch mask, even if the
	// The good side is that you dont need to deduplicate your list, and you dont need to allocate
	requiredArchMask := buildArchMaskFromId(comp...)

	// Reuse the caller's slice capacity; results overwrite its contents.
	archIds = archIds[:0]
	for archId := range e.dcr.revArchMask {
		// NOTE(review): this reads as "required mask contains the archetype's
		// mask"; for a filter one usually expects the opposite direction
		// (archetype contains required). Confirm archetypeMask.contains's
		// receiver/argument semantics before touching this.
		if requiredArchMask.contains(e.dcr.revArchMask[archId]) {
			archIds = append(archIds, archetypeId(archId))
		}
	}
	return archIds

	//--------------------------------------------------------------------------------
	// Idea 2: Loop through every archMask that every componentId points to
	// // TODO: could I maybe do something more optimal with archetypeMask? Something like this could work.
	// requiredArchMask := buildArchMaskFromId(comp...)
	// archCount := make(map[archetypeId]struct{})
	// archIds = archIds[:0]
	// for _, compId := range comp {
	// for _, archId := range e.dcr.archSet[compId] {
	// archMask, ok := e.dcr.revArchMask[archId]
	// if !ok {
	// panic("AAA")
	// continue
	// } // TODO: This shouldn't happen?
	// if requiredArchMask.contains(archMask) {
	// archCount[archId] = struct{}{}
	// }
	// }
	// }
	// for archId := range archCount {
	// archIds = append(archIds, archId)
	// }
	// return archIds
}
// getStorage returns the typed component storage for T, creating it lazily.
func getStorage[T any](e *archEngine) *componentStorage[T] {
	var zero T
	return getStorageByCompId[T](e, name(zero))
}
// getStorageByCompId returns the typed storage registered for compId,
// creating it if absent. Panics (via the type assertion) when compId maps to
// storage of a different generic type than T.
func getStorageByCompId[T any](e *archEngine, compId CompId) *componentStorage[T] {
	existing := e.compStorage[compId]
	if existing == nil {
		existing = &componentStorage[T]{
			slice: newMap[archetypeId, *componentList[T]](DefaultAllocation),
		}
		e.compStorage[compId] = existing
	}
	return existing.(*componentStorage[T])
}
// getOrAddLookupIndex reserves a slot for id in archId's lookup list
// (filling an existing hole when possible) and returns the slot index.
func (e *archEngine) getOrAddLookupIndex(archId archetypeId, id Id) int {
	return e.lookup[archId].addToEasiestHole(id)
}
// Writes all of the components to the archetype.
// Internally requires that the id is not added to the archetype.
// Returns the index within the archetype where the entity was placed.
func (e *archEngine) spawn(archId archetypeId, id Id, comp ...Component) int {
	lookup := e.lookup[archId]

	// TODO: Doesn't cleanup holes?
	index := lookup.addToEasiestHole(id)
	loc := entLoc{archId, uint32(index)}
	e.writeIndex(loc, id, comp...)

	// All components are added: record them so their onAdd hooks can run later.
	e.finalizeOnAdd = markComponents(e.finalizeOnAdd, comp...)
	return index
}
// writeIndex writes each supplied component into its per-component slice at
// the entity's location. (The id parameter is currently unused here.)
func (e *archEngine) writeIndex(loc entLoc, id Id, comp ...Component) {
	// Build the write cursor once; every component writes through it.
	wd := W{
		engine: e,
		archId: loc.archId,
		index:  int(loc.index),
	}
	for _, c := range comp {
		c.CompWrite(wd)
	}
}
// allocate reserves a slot for id in the given archetype and grows every
// component storage registered to that archetype so the slot exists in each.
// Returns the reserved index.
func (e *archEngine) allocate(archId archetypeId, id Id) int {
	index := e.getOrAddLookupIndex(archId, id)

	// Every component the archetype holds needs a matching slot.
	for _, compId := range e.lookup[archId].components {
		e.getStorage(compId).Allocate(archId, index)
	}
	return index
}
// getStorage returns the untyped storage for compId, creating it lazily.
func (e *archEngine) getStorage(compId CompId) storage {
	if s := e.compStorage[compId]; s != nil {
		return s
	}
	s := newComponentStorage(compId)
	e.compStorage[compId] = s
	return s
}
// writeArch writes val into store at (archId, index).
// (The engine parameter is currently unused but kept for signature stability.)
func writeArch[T any](e *archEngine, archId archetypeId, index int, store *componentStorage[T], val T) {
	store.GetSlice(archId).Write(index, val)
}
// rewriteArch writes the supplied components onto the entity at loc, moving
// the entity to a larger archetype when the additions change its component
// mask. Returns the entity's (possibly new) location.
func (e *archEngine) rewriteArch(loc entLoc, id Id, comp ...Component) entLoc {
	// Calculate the new mask based on the bitwise or of the old and added masks
	lookup := e.lookup[loc.archId]
	oldMask := lookup.mask
	addMask := buildArchMask(comp...)
	newMask := oldMask.bitwiseOr(addMask)

	if oldMask == newMask {
		// Case 1: Archetype and index stays the same.
		// This means that we only need to write the newly added components
		// because we wont be moving the base entity data
		e.writeIndex(loc, id, comp...)
		return loc
	} else {
		// 1. Move Archetype Data
		newLoc := e.moveArchetype(loc, newMask, id)

		// 2. Write new components to new archetype/index location
		e.writeIndex(newLoc, id, comp...)

		// Mark the components that are new relative to oldMask so their
		// onAdd hooks can run during finalize.
		e.finalizeOnAdd = markNewComponents(e.finalizeOnAdd, oldMask, comp...)
		return newLoc
	}
}
// moveArchetype moves an entity from one archetype to another, copying all of
// the data the OLD archetype held into the new location, then tagging the old
// slot as a hole. Callers must write any newly added components afterwards.
// Returns the entity's new location.
func (e *archEngine) moveArchetype(oldLoc entLoc, newMask archetypeMask, id Id) entLoc {
	newArchId := e.dcr.getArchetypeId(e, newMask)
	newIndex := e.allocate(newArchId, id)
	newLoc := entLoc{newArchId, uint32(newIndex)}

	// Copy every component of the old archetype into the new slot.
	oldLookup := e.lookup[oldLoc.archId]
	for _, compId := range oldLookup.components {
		store := e.compStorage[compId]
		store.moveArchetype(oldLoc, newLoc)
	}

	e.TagForDeletion(oldLoc, id)

	// Fix: return the already-built newLoc rather than constructing a second,
	// identical entLoc (also keeps this consistent with moveArchetypeDown).
	return newLoc
}
// moveArchetypeDown moves an entity to the archetype matching newMask,
// copying only the component data the NEW archetype requires, then tags the
// old slot as a hole. Returns the entity's new location.
func (e *archEngine) moveArchetypeDown(oldLoc entLoc, newMask archetypeMask, id Id) entLoc {
	destArchId := e.dcr.getArchetypeId(e, newMask)
	destLoc := entLoc{destArchId, uint32(e.allocate(destArchId, id))}

	// Only components present in the destination archetype are carried over.
	for _, compId := range e.lookup[destArchId].components {
		e.compStorage[compId].moveArchetype(oldLoc, destLoc)
	}

	e.TagForDeletion(oldLoc, id)
	return destLoc
}
// This creates a "hole" in the archetype at the specified Id
// Once we get enough holes, we can re-pack the entire slice
// TODO - How many holes before we repack? How many holes to pack at a time?
func (e *archEngine) TagForDeletion(loc entLoc, id Id) {
lookup := e.lookup[loc.archId]
if lookup == nil {
panic("Archetype doesn't have lookup list")
}
// This indicates that the index needs to be cleaned up and should be skipped in any list processing
lookup.id[loc.index] = InvalidEntity
// This is used to track the current list of indices that need to be cleaned
lookup.holes = append(lookup.holes, int(loc.index))
}
// func (e *archEngine) CleanupHoles(archId archetypeId) {
// lookup := e.lookup[archId]
// if lookup == nil {
// panic("Archetype doesn't have lookup list")
// }
// for _, index := range lookup.holes {
// // Pop all holes off the end of the archetype
// for {
// lastIndex := len(lookup.id) - 1
// if lastIndex < 0 {
// break
// } // Break if the index we are trying to pop off is -1
// lastId := lookup.id[lastIndex]
// if lastId == InvalidEntity {
// // If the last id is a hole, then slice it off
// lookup.id = lookup.id[:lastIndex]
// for n := range e.compStorage {
// if e.compStorage[n] != nil {
// e.compStorage[n].Delete(archId, lastIndex)
// }
// }
// continue // Try again
// }
// break
// }
// // Check bounds because we may have popped past our original index
// if index >= len(lookup.id) {
// continue
// }
// // Swap lastIndex (which is not a hole) with index (which is a hole)
// lastIndex := len(lookup.id) - 1
// lastId := lookup.id[lastIndex]
// if lastId == InvalidEntity {
// panic("Bug: This shouldn't happen")
// }
// // TODO: To fix this, you need to bubble the index swap up to the entLoc map. You probably want to relocate how the "CleanupHoles" gets called. I kinda feel like it shouldn't get executed on write?
// lookup.id[index] = lastId
// lookup.id = lookup.id[:lastIndex]
// lookup.index.Put(lastId, index)
// for n := range e.compStorage {
// if e.compStorage[n] != nil {
// e.compStorage[n].Delete(archId, index)
// }
// }
// }
// // Clear holes slice
// lookup.holes = lookup.holes[:0]
// }
// This is a defragment operation which tries to repack entities closer together
// You wont usually need to do this, but if you delete a lot of entities of one archetype and dont plan
// to add them back, then you can run this to repack
func (w *World) CleanupHoles() {
	for lookupIdx, lookup := range w.engine.lookup {
		archId := archetypeId(lookupIdx)
		for _, index := range lookup.holes {
			// Pop all holes off the end of the archetype
			for {
				lastIndex := len(lookup.id) - 1
				if lastIndex < 0 {
					break // Break if the index we are trying to pop off is -1
				}
				lastId := lookup.id[lastIndex]
				if lastId == InvalidEntity {
					// If the last id is a hole, then slice it off
					lookup.id = lookup.id[:lastIndex]
					// Shrink every active component storage to match.
					for n := range w.engine.compStorage {
						if w.engine.compStorage[n] != nil {
							w.engine.compStorage[n].Delete(archId, lastIndex)
						}
					}
					continue // Try again
				}
				break
			}

			// Check bounds because we may have popped past our original index
			if index >= len(lookup.id) {
				continue
			}

			// Swap lastIndex (which is not a hole) with index (which is a hole)
			lastIndex := len(lookup.id) - 1
			lastId := lookup.id[lastIndex]
			if lastId == InvalidEntity {
				panic("Bug: This shouldn't happen")
			}

			// Update id list
			lookup.id[index] = lastId
			lookup.id = lookup.id[:lastIndex]

			// Update entity location for this id
			newEntLoc := entLoc{archId, uint32(index)} // lookup.index.Put(lastId, index)
			w.arch.Put(lastId, newEntLoc)
			// Mirror the swap-remove in each component storage (presumably
			// Delete moves the last element into `index` — confirm its
			// semantics before changing this ordering).
			for n := range w.engine.compStorage {
				if w.engine.compStorage[n] != nil {
					w.engine.compStorage[n].Delete(archId, index)
				}
			}
		}

		// Clear holes slice
		lookup.holes = lookup.holes[:0]
	}
}
package main
// TODO - Add ballast and investigate GC pressure?
// TODO - Disable GC: GOCG=-1 go run .
// TODO - manual runtime.GC()
// Baseline: 8/3/23
// go run . ecs-slow 10000 1
// Iter Time
// 0 0.548914251
// 1 0.563671395
// 2 0.567264819
// 3 0.576352863
// 4 0.571570715
// Baseline: 9/2/23 (intmap replacement)
// Iter Time
// 0 0.478192213
// 1 0.447929508
// 2 0.449512679
// 3 0.45052541
// 4 0.453497259
import (
"fmt"
"log"
"math/rand"
"strconv"
"time"
"flag"
"os"
"runtime"
"runtime/pprof"
"github.com/unitoftime/ecs"
)
// Profile output destinations; written via runtime/pprof when non-empty.
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to `file`")

// Vec2 is a simple 2D vector.
type Vec2 struct {
	X, Y float64
}

// Position and Velocity are distinct component types sharing Vec2's layout.
type Position Vec2
type Velocity Vec2

// Collider is a circular collider.
type Collider struct {
	Radius float64
}

// Count accumulates the number of collisions an entity has experienced.
type Count struct {
	Count int32
}

// Benchmark tuning knobs.
const iterations = 5
const maxPosition = 100.0
const maxSpeed = 10.0
const maxCollider = 1.0
// main parses flags/arguments, optionally wires up CPU/heap profiling, and
// dispatches to the selected benchmark.
//
// Usage: [-cpuprofile f] [-memprofile f] <program> <size> <collisionLimit>
func main() {
	flag.Parse()

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		defer f.Close() // error handling omitted for example
		// Fix: start the profile synchronously. The original wrapped this in
		// `go func(){...}()`, racing against the deferred StopCPUProfile —
		// the profile could be stopped before it ever started.
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}

	// Fix: read positional arguments via flag.Args(), not os.Args. With
	// os.Args[1..3], passing -cpuprofile/-memprofile would shift the
	// positions and the flag text would be parsed as the program name.
	args := flag.Args()
	if len(args) < 3 {
		log.Fatal("usage: [-cpuprofile f] [-memprofile f] <program> <size> <collisionLimit>")
	}
	program := args[0]
	size, err := strconv.Atoi(args[1])
	if err != nil {
		panic(err)
	}
	colLimitArg, err := strconv.Atoi(args[2])
	if err != nil {
		panic(err)
	}
	collisionLimit := int32(colLimitArg)

	fmt.Println("Iter", "Time")
	switch program {
	case "native":
		benchNativeComponents(size, collisionLimit)
	case "native-split":
		benchNativeSplit(size, collisionLimit)
	case "ecs-slow":
		benchPhysicsAlt(size, collisionLimit)
	case "ecs":
		benchPhysicsOptimized(size, collisionLimit)
	default:
		fmt.Printf("Invalid Program name %s\n", program)
		fmt.Println("Available Options")
		fmt.Println("physics - Runs a physics simulation")
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal("could not create memory profile: ", err)
		}
		defer f.Close() // error handling omitted for example
		runtime.GC()    // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal("could not write memory profile: ", err)
		}
	}
}
// createWorld builds a world with `size` entities, each holding a random
// Position, Velocity, Collider, and a zeroed Count.
func createWorld(size int) *ecs.World {
	world := ecs.NewWorld()
	for n := 0; n < size; n++ {
		id := world.NewId()
		pos := ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()})
		vel := ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()})
		col := ecs.C(Collider{
			Radius: maxCollider * rand.Float64(),
		})
		ecs.Write(world, id, pos, vel, col, ecs.C(Count{}))
	}
	return world
}
// moveCircles integrates each entity's position by its velocity over
// fixedTime, reflecting velocity when the entity crosses the [0, maxPosition]
// bounding box. Hole entries (InvalidEntity) are skipped.
func moveCircles(query *ecs.View2[Position, Velocity], fixedTime float64, maxPosition float64) {
	query.MapSlices(func(ids []ecs.Id, pos []Position, vel []Velocity) {
		if len(ids) != len(pos) || len(ids) != len(vel) {
			panic("ERR")
		}
		for i, id := range ids {
			if id == ecs.InvalidEntity {
				continue // deleted-entity hole
			}
			p, v := &pos[i], &vel[i]
			p.X += v.X * fixedTime
			p.Y += v.Y * fixedTime

			// Bounce off the bounding rect.
			if p.X <= 0 || p.X >= maxPosition {
				v.X = -v.X
			}
			if p.Y <= 0 || p.Y >= maxPosition {
				v.Y = -v.Y
			}
		}
	})
}
// checkCollisions runs an O(n^2) sweep: for each entity in query, count
// overlaps against every entity in innerQuery. When an entity's Count exceeds
// collisionLimit it is deleted and *deathCount is incremented.
func checkCollisions(world *ecs.World,
	query *ecs.View3[Position, Collider, Count],
	innerQuery *ecs.View2[Position, Collider],
	collisionLimit int32, deathCount *int) {
	query.MapSlices(func(aId []ecs.Id, aPos []Position, aCol []Collider, aCnt []Count) {
		innerQuery.MapSlices(func(bId []ecs.Id, bPos []Position, bCol []Collider) {
			if len(aId) != len(aPos) || len(aId) != len(aCol) {
				panic("ERR")
			}
			if len(bId) != len(bPos) || len(bId) != len(bCol) {
				panic("ERR")
			}
			for i := range aId {
				if aId[i] == ecs.InvalidEntity {
					continue
				}
				aPos_i := &aPos[i]
				aCol_i := &aCol[i]
				for j := range bId {
					// Fix: was `bId[i]` — it indexed the inner slice with the
					// outer index, so inner-chunk holes were never skipped
					// (and it could read out of range when the inner chunk is
					// shorter than i).
					if bId[j] == ecs.InvalidEntity {
						continue
					}
					bPos_j := &bPos[j]
					bCol_j := &bCol[j]
					if aId[i] == bId[j] {
						continue
					} // Skip if entity is the same
					dx := aPos_i.X - bPos_j.X
					dy := aPos_i.Y - bPos_j.Y
					distSq := (dx * dx) + (dy * dy)
					dr := aCol_i.Radius + bCol_j.Radius
					drSq := dr * dr
					if drSq > distSq {
						aCnt[i].Count++
					}
					// Kill and spawn one
					// TODO move to outer loop?
					if collisionLimit > 0 && aCnt[i].Count > collisionLimit {
						success := ecs.Delete(world, aId[i])
						if success {
							*deathCount++
							break
						}
					}
				}
			}
		})
	})
}
// benchPhysicsOptimized runs the chunked (MapSlices-based) ECS benchmark:
// integrate positions, run the O(n^2) collision/delete pass, then respawn one
// entity per death. Prints seconds elapsed per iteration.
func benchPhysicsOptimized(size int, collisionLimit int32) {
	world := createWorld(size)
	fixedTime := (15 * time.Millisecond).Seconds()

	// Build the queries once, outside the timed loop.
	moveQuery := ecs.Query2[Position, Velocity](world)
	posColCntQuery := ecs.Query3[Position, Collider, Count](world)
	posColQuery := ecs.Query2[Position, Collider](world)

	start := time.Now()
	dt := time.Since(start)
	for iterCount := 0; iterCount < iterations; iterCount++ {
		start = time.Now()

		// ecs.ExecuteSystem2(world, func(query *ecs.Query2[Position, Velocity]) {
		// 	moveCircles(query, fixedTime, maxPosition)
		// })
		// deathCount := 0
		// ecs.ExecuteSystem2(world, func(query *ecs.Query2[Position, Collider]) {
		// 	checkCollisions(world, query, collisionLimit, &deathCount)
		// })

		{
			moveCircles(moveQuery, fixedTime, maxPosition)
		}

		deathCount := 0
		{
			checkCollisions(world, posColCntQuery, posColQuery, collisionLimit, &deathCount)
		}
		// fmt.Println("DeathCount:", deathCount)

		// Spawn new entities, one per each entity we deleted
		for i := 0; i < deathCount; i++ {
			id := world.NewId()
			ent := ecs.NewEntity(
				ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}),
				ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}),
				ecs.C(Collider{
					Radius: maxCollider * rand.Float64(),
				}),
				ecs.C(Count{}),
			)
			ent.Write(world, id)
		}

		// world.Print(0)
		dt = time.Since(start)
		fmt.Println(iterCount, dt.Seconds())
	}

	// query := ecs.Query1[Count](world)
	// query.MapId(func(id ecs.Id, count *Count) {
	// 	fmt.Println(id, count.Count)
	// })
}
/*
974 1031
975 625
976 787
977 208
978 1601
979 1243
980 167
981 108
982 1040
983 500
984 637
985 1011
986 830
987 1247
988 901
989 1597
990 418
991 767
992 951
993 1252
994 948
995 194
996 290
997 181
998 1276
999 858
1000 789
1001 638
*/
// func benchPhysics(size int, collisionLimit int32) {
// world := createWorld(size)
// start := time.Now()
// dt := time.Since(start)
// fixedTime := (15 * time.Millisecond).Seconds()
// for i := 0; i < iterations; i++ {
// start = time.Now()
// // Update positions
// ecs.Map2(world, func(id ecs.Id, position *Position, velocity *Velocity) {
// position.X += velocity.X * fixedTime
// position.Y += velocity.Y * fixedTime
// // Bump into the bounding rect
// if position.X <= 0 || position.X >= maxPosition {
// velocity.X = -velocity.X
// }
// if position.Y <= 0 || position.Y >= maxPosition {
// velocity.Y = -velocity.Y
// }
// })
// // Check collisions, increment the count if a collision happens
// deathCount := 0
// ecs.Map3(world, func(aId ecs.Id, aPos *Position, aCol *Collider, aCnt *Count) {
// ecs.Map2(world, func(bId ecs.Id, bPos *Position, bCol *Collider) {
// if aId == bId { return } // Skip if entity is the same
// dx := aPos.X - bPos.X
// dy := aPos.Y - bPos.Y
// distSq := (dx * dx) + (dy * dy)
// dr := aCol.Radius + bCol.Radius
// drSq := dr * dr
// if drSq > distSq {
// aCnt.Count++
// }
// // Kill and spawn one
// // TODO move to outer loop?
// if collisionLimit > 0 && aCnt.Count > collisionLimit {
// success := ecs.Delete(world, aId)
// if success {
// deathCount++
// return
// }
// }
// })
// })
// // Spawn new entities, one per each entity we deleted
// for i := 0; i < deathCount; i++ {
// id := world.NewId()
// ent := ecs.NewEntity(
// ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}),
// ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}),
// ecs.C(Collider{
// Radius: maxCollider * rand.Float64(),
// }),
// ecs.C(Count{}),
// )
// ecs.WriteEntity(world, id, ent)
// }
// // world.Print(0)
// dt = time.Since(start)
// fmt.Println(i, dt.Seconds())
// }
// // ecs.Map(world, func(id ecs.Id, collider *Collider) {
// // fmt.Println(id, collider.Count)
// // })
// }
// benchPhysicsAlt runs the per-entity (MapId-based) ECS benchmark — the
// "ecs-slow" path. Same phases as benchPhysicsOptimized, but with one
// callback invocation per entity rather than per chunk.
func benchPhysicsAlt(size int, collisionLimit int32) {
	world := createWorld(size)

	posVelQuery := ecs.Query2[Position, Velocity](world)
	posColQuery := ecs.Query2[Position, Collider](world)
	posColCntQuery := ecs.Query3[Position, Collider, Count](world)

	// TODO - maybe one day
	// posVelSystem := ecs.NewSystemFunc(world *ecs.World, func(query ecs.View2[Position, Velocity]))

	start := time.Now()
	dt := time.Since(start)
	fixedTime := (15 * time.Millisecond).Seconds()
	for i := 0; i < iterations; i++ {
		start = time.Now()

		// Update positions
		posVelQuery.MapId(func(id ecs.Id, position *Position, velocity *Velocity) {
			// ecs.Map2(world, func(id ecs.Id, position *Position, velocity *Velocity) {
			position.X += velocity.X * fixedTime
			position.Y += velocity.Y * fixedTime

			// Bump into the bounding rect
			if position.X <= 0 || position.X >= maxPosition {
				velocity.X = -velocity.X
			}
			if position.Y <= 0 || position.Y >= maxPosition {
				velocity.Y = -velocity.Y
			}
		})

		// Check collisions, increment the count if a collision happens
		deathCount := 0
		posColCntQuery.MapId(func(aId ecs.Id, aPos *Position, aCol *Collider, aCnt *Count) {
			posColQuery.MapId(func(bId ecs.Id, bPos *Position, bCol *Collider) {
				if aId == bId {
					return
				} // Skip if entity is the same
				dx := aPos.X - bPos.X
				dy := aPos.Y - bPos.Y
				distSq := (dx * dx) + (dy * dy)
				dr := aCol.Radius + bCol.Radius
				drSq := dr * dr
				if drSq > distSq {
					aCnt.Count++
				}

				// Kill and spawn one
				// TODO move to outer loop?
				if collisionLimit > 0 && aCnt.Count > collisionLimit {
					success := ecs.Delete(world, aId)
					if success {
						deathCount++
						return
					}
				}
			})
		})

		// Spawn new entities, one per each entity we deleted
		for i := 0; i < deathCount; i++ {
			id := world.NewId()
			ent := ecs.NewEntity(
				ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}),
				ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}),
				ecs.C(Collider{
					Radius: maxCollider * rand.Float64(),
				}),
				ecs.C(Count{}),
			)
			ent.Write(world, id)
		}

		// world.Print(0)
		dt = time.Since(start)
		fmt.Println(i, dt.Seconds())
	}

	// query := ecs.Query1[Count](world)
	// query.MapId(func(id ecs.Id, count *Count) {
	// 	fmt.Println(id, count.Count)
	// })
}
/*
func benchPhysicsOptimized(size int, collisionLimit int32) {
iterations := 1000
world := ecs.NewWorld()
maxSpeed := 10.0
maxPosition := 100.0
maxCollider := 1.0
for i := 0; i < size; i++ {
id := world.NewId()
ent := ecs.NewEntity(
ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}),
ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}),
ecs.C(Collider{
Radius: maxCollider * rand.Float64(),
Count: 0,
}),
)
ecs.WriteEntity(world, id, ent)
}
loopCounter := 0
fixedTime := (15 * time.Millisecond).Seconds()
start := time.Now()
dt := time.Since(start)
for iterCount := 0; iterCount < iterations; iterCount++ {
start = time.Now()
{
// view := ecs.ViewAll2[Position, Velocity](world)
// for iter := view.Iterate(); iter.Ok(); {
// _, pos, vel := iter.Next()
// // fmt.Println("0", iter)
// pos.X += vel.X * fixedTime
// pos.Y += vel.Y * fixedTime
// // Bump into the bounding rect
// if pos.X <= 0 || pos.X >= maxPosition {
// vel.X = -vel.X
// }
// if pos.Y <= 0 || pos.Y >= maxPosition {
// vel.Y = -vel.Y
// }
// loopCounter++
// }
view := ecs.ViewAll2[Position, Velocity](world)
for view.Ok() {
_, pos, vel := view.IterChunkClean()
if len(pos) != len(vel) { panic("ERR") }
for j := range pos {
pos[j].X += vel[j].X * fixedTime
pos[j].Y += vel[j].Y * fixedTime
// Bump into the bounding rect
if pos[j].X <= 0 || pos[j].X >= maxPosition {
vel[j].X = -vel[j].X
}
if pos[j].Y <= 0 || pos[j].Y >= maxPosition {
vel[j].Y = -vel[j].Y
}
loopCounter++
}
}
}
// deathCount := 0
// view := ecs.ViewAll2[Position, Collider](world)
// // view2 := ecs.ViewAll2[Position, Collider](world)
// for iter := view.Iterate(); iter.Ok(); {
// aId, aPos, aCol := iter.Next()
// // fmt.Println("1", iter, aId, aPos, aCol)
// // for view.Ok() {
// // aId, aPos, aCol := view.Iter4()
// for iter2 := view.Iterate(); iter2.Ok(); {
// bId, bPos, bCol := iter2.Next()
// // fmt.Println("2", iter2, bId, bPos, bCol)
// // view2.Reset()
// // for view2.Ok() {
// // bId, bPos, bCol := view2.Iter4()
// if aId == bId { continue } // Skip if entity is the same
// dx := aPos.X - bPos.X
// dy := aPos.Y - bPos.Y
// distSq := (dx * dx) + (dy * dy)
// dr := aCol.Radius + bCol.Radius
// drSq := dr * dr
// if drSq > distSq {
// aCol.Count++
// }
// // Kill and spawn one
// // TODO move to outer loop?
// if collisionLimit > 0 && aCol.Count > collisionLimit {
// success := ecs.Delete(world, aId)
// if success {
// deathCount++
// break
// }
// }
// loopCounter++
// }
// }
// !!!Fastest!!!!
// Check collisions, increment the count if a collision happens
deathCount := 0
view := ecs.ViewAll2[Position, Collider](world)
view2 := ecs.ViewAll2[Position, Collider](world)
for view.Ok() {
ids, pos, col := view.IterChunkClean()
if len(ids) != len(pos) || len(ids) != len(col) { panic ("ERROR") }
for j := range ids {
aId := ids[j]
aPos := &pos[j]
aCol := &col[j]
view2.Reset()
for view2.Ok() {
targIdList, targPosList, targCol := view2.IterChunkClean()
if len(targIdList) != len(targPosList) || len(targIdList) != len(targCol) { panic ("ERROR") }
for jj := range targIdList {
bId := targIdList[jj]
bPos := &targPosList[jj]
bCol := &targCol[jj]
if aId == bId { continue } // Skip if entity is the same
dx := aPos.X - bPos.X
dy := aPos.Y - bPos.Y
distSq := (dx * dx) + (dy * dy)
dr := aCol.Radius + bCol.Radius
drSq := dr * dr
if drSq > distSq {
aCol.Count++
}
// Kill and spawn one
// TODO move to outer loop?
if collisionLimit > 0 && aCol.Count > collisionLimit {
success := ecs.Delete(world, aId)
if success {
deathCount++
break
}
}
loopCounter++
}
}
}
}
// Spawn new entities, one per each entity we deleted
for i := 0; i < deathCount; i++ {
id := world.NewId()
ent := ecs.NewEntity(
ecs.C(Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}),
ecs.C(Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}),
ecs.C(Collider{
Radius: maxCollider * rand.Float64(),
Count: 0,
}),
)
ecs.WriteEntity(world, id, ent)
}
// world.Print(0)
dt = time.Since(start)
fmt.Println(iterCount, dt, loopCounter)
loopCounter = 0
}
ecs.Map(world, func(id ecs.Id, collider *Collider) {
fmt.Println(id, collider.Count)
})
}
*/
// benchNativeComponents runs the physics benchmark against plain parallel
// slices of structs (array-of-structs layout) as a non-ECS baseline.
// Note: unlike the ECS paths, entities are never actually deleted here;
// deathCount is only tallied.
func benchNativeComponents(size int, collisionLimit int32) {
	ids := make([]ecs.Id, size)
	pos := make([]Position, size)
	vel := make([]Velocity, size)
	col := make([]Collider, size)
	cnt := make([]Count, size)
	for i := range ids {
		ids[i] = ecs.Id(i + 2)
		pos[i] = Position{maxPosition * rand.Float64(), maxPosition * rand.Float64()}
		vel[i] = Velocity{maxSpeed * rand.Float64(), maxSpeed * rand.Float64()}
		col[i] = Collider{
			Radius: maxCollider * rand.Float64(),
		}
		cnt[i] = Count{}
	}

	fixedTime := (15 * time.Millisecond).Seconds()
	for iterCount := 0; iterCount < iterations; iterCount++ {
		start := time.Now()

		// Integrate positions and bounce off the bounding rect.
		for i := range ids {
			pos[i].X += vel[i].X * fixedTime
			pos[i].Y += vel[i].Y * fixedTime
			if pos[i].X <= 0 || pos[i].X >= maxPosition {
				vel[i].X = -vel[i].X
			}
			if pos[i].Y <= 0 || pos[i].Y >= maxPosition {
				vel[i].Y = -vel[i].Y
			}
		}

		// O(n^2) collision pass: bump Count on overlap, tally "deaths".
		deathCount := 0
		for i := range ids {
			for j := range ids {
				if ids[i] == ids[j] {
					continue // skip self
				}
				dx := pos[i].X - pos[j].X
				dy := pos[i].Y - pos[j].Y
				distSq := (dx * dx) + (dy * dy)
				dr := col[i].Radius + col[j].Radius
				drSq := dr * dr
				if drSq > distSq {
					cnt[i].Count++
				}
				if collisionLimit > 0 && cnt[i].Count > collisionLimit {
					deathCount++
				}
			}
		}

		fmt.Println(iterCount, time.Since(start).Seconds())
	}
}
// struct myStruct {
// X float64
// Y float64
// }
// myarray []myStruct
// myArrayX []float64
// myArrayY []float64
// [uint64]
// [{float64, float64}]
// [{float64, float64}]
// [{float64, int32}]
// Holes [bool] [true] ...
// Id [uint64] [uint64] ...
// PosX [float64] [float64] ...
// PosY [float64] [float64] ...
// VelX [float64] [float64] ...
// VelY [float64] [float64] ...
// ColRadius [float64] [float64] ...
// ColCount [int32] [int32] ...
// Test with this new memory layout
// [uint64]
// PosX [float64]
// PosY [float64]
// VelX [float64]
// VelY [float64]
// ColRadius [float64]
// ColCount [int32]
// benchNativeSplit runs the physics benchmark against fully-split scalar
// slices (struct-of-arrays layout) as a second non-ECS baseline. As with
// benchNativeComponents, deaths are only counted, never applied.
func benchNativeSplit(size int, collisionLimit int32) {
	ids := make([]ecs.Id, size)
	posX := make([]float64, size)
	posY := make([]float64, size)
	velX := make([]float64, size)
	velY := make([]float64, size)
	col := make([]float64, size)
	cnt := make([]int32, size)
	for i := range ids {
		ids[i] = ecs.Id(i + 2)
		posX[i] = maxPosition * rand.Float64()
		posY[i] = maxPosition * rand.Float64()
		velX[i] = maxSpeed * rand.Float64()
		velY[i] = maxSpeed * rand.Float64()
		col[i] = maxCollider * rand.Float64()
		cnt[i] = 0
	}

	const fixedTime = 0.015
	for iterCount := 0; iterCount < iterations; iterCount++ {
		start := time.Now()

		// Integrate positions and bounce off the bounding rect.
		for i := range ids {
			posX[i] += velX[i] * fixedTime
			posY[i] += velY[i] * fixedTime
			if posX[i] <= 0 || posX[i] >= maxPosition {
				velX[i] = -velX[i]
			}
			if posY[i] <= 0 || posY[i] >= maxPosition {
				velY[i] = -velY[i]
			}
		}

		// O(n^2) collision pass.
		deathCount := 0
		for i := range ids {
			for j := range ids {
				if ids[i] == ids[j] {
					continue // skip self
				}
				dx := posX[i] - posX[j]
				dy := posY[i] - posY[j]
				distSq := (dx * dx) + (dy * dy)
				dr := col[i] + col[j]
				drSq := dr * dr
				if drSq > distSq {
					cnt[i]++
				}
				if collisionLimit > 0 && cnt[i] > collisionLimit {
					deathCount++
				}
			}
		}

		fmt.Println(iterCount, time.Since(start).Seconds())
	}
}
// [ id, id , id ]
// [ pos, pos, pos ]
// [ vel, vel, ]
// [ col, col, col ]
package ecs
// Bundler accumulates a set of components so they can be written to an entity
// in a single operation (see Bundler.Write).
type Bundler struct {
	archMask archetypeMask // The current archetypeMask

	// TODO: Instead of set, you could just use the arch mask
	Set        [maxComponentId]bool      // The list of components that are being bundled
	Components [maxComponentId]Component // Component storage array for everything we've bundled

	// Highest CompId bundled so far — presumably an iteration bound for
	// consumers; confirm at the read sites (not visible in this chunk).
	maxComponentIdAdded CompId
}
// Clear resets the bundler so it can be reused. The Components array is kept
// as-is on purpose: only entries flagged in Set are ever read back.
func (b *Bundler) Clear() {
	b.maxComponentIdAdded = 0
	b.archMask = blankArchMask
	b.Set = [maxComponentId]bool{}
}
// func (bun *Bundler) Add(comp Component) {
// compId := comp.id()
// bun.archMask.addComponent(compId)
// bun.Set[compId] = true
// if bun.Components[compId] == nil {
// bun.Components[compId] = comp.Clone() // Create an internal copy
// } else {
// comp.SetOther(bun.Components[compId])
// }
// bun.maxComponentIdAdded = max(bun.maxComponentIdAdded, compId)
// }
// Has reports whether a component with comp's id has been bundled.
func (bun *Bundler) Has(comp Component) bool {
	compId := comp.CompId()
	return bun.Set[compId]
}
// readBundle extracts the bundled value of type T, reporting false when no
// component of that type has been set.
func readBundle[T Component](bun *Bundler) (T, bool) {
	var zero T
	compId := zero.CompId()
	if !bun.Set[compId] {
		return zero, false
	}
	// Unbox the stored component back to its concrete value.
	return bun.Components[compId].(*box[T]).val, true
}
// func (bun *Bundler) Read(comp Component) (Component, bool) {
// compId := comp.CompId()
// if !bun.Set[compId] {
// return comp, false // Was not set
// }
// return bun.Components[compId], true
// }
// Remove unmarks the component from the bundle and clears its bit in the
// archetype mask. Any previously stored value is left in place and ignored.
func (bun *Bundler) Remove(compId CompId) {
	bun.Set[compId] = false
	bun.archMask.removeComponent(compId)
}
// func WriteComponent[T any](bun *Bundler, comp T) {
// compId := nameTyped(comp)
// bun.archMask.addComponent(compId)
// bun.Set[compId] = true
// if bun.Components[compId] == nil {
// bun.Components[compId] = C(comp)
// } else {
// bun.Components[compId].Set(comp)
// }
// bun.maxComponentIdAdded = max(bun.maxComponentIdAdded, compId)
// }
// Write flushes all bundled components onto the entity with the given id.
// An empty bundler is a no-op.
func (b *Bundler) Write(world *World, id Id) {
	if b.archMask != blankArchMask {
		world.writeBundler(id, b)
	}
}
package ecs
import (
"github.com/unitoftime/cod/backend"
)
// CodEquals reports whether two Ids are equal. Used by cod-generated code.
func (t Id) CodEquals(tt Id) bool {
	return tt == t
}
// EncodeCod appends the varint encoding of the Id to bs and returns the
// extended slice. Generated serialization code for the cod library.
func (t Id) EncodeCod(bs []byte) []byte {
	{
		value0 := uint32(t)
		bs = backend.WriteVarUint32(bs, value0)
	}
	return bs
}
// DecodeCod decodes a varint-encoded Id from bs into t, returning the number
// of bytes consumed. Generated deserialization code for the cod library.
func (t *Id) DecodeCod(bs []byte) (int, error) {
	var err error
	var n int
	var nOff int

	{
		var value0 uint32
		value0, nOff, err = backend.ReadVarUint32(bs[n:])
		if err != nil {
			return 0, err
		}
		n += nOff
		*t = Id(value0)
	}

	return n, err
}
package ecs
// onInsert is implemented by components that want a callback to run when they
// are inserted via an EntityCommand (see EntityCommand.Insert).
type onInsert interface {
	Component
	OnInsert(ent EntityCommand)
}
// type singleCmd interface {
// apply(*World)
// }
// type spawnCmd struct {
// bundler *Bundler
// }
// func (c spawnCmd) apply(world *World) {
// id := world.NewId()
// c.bundler.Write(world, id)
// }
// CmdType enumerates the kinds of commands that can be queued in a CommandQueue.
type CmdType uint8

const (
	CmdTypeNone    CmdType = iota // No-op; used for cancelled commands
	CmdTypeSpawn                  // Spawn a new entity and write the bundled components
	CmdTypeWrite                  // Write bundled components to an existing entity
	CmdTypeTrigger                // Fire an event
	CmdTypeDelete                 // Delete an entity
	// CmdTypeCustom
)
// singleCmd is one queued command. Which fields are meaningful depends on Type.
type singleCmd struct {
	Type    CmdType
	id      Id       // Target entity (InvalidEntity for entity-less triggers)
	bundler *Bundler // Components to write (Spawn/Write)
	world   *World
	event   Event // Payload for Trigger commands
}
// apply executes this queued command against the world.
func (c *singleCmd) apply(world *World) {
	switch c.Type {
	case CmdTypeNone:
		// Command was cancelled; nothing to do.
	case CmdTypeSpawn, CmdTypeWrite:
		// Spawn and Write both resolve to writing the bundle to c.id;
		// a spawn simply targets a freshly allocated id.
		if world.cmd.preWrite != nil {
			world.cmd.preWrite(EntityCommand{c})
		}
		c.bundler.Write(world, c.id) // TODO: Spawn could use a dedicated (faster) spawn path
	case CmdTypeTrigger:
		world.Trigger(c.event, c.id)
	case CmdTypeDelete:
		if world.cmd.preDelete != nil {
			world.cmd.preDelete(c.id)
		}
		Delete(world, c.id)
	}
}
// EntityCommand is a handle to a single queued spawn/write command, used to
// attach components to it before the queue executes.
type EntityCommand struct {
	cmd *singleCmd
}
// func (e EntityCommand) Printout() {
// fmt.Println("---")
// for i := range e.cmd.bundler.Components {
// if e.cmd.bundler.Set[i] {
// fmt.Printf("+%v\n", e.cmd.bundler.Components[i])
// }
// }
// // fmt.Printf("+%v\n", e.cmd.bundler)
// }
// Cancel aborts this command: it becomes a no-op and is detached from its entity.
func (e EntityCommand) Cancel() {
	e.cmd.id = InvalidEntity
	e.cmd.Type = CmdTypeNone
}
// Removes the supplied component type from this entity command.
// TODO: Should this also remove it from the world? if it exists there?
func (e EntityCommand) Remove(comp Component) {
	e.cmd.bundler.Remove(comp.CompId())
}
// Empty reports whether this EntityCommand is the zero value (not attached
// to any queued command).
func (e EntityCommand) Empty() bool {
	var zero EntityCommand
	return e == zero
}
// Insert bundles the writer's components onto the pending command. If the
// writer implements onInsert and its component wasn't already bundled, its
// OnInsert hook runs after bundling.
func (e EntityCommand) Insert(bun Writer) EntityCommand {
	inserter, canInsert := bun.(onInsert)
	firstTime := true
	if canInsert {
		firstTime = !e.cmd.bundler.Has(inserter)
	}

	unbundle(bun, e.cmd.bundler)

	// Only run OnInsert if the writer supports it and this is its first insert
	if canInsert && firstTime {
		inserter.OnInsert(e)
	}
	return e
}
// Inserts the component only when it is missing both from the pending bundle
// and from the entity's current components in the world.
func (e EntityCommand) InsertIfMissing(bun Component) EntityCommand {
	alreadyBundled := e.cmd.bundler.Has(bun)
	if !alreadyBundled && !e.cmd.world.hasCompId(e.Id(), bun.CompId()) {
		unbundle(bun, e.cmd.bundler)
	}
	return e
}
// Id returns the entity id that this command targets.
func (e EntityCommand) Id() Id {
	return e.cmd.id
}
// func (e EntityCommand) Remove(bun Bundle) EntityCommand {
// bun.Unbundle(e.cmd.bundler)
// return e
// }
// func (e EntityCommand) Add(seq iter.Seq[Component]) EntityCommand {
// for c := range seq {
// e.cmd.bundler.Add(c)
// }
// return e
// }
// ReadComp reads a component of type T from the entity command's pending
// bundle. Returns the zero value and false if the component isn't bundled.
func ReadComp[T Component](e EntityCommand) (T, bool) {
	// readBundle already returns (zero T, false) when the component is not
	// set, so its result can be returned directly — the previous re-check of
	// the ok flag and separate zero-value return were redundant.
	return readBundle[T](e.cmd.bundler)
}
// CommandQueue buffers world mutations (spawns, writes, triggers, deletes)
// so they can be applied later, in order, via Execute.
type CommandQueue struct {
	world     *World
	preWrite  func(EntityCommand) // Optional hook run before each spawn/write; see SetPrewrite
	preDelete func(Id)            // Optional hook run before each delete
	commands  []singleCmd

	// Pool of reusable bundlers; currentBundlerIndex marks the next free slot
	currentBundlerIndex int
	bundlers            []*Bundler
}
// NewCommandQueue creates a CommandQueue that applies its commands to world.
func NewCommandQueue(world *World) *CommandQueue {
	cq := CommandQueue{world: world}
	return &cq
}
// Initialize builds a fresh CommandQueue bound to the supplied world.
// Implements the scheduler's resource-injection interface.
func (c *CommandQueue) Initialize(world *World) any {
	return NewCommandQueue(world)
}
// NextBundler returns the next reusable bundler from the internal pool,
// allocating a new one when the pool is exhausted.
func (c *CommandQueue) NextBundler() *Bundler {
	if c.currentBundlerIndex < len(c.bundlers) {
		// Reuse a pooled bundler, resetting any stale state
		b := c.bundlers[c.currentBundlerIndex]
		c.currentBundlerIndex++
		b.Clear()
		return b
	}

	// Pool exhausted: grow it with a fresh bundler
	b := &Bundler{}
	c.bundlers = append(c.bundlers, b)
	c.currentBundlerIndex = len(c.bundlers)
	return b
}
// unbundle writes every component of the bundle into the supplied bundler.
func unbundle(bundle Writer, bundler *Bundler) {
	bundle.CompWrite(W{bundler: bundler})
}
// remove writes the bundle's components into the supplied bundler.
// NOTE(review): this body is identical to unbundle — it calls CompWrite,
// which adds components rather than removing them. Confirm whether a
// dedicated removal path was intended here.
func remove(bundle Writer, bundler *Bundler) {
	wd := W{bundler: bundler}
	bundle.CompWrite(wd)
}
// func CmdSpawn[T Writer](c *CommandQueue, ub T) {
// bundler := c.NextBundler()
// unbundle(ub, bundler)
// // ub.Unbundle(bundler)
// c.commands = append(c.commands, singleCmd{
// Type: CmdTypeSpawn,
// id: c.world.NewId(),
// bundler: bundler,
// })
// }
// func (c *CommandQueue) Spawn(bun Writer) {
// entCmd := c.SpawnEmpty()
// entCmd.Insert(bun)
// }
// SpawnEmpty queues a spawn command for a brand new entity and returns an
// EntityCommand that can be used to attach components to it.
func (c *CommandQueue) SpawnEmpty() EntityCommand {
	c.commands = append(c.commands, singleCmd{
		Type:    CmdTypeSpawn,
		id:      c.world.NewId(),
		bundler: c.NextBundler(),
		world:   c.world,
	})
	last := len(c.commands) - 1
	return EntityCommand{cmd: &c.commands[last]}
}
// // Pushes a command to delete the entity
// func (c *CommandQueue) Delete(id Id) {
// c.commands = append(c.commands, singleCmd{
// Type: CmdTypeDelete,
// id: id,
// })
// }
// Write queues a write command targeting an existing entity and returns an
// EntityCommand that can be used to attach components to it.
func (c *CommandQueue) Write(id Id) EntityCommand {
	c.commands = append(c.commands, singleCmd{
		Type:    CmdTypeWrite,
		id:      id,
		bundler: c.NextBundler(),
		world:   c.world,
	})
	last := len(c.commands) - 1
	return EntityCommand{cmd: &c.commands[last]}
}
// Trigger queues the event to fire once per provided id. With no ids, a
// single trigger is queued that isn't associated with any entity.
func (c *CommandQueue) Trigger(event Event, ids ...Id) {
	if len(ids) == 0 {
		// Special case: fire once, unassociated with any entity
		ids = []Id{InvalidEntity}
	}
	for _, id := range ids {
		c.commands = append(c.commands, singleCmd{
			Type:  CmdTypeTrigger,
			id:    id,
			event: event,
			world: c.world,
		})
	}
}
// Adds a prewrite function to be executed before every write or spawn command is executed.
// Useful for ensuring entities are fully formed before pushing them into the ECS.
func (c *CommandQueue) SetPrewrite(lambda func(EntityCommand)) {
	c.preWrite = lambda
}
// // Adds a predelite function to be executed before every delete command is executed
// // Useful for ensuring any external datastructures get cleaned up when an entity is deleted
// func (c *CommandQueue) SetPredelete(lambda func(Id)) {
// c.preDelete = lambda
// }
// Execute applies every queued command in order, then resets the queue and
// the bundler pool for reuse.
func (c *CommandQueue) Execute() {
	// Perform all commands
	// Note: We must check length every time in case calling one command adds more commands
	for i := 0; i < len(c.commands); i++ {
		c.commands[i].apply(c.world)
	}

	// Cleanup Queue: truncate in place so backing arrays are reused
	c.commands = c.commands[:0]
	c.currentBundlerIndex = 0
}
// TODO: Maybe?
// func (c *CommandQueue) ExecutePostWrite(postWrite func (ecs.Id)) {
// // Perform all commands
// for i := range c.commands {
// c.commands[i].apply(c.world)
// }
// for i := range c.commands {
// if c.commands[i].id == InvalidEntity {
// continue
// }
// postWrite(c.commands[i].id)
// }
// // Cleanup Queue
// c.commands = c.commands[:0]
// c.currentBundlerIndex = 0
// }
package ecs
// CompId uniquely identifies a registered component type.
type CompId uint16
// NewComp creates a comp[T] handle for the zero value of T.
func NewComp[T any]() comp[T] {
	var zero T
	return Comp(zero)
}
// Comp looks up (or registers) the component id for T and returns a typed handle.
func Comp[T any](t T) comp[T] {
	return comp[T]{compId: nameTyped[T](t)}
}
// comp is a lightweight typed handle pairing a Go type T with its CompId.
type comp[T any] struct {
	compId CompId
}
// CompId returns the registered component id for this handle.
func (c comp[T]) CompId() CompId {
	return c.compId
}
// newBox wraps val together with this handle so the pair satisfies Component.
func (c comp[T]) newBox(val T) box[T] {
	return box[T]{val: val, comp: c}
}
// W is the write context passed to CompWrite. Writes go either into bundler
// (when non-nil) or directly into archetype storage at (archId, index).
type W struct {
	engine  *archEngine
	archId  archetypeId
	index   int
	bundler *Bundler
}
// Writer is anything that can write its component data through a W context.
type Writer interface {
	CompWrite(W)
}
// Component is a Writer that also knows its registered component id.
type Component interface {
	Writer
	CompId() CompId
}
// This type is used to box a component with all of its type info so that it implements the component interface. I would like to get rid of this and simplify the APIs
type box[T any] struct {
	val T
	comp[T] // Embedded handle provides CompId()
}
// Creates the boxed component type
func C[T any](val T) box[T] {
	return Comp(val).newBox(val)
}
// CompWrite writes the boxed value through the provided write context.
func (c box[T]) CompWrite(wd W) {
	c.WriteVal(wd, c.val)
}
// OnInsert forwards the insert hook to the boxed value, if it implements onInsert.
func (c box[T]) OnInsert(ent EntityCommand) {
	if inserter, ok := any(c.val).(onInsert); ok {
		inserter.OnInsert(ent)
	}
}
// func (c Box[T]) getPtr(engine *archEngine, archId archetypeId, index int) *T {
// store := getStorageByCompId[T](engine, c.Id())
// slice := store.slice[archId]
// return &slice.comp[index]
// }
// func (c box[T]) With(val T) box[T] {
// c.val = val
// return c
// }
// func (c box[T]) Unbundle(bun *Bundler) {
// c.UnbundleVal(bun, c.comp)
// }
// func (c Box[T]) Unbundle(bun *Bundler) {
// compId := c.compId
// val := c.Comp
// bun.archMask.addComponent(compId)
// bun.Set[compId] = true
// if bun.Components[compId] == nil {
// // Note: We need a pointer so that we dont do an allocation every time we set it
// c2 := c // Note: make a copy, so the bundle doesn't contain a pointer to the original
// bun.Components[compId] = &c2
// } else {
// rwComp := bun.Components[compId].(*Box[T])
// rwComp.Comp = val
// }
// bun.maxComponentIdAdded = max(bun.maxComponentIdAdded, compId)
// }
// WriteVal routes val either into a bundler (when the write context carries
// one) or directly into archetype storage.
func (c comp[T]) WriteVal(cw W, val T) {
	if cw.bundler == nil {
		store := getStorageByCompId[T](cw.engine, c.CompId())
		writeArch(cw.engine, cw.archId, cw.index, store, val)
		return
	}
	c.UnbundleVal(cw.bundler, val)
}
// func (c Box[T]) writeVal(engine *archEngine, archId archetypeId, index int, val T) {
// store := getStorageByCompId[T](engine, c.id())
// writeArch[T](engine, archId, index, store, val)
// }
// UnbundleVal stores val into the bundler under this component's id, reusing
// the previously boxed allocation when one exists.
func (c comp[T]) UnbundleVal(bun *Bundler, val T) {
	compId := c.compId
	bun.Set[compId] = true
	bun.archMask.addComponent(compId)

	if existing := bun.Components[compId]; existing != nil {
		existing.(*box[T]).val = val
	} else {
		// First write for this id: box the value behind a pointer so that
		// subsequent writes don't allocate.
		boxed := c.newBox(val)
		bun.Components[compId] = &boxed
	}

	if compId > bun.maxComponentIdAdded {
		bun.maxComponentIdAdded = compId
	}
}
package ecs
import "fmt"
// TODO: You should move to this (ie archetype graph (or bitmask?). maintain the current archetype node, then traverse to nodes (and add new ones) based on which components are added): https://ajmmertens.medium.com/building-an-ecs-2-archetypes-and-vectorization-fe21690805f9
// Dynamic component Registry: tracks which archetypes exist for each
// component and maps archetype masks to archetype ids (and back).
type componentRegistry struct {
	archSet     [][]archetypeId               // Contains the set of archetypeIds that have this component
	archMask    map[archetypeMask]archetypeId // Contains a mapping of archetype bitmasks to archetypeIds
	revArchMask []archetypeMask               // Contains the reverse mapping of archetypeIds to archetype masks. Indexed by archetypeId
}
// newComponentRegistry builds an empty registry with per-component archetype
// sets preallocated for every possible component id.
func newComponentRegistry() *componentRegistry {
	return &componentRegistry{
		archSet:     make([][]archetypeId, maxComponentId+1),
		archMask:    make(map[archetypeMask]archetypeId),
		revArchMask: make([]archetypeMask, 0), // appended to as archetypes are created
	}
}
// getArchetypeId returns the archetypeId for the given mask, registering a
// brand new archetype (and indexing it under every component it contains)
// the first time the mask is seen.
func (r *componentRegistry) getArchetypeId(engine *archEngine, mask archetypeMask) archetypeId {
	archId, ok := r.archMask[mask]
	if !ok {
		componentIds := mask.getComponentList()
		archId = engine.newArchetypeId(mask, componentIds)
		r.archMask[mask] = archId

		// Invariant: archetype ids are handed out sequentially, so the new id
		// must equal the current length of the reverse-lookup slice.
		if int(archId) != len(r.revArchMask) {
			panic(fmt.Sprintf("ecs: archId must increment. Expected: %d, Got: %d", len(r.revArchMask), archId))
		}
		r.revArchMask = append(r.revArchMask, mask)

		// Add this archetypeId to every component's archList
		for _, compId := range componentIds {
			r.archSet[compId] = append(r.archSet[compId], archId)
		}
	}
	return archId
}
// This is mostly for the without filter.
// archIdOverlapsMask reports whether the archetype identified by archId
// contains at least one of the components set in compArchMask.
func (r *componentRegistry) archIdOverlapsMask(archId archetypeId, compArchMask archetypeMask) bool {
	// A nonzero AND means both masks share at least one component bit.
	// (Previously written as an if-true/return-false chain; the comparison
	// itself is the result.)
	return r.revArchMask[archId].bitwiseAnd(compArchMask) != blankArchMask
}
package ecs
import "fmt"
// Reads a specific component of the entity specified at id.
// Returns true if the entity was found and had that component, else returns false.
// Deprecated: This API is tentative, I'm trying to improve the QueryN construct so that it can capture this usecase.
func Read[T any](world *World, id Id) (T, bool) {
	loc, ok := world.arch.Get(id)
	if !ok {
		var zero T
		return zero, false
	}
	return readArch[T](world.engine, loc, id)
}
// Reads a pointer to the component of the entity at the specified id.
// Returns nil if the entity was not found or doesn't have that component.
// This pointer is short lived and can become invalid if any other entity changes in the world.
// Deprecated: This API is tentative, I'm trying to improve the QueryN construct so that it can capture this usecase.
func ReadPtr[T any](world *World, id Id) *T {
	if loc, ok := world.arch.Get(id); ok {
		return readPtrArch[T](world.engine, loc, id)
	}
	return nil
}
// ReadEntity copies every component of the entity at loc into a freshly
// allocated Entity. Panics if the archetype has no lookup list.
func (e *archEngine) ReadEntity(loc entLoc, id Id) *Entity {
	if e.lookup[loc.archId] == nil {
		panic("Archetype doesn't have lookup list")
	}

	ent := NewEntity()
	idx := int(loc.index)
	for _, store := range e.compStorage {
		if store != nil {
			store.ReadToEntity(ent, loc.archId, idx)
		}
	}
	return ent
}
// ReadRawEntity copies pointers to every component of the entity at loc into
// a freshly allocated RawEntity. Panics if the archetype has no lookup list.
func (e *archEngine) ReadRawEntity(loc entLoc, id Id) *RawEntity {
	if e.lookup[loc.archId] == nil {
		panic("Archetype doesn't have lookup list")
	}

	ent := NewRawEntity()
	idx := int(loc.index)
	for _, store := range e.compStorage {
		if store != nil {
			store.ReadToRawEntity(ent, loc.archId, idx)
		}
	}
	return ent
}
// readArch reads the T component of the entity at loc out of archetype
// storage. Returns the zero value and false when the archetype, the typed
// storage, or the per-archetype component slice is missing.
func readArch[T any](e *archEngine, loc entLoc, id Id) (T, bool) {
	var ret T
	lookup := e.lookup[loc.archId]
	if lookup == nil {
		return ret, false // TODO: when could this possibly happen?
	}

	// Get the dynamic componentSliceStorage
	n := name(ret)
	ss := e.compStorage[n]
	if ss == nil {
		return ret, false
	}

	// Downcast to the concrete typed storage; a mismatch is a programmer bug.
	storage, ok := ss.(*componentStorage[T])
	if !ok {
		panic(fmt.Sprintf("Wrong componentSliceStorage[T] type: %d != %d", name(ss), name(ret)))
	}

	// Get the underlying Archetype's componentSlice
	cSlice, ok := storage.slice.Get(loc.archId)
	if !ok {
		return ret, false
	}

	return cSlice.comp[loc.index], true
}
// readPtrArch returns a pointer into archetype storage for the T component of
// the entity at loc, or nil if the archetype/storage/slice isn't present.
// The pointer is invalidated by structural changes to the world.
func readPtrArch[T any](e *archEngine, loc entLoc, id Id) *T {
	var zero T
	if e.lookup[loc.archId] == nil {
		return nil
	}

	// Look up the dynamic storage for this component type
	ss := e.compStorage[name(zero)]
	if ss == nil {
		return nil
	}

	// Downcast to the concrete typed storage; a mismatch is a programmer bug.
	storage, ok := ss.(*componentStorage[T])
	if !ok {
		panic(fmt.Sprintf("Wrong componentSliceStorage[T] type: %d != %d", name(ss), name(zero)))
	}

	// Fetch the archetype's component slice and point into it
	cSlice, ok := storage.slice.Get(loc.archId)
	if !ok {
		return nil
	}
	return &cSlice.comp[loc.index]
}
package ecs
// An Entity is essentially a map of components that is held external to a world. Useful for pulling full entities in and out of the world.
// Deprecated: This type and its corresponding methods are tentative and might be replaced by something else.
type Entity struct {
	// Flat list of components; uniqueness by CompId is maintained by Add
	comp []Component
}
// Creates a new entity with the specified components
func NewEntity(components ...Component) *Entity {
	return &Entity{comp: components}
}
// Returns the index that contains the same componentId or returns -1
func (e *Entity) findIndex(compId CompId) int {
	for i, c := range e.comp {
		if c.CompId() == compId {
			return i
		}
	}
	return -1
}
// Adds components to an entity, overwriting any existing component that has
// the same component id.
func (e *Entity) Add(components ...Component) {
	for _, c := range components {
		if idx := e.findIndex(c.CompId()); idx >= 0 {
			e.comp[idx] = c
		} else {
			e.comp = append(e.comp, c)
		}
	}
}
// Merges e2 on top of e (meaning that we will overwrite e with values from e2)
func (e *Entity) Merge(e2 *Entity) {
	e.Add(e2.comp...)
}
// Returns the list of components held by the entity. Note that this returns
// the entity's internal slice, not a copy.
func (e *Entity) Comps() []Component {
	return e.comp
}
// Reads a specific component from the entity, returns false if the component doesn't exist
func ReadFromEntity[T any](ent *Entity) (T, bool) {
	var t T
	idx := ent.findIndex(name(t))
	if idx < 0 {
		return t, false
	}

	icomp := ent.comp[idx]
	if val, ok := icomp.(T); ok {
		return val, true
	}
	// Fall back to unwrapping a boxed component (panics for any other stored type)
	return icomp.(box[T]).val, true
}
// Writes the entire entity to the world
func (ent *Entity) Write(world *World, id Id) {
	world.Write(id, ent.comp...)
}
// Reads the entire entity out of the world and into an *Entity object. Returns nil if the entity doesn't exist
func ReadEntity(world *World, id Id) *Entity {
	if loc, ok := world.arch.Get(id); ok {
		return world.engine.ReadEntity(loc, id)
	}
	return nil
}
// Deletes a component on this entity. Uses swap-remove, so component
// ordering is not preserved.
func (e *Entity) Delete(c Component) {
	idx := e.findIndex(c.CompId())
	if idx < 0 {
		return
	}
	last := len(e.comp) - 1
	e.comp[idx] = e.comp[last]
	e.comp = e.comp[:last]
}
// Clears the component list, but retains the underlying capacity for reuse
func (e *Entity) Clear() {
	e.comp = e.comp[:0]
}
// TODO revisit this abstraction
// type Copier interface {
// Copy() interface{}
// }
// func (e Entity) Copy() Entity {
// copy := BlankEntity()
// for k,v := range e {
// c, ok := v.(Copier)
// if ok {
// // fmt.Println("Copying:", k)
// // If the component has a custom copy interface, then copy it
// copy[k] = c.Copy()
// } else {
// // Else just copy the top level struct
// copy[k] = v
// }
// }
// return copy
// }
// A RawEntity is like an Entity, but every component is actually a pointer to the underlying component. I mostly use this to build inspector UIs that can directly modify an entity
// Deprecated: This type and its corresponding methods are tentative and might be replaced by something else.
type RawEntity struct {
	comp map[CompId]any // Values are pointers to the underlying components
}
// Creates a new raw entity with the specified components
func NewRawEntity(components ...any) *RawEntity {
	comps := make(map[CompId]any, len(components))
	for _, c := range components {
		comps[name(c)] = c
	}
	return &RawEntity{comp: comps}
}
// Adds components to the raw entity, keyed (and overwritten) by component id
func (e *RawEntity) Add(components ...any) {
	for _, c := range components {
		e.comp[name(c)] = c
	}
}
// Merges e2 on top of e (meaning that we will overwrite e with values from e2)
func (e *RawEntity) Merge(e2 *RawEntity) {
	for compId, comp := range e2.comp {
		e.comp[compId] = comp
	}
}
// Returns a list of the components held by the entity, ordered by component
// id so the result is deterministic.
func (e *RawEntity) Comps() []Component {
	ret := make([]Component, 0, len(e.comp))
	// Iterate ids in ascending order. Other tables in this package are sized
	// maxComponentId+1 (see archEngine.compStorage), so a valid id may equal
	// maxComponentId; the previous `range maxComponentId` bound excluded it.
	for compId := CompId(0); compId <= maxComponentId; compId++ {
		v, ok := e.comp[compId]
		if !ok {
			continue
		}
		ret = append(ret, v.(Component))
	}
	return ret
}
// // Reads a specific component from the entity, returns false if the component doesn't exist
// func ReadFromRawEntity[T any](ent *RawEntity) (T, bool) {
// var t T
// n := name(t)
// icomp, ok := ent.comp[n]
// if !ok {
// return t, false
// }
// return icomp.(Box[T]).Comp, true
// }
// Writes the entire entity to the world
// func (ent *RawEntity) Write(world *World, id Id) {
// comps := ent.Comps()
// world.Write(id, comps...)
// }
// Reads the entire entity out of the world and into an *RawEntity object. Returns nil if the entity doesn't exist.
func ReadRawEntity(world *World, id Id) *RawEntity {
	if loc, ok := world.arch.Get(id); ok {
		return world.engine.ReadRawEntity(loc, id)
	}
	return nil
}
// Deletes a component on this entity
func (e *RawEntity) Delete(c Component) {
	compId := name(c)
	delete(e.comp, compId)
}
// Clears the map, but retains the allocated space for reuse
func (e *RawEntity) Clear() {
	clear(e.comp)
}
package ecs
import "reflect"
// EventId uniquely identifies a registered event type.
type EventId int

// Global event registry state. Not thread safe; see NewEvent.
var eventRegistryCounter EventId = 0
var registeredEvents = make(map[reflect.Type]EventId, 0)
// NewEvent registers event type T (if needed) and returns its EventId.
// Repeated calls for the same T return the same id.
// This function is not thread safe.
func NewEvent[T any]() EventId {
	var t T
	typeof := reflect.TypeOf(t)
	if id, ok := registeredEvents[typeof]; ok {
		return id
	}

	id := eventRegistryCounter
	registeredEvents[typeof] = id
	eventRegistryCounter++
	return id
}
// Event is any value that knows its registered EventId (see NewEvent).
type Event interface {
	EventId() EventId
}
// Trigger is the payload delivered to handlers when an event fires.
type Trigger[T Event] struct {
	Id   Id // If set, it is the entity Id that this event was triggered on
	Data T
}
// Handler runs in response to a specific event type (identified by EventTrigger).
type Handler interface {
	Run(id Id, event any)
	EventTrigger() EventId
}
// handlerData adapts a typed callback into the untyped Handler interface.
type handlerData[E Event] struct {
	lambda func(Trigger[E])
}
// Run invokes the handler's callback for the given entity id and event
// payload. Panics if event is not of type E.
func (h handlerData[E]) Run(id Id, event any) {
	h.lambda(Trigger[E]{Id: id, Data: event.(E)})
}
// EventTrigger reports which event id this handler responds to.
func (h handlerData[E]) EventTrigger() EventId {
	var zero E
	return zero.EventId()
}
// NewHandler wraps f as a Handler for events of type E.
func NewHandler[E Event](f func(trigger Trigger[E])) handlerData[E] {
	return handlerData[E]{lambda: f}
}
package main
import (
"fmt"
"time"
"github.com/unitoftime/ecs"
)
// This example illustrates the primary use cases for the ecs

// Name is a human-readable label component.
type Name string

// Position is a 3D position component.
type Position struct {
	X, Y, Z float64
}

// Velocity is a 3D velocity component.
type Velocity struct {
	X, Y, Z float64
}

// PrintMessage is an event carrying a message for observers to print.
type PrintMessage struct {
	Msg string
}
// PrintMessageEventId registers PrintMessage as an event type once at init.
var PrintMessageEventId = ecs.NewEvent[PrintMessage]()

// EventId satisfies ecs.Event for PrintMessage.
func (p PrintMessage) EventId() ecs.EventId {
	return PrintMessageEventId
}
func main() {
	// Create a New World
	world := ecs.NewWorld()

	// You can manually spawn entities like this
	{
		cmd := ecs.NewCommandQueue(world)

		// Add entities
		cmd.SpawnEmpty().
			Insert(ecs.C(Position{1, 2, 3})).
			Insert(ecs.C(Velocity{1, 2, 3})).
			Insert(ecs.C(Name("My First Entity")))
		cmd.Execute()
	}

	// Adding Component hooks: the handler runs whenever a Velocity component is added
	{
		world.SetHookOnAdd(ecs.C(Velocity{}),
			ecs.NewHandler(func(trigger ecs.Trigger[ecs.OnAdd]) {
				fmt.Println("Hook:", trigger)
			}))

		cmd := ecs.NewCommandQueue(world)
		cmd.SpawnEmpty().
			Insert(ecs.C(Position{1, 2, 3})).
			Insert(ecs.C(Velocity{1, 2, 3})).
			Insert(ecs.C(Name("My First Entity")))
		cmd.Execute()
	}

	// Adding Observers
	{
		cmd := ecs.NewCommandQueue(world)

		// You can add observer handlers which run as a result of triggered events
		world.AddObserver(
			ecs.NewHandler(func(trigger ecs.Trigger[PrintMessage]) {
				fmt.Println("Observer 1:", trigger.Data.Msg)
			}))
		world.AddObserver(
			ecs.NewHandler(func(trigger ecs.Trigger[PrintMessage]) {
				fmt.Println("Observer 2!", trigger.Data.Msg)
			}))

		// Each queued trigger fires every registered observer when executed
		cmd.Trigger(PrintMessage{"Hello"})
		cmd.Trigger(PrintMessage{"World"})
		cmd.Execute()
	}

	scheduler := ecs.NewScheduler(world)

	// Append physics systems, these run on a fixed time step, so dt will always be constant
	scheduler.AddSystems(ecs.StageFixedUpdate,
		// Comment out if you want to spawn a new entity every frame
		// ecs.NewSystem1(SpawnSystem),

		// Option A: Create a function that returns a system
		MoveSystemOption_A(world),

		// Option B: Use the dynamic injection to create a system for you
		ecs.NewSystem1(MoveSystemOption_B),
		ecs.NewSystem1(PrintSystem),
		ecs.NewSystem1(TriggerSystem),
	)

	// Also, add render systems if you want, These run as fast as possible
	// scheduler.AppendRender()

	// This will block until the scheduler exits `scheduler.SetQuit(true)`
	scheduler.Run()
}
// Note: This system wasn't added to the scheduler, so that I wouldn't constantly spawn entities in the physics loop
// But, you can rely on commands to get injected for you, just like a query.
func SpawnSystem(dt time.Duration, commands *ecs.CommandQueue) {
	// Note: The scheduler will automatically call .Execute() the command queue
	cmd := commands.SpawnEmpty()
	cmd.Insert(ecs.C(Position{1, 2, 3})).
		Insert(ecs.C(Velocity{1, 2, 3})).
		Insert(ecs.C(Name(fmt.Sprintf("My Entity %d", cmd.Id()))))
}
// Option A: Define and return a system based on a closure
// - Provides a bit more flexibility if you need to establish variables ahead of the system
func MoveSystemOption_A(world *ecs.World) ecs.System {
	query := ecs.Query2[Position, Velocity](world)
	return ecs.NewSystem(func(dt time.Duration) {
		// dt is constant for the whole tick, so convert it once
		sec := dt.Seconds()
		query.MapId(func(id ecs.Id, pos *Position, vel *Velocity) {
			pos.X += vel.X * sec
			pos.Y += vel.Y * sec
			pos.Z += vel.Z * sec
		})
	})
}
// Option 2: Define a system and have all the queries created and injected for you
// - Can be used for simpler systems that don't need to track much system-internal state
// - Use the `ecs.NewSystemN(world, systemFunction)` syntax (Where N represents the number of required resources)
func MoveSystemOption_B(dt time.Duration, query *ecs.View2[Position, Velocity]) {
	// dt is constant for the whole tick, so convert it once
	sec := dt.Seconds()
	query.MapId(func(id ecs.Id, pos *Position, vel *Velocity) {
		pos.X += vel.X * sec
		pos.Y += vel.Y * sec
		pos.Z += vel.Z * sec
	})
}
// A system that prints all entity names and their positions
func PrintSystem(dt time.Duration, query *ecs.View2[Name, Position]) {
	query.MapId(func(id ecs.Id, name *Name, pos *Position) {
		fmt.Printf("%s: %v\n", *name, pos)
	})
}
// TriggerSystem queues a PrintMessage event every tick; the scheduler's
// command-queue execution delivers it to the registered observers.
func TriggerSystem(dt time.Duration, cmd *ecs.CommandQueue) {
	cmd.Trigger(PrintMessage{"Hello"})
}
package ecs
import (
"slices"
)
// Optional - Lets you view even if component is missing (func will return nil)
// With - Lets you add additional components that must be present
// Without - Lets you add additional components that must not be present

// Filter adjusts the list of required component ids that a query matches on.
type Filter interface {
	Filter([]CompId) []CompId
}
// without holds the archetype mask of components an entity must NOT have.
type without struct {
	mask archetypeMask
}
// Creates a filter to ensure that entities will not have the specified components
func Without(comps ...any) without {
	return without{mask: buildArchMaskFromAny(comps...)}
}
// Filter is a passthrough: exclusion happens later by mask overlap checks
// (see componentRegistry.archIdOverlapsMask), not by the component list.
func (w without) Filter(list []CompId) []CompId {
	return list // Dont filter anything. We need to exclude later on
}
// with holds component ids that an entity must additionally have.
type with struct {
	comps []CompId
}
// Creates a filter to ensure that entities have the specified components
func With(comps ...any) with {
	ids := make([]CompId, 0, len(comps))
	for _, c := range comps {
		ids = append(ids, name(c))
	}
	return with{comps: ids}
}
// Filter appends this filter's required component ids to the list.
func (w with) Filter(list []CompId) []CompId {
	return append(list, w.comps...)
}
// optional holds component ids that a query may match without requiring.
type optional struct {
	comps []CompId
}
// Creates a filter to make the query still iterate even if a specific component is missing, in which case you'll get nil if the component isn't there when accessed
func Optional(comps ...any) optional {
	ids := make([]CompId, 0, len(comps))
	for _, c := range comps {
		ids = append(ids, name(c))
	}
	return optional{comps: ids}
}
// Filter removes this filter's optional component ids from the required list,
// so queries still match archetypes that lack those components.
// Removal is swap-remove, so the list's order is not preserved.
func (f optional) Filter(list []CompId) []CompId {
	for i := 0; i < len(list); i++ {
		for j := range f.comps {
			if list[i] == f.comps[j] {
				// If we have a match, we want to remove it from the list.
				// Swap-remove: overwrite index i with the last element, then shrink.
				list[i] = list[len(list)-1]
				list = list[:len(list)-1]

				// Because we just moved the last element to index i, we need to go back to process that element
				i--
				break
			}
		}
	}
	return list
}
// filterList is the compiled form of a query's component list and filters,
// plus a cached list of matching archetype ids.
type filterList struct {
	comps           []CompId      // Required component ids (after applying With/Optional filters)
	withoutArchMask archetypeMask // Components that must be absent

	cachedArchetypeGeneration int // Denotes the world's archetype generation that was used to create the list of archIds. If the world has a new generation, we should probably regenerate
	archIds                   []archetypeId
}
// newFilterList separates the supplied filters into the "without" archetype
// mask and the flattened list of required component ids.
func newFilterList(comps []CompId, filters ...Filter) filterList {
	var withoutArchMask archetypeMask
	for _, f := range filters {
		withoutFilter, isWithout := f.(without)
		if isWithout {
			// NOTE(review): a second Without filter replaces (rather than
			// combines with) the first one's mask — confirm this is intended.
			withoutArchMask = withoutFilter.mask
		} else {
			comps = f.Filter(comps)
		}
	}

	return filterList{
		comps:           comps,
		withoutArchMask: withoutArchMask,
		archIds:         make([]archetypeId, 0),
	}
}
// regenerate rebuilds the cached archetype id list if the world's archetype
// generation has advanced since the last rebuild; otherwise it is a no-op.
func (f *filterList) regenerate(world *World) {
	gen := world.engine.getGeneration()
	if gen == f.cachedArchetypeGeneration {
		return
	}

	f.archIds = world.engine.FilterList(f.archIds, f.comps)
	if f.withoutArchMask != blankArchMask {
		// Drop any archetype containing a component from the without mask
		f.archIds = slices.DeleteFunc(f.archIds, func(archId archetypeId) bool {
			return world.engine.dcr.archIdOverlapsMask(archId, f.withoutArchMask)
		})
	}
	f.cachedArchetypeGeneration = gen
}
package ecs
import (
"github.com/unitoftime/ecs/internal/intmap"
)
// intkey constrains map keys to any integer-like type.
type intkey interface {
	// comparable
	~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr
}
// This is useful for testing different map implementations in my workload.
// internalMap wraps the intmap implementation behind a stable interface.
type internalMap[K intkey, V any] struct {
	inner *intmap.Map[K, V]
}
// newMap creates an internalMap pre-sized for the requested number of elements.
func newMap[K intkey, V any](size int) *internalMap[K, V] {
	return &internalMap[K, V]{
		// Fix: the size parameter was previously ignored (hardcoded 0),
		// forcing the map to grow from empty on every use.
		intmap.New[K, V](size),
	}
}
// Len returns the number of entries in the map.
func (m *internalMap[K, V]) Len() int {
	return m.inner.Len()
}
// Get returns the value for k and whether it was present.
func (m *internalMap[K, V]) Get(k K) (V, bool) {
	return m.inner.Get(k)
}
// Put inserts or overwrites the value for k.
func (m *internalMap[K, V]) Put(k K, val V) {
	m.inner.Put(k, val)
}
// Delete removes k from the map, if present.
func (m *internalMap[K, V]) Delete(k K) {
	m.inner.Del(k)
}
// Has reports whether k is present in the map.
func (m *internalMap[K, V]) Has(k K) bool {
	_, has := m.inner.Get(k)
	return has
}
//--------------------------------------------------------------------------------
// TODO: Move to generational Ids

// This is useful for testing different map implementations in my workload.
// locMap maps entity Ids to their archetype location.
type locMap struct {
	// inner *LocMapImpl
	inner *intmap.Map[Id, entLoc]
}
// newLocMap creates a locMap pre-sized for the requested number of entities.
func newLocMap(size int) locMap {
	return locMap{
		// NewLocMapImpl(size),
		// Fix: the size parameter was previously ignored (hardcoded 0); the
		// retired NewLocMapImpl(size) call above shows the intended usage.
		intmap.New[Id, entLoc](size),
	}
}
// Len returns the number of tracked entities.
func (m *locMap) Len() int {
	return m.inner.Len()
}
// Get returns the location for entity k and whether it was present.
func (m *locMap) Get(k Id) (entLoc, bool) {
	return m.inner.Get(k)
}
// Put inserts or overwrites the location for entity k.
func (m *locMap) Put(k Id, val entLoc) {
	m.inner.Put(k, val)
}
// Delete removes entity k from the map, if present.
func (m *locMap) Delete(k Id) {
	m.inner.Del(k)
}
// Has reports whether entity k is present in the map.
func (m *locMap) Has(k Id) bool {
	_, has := m.inner.Get(k)
	return has
}
// --------------------------------------------------------------------------------
// const fillFactor64 = 0.5
// // Hashing Reference: https://gist.github.com/badboy/6267743
// func phiMix64(x int) int {
// // Note: With this, we are only just a bit faster than swissmap
// h := x * (-1_640_531_527) // This is just the int32 version of the 0x9E3779B9
// return h ^ (h >> 16)
// // // TODO: track collision counts and compare before enabling this
// // // Theory: Because ecs.Id is just incremented by 1 each time, it might be effective to just always take the next slot
// // return x + x
// }
// type locPair struct {
// K Id
// V entLoc
// }
// // LocMapImpl is a hashmap where the keys are some any integer type.
// type LocMapImpl struct {
// data []locPair
// size int
// zeroVal entLoc // value of 'zero' key
// hasZeroKey bool // do we have 'zero' key in the map?
// }
// // New creates a new map with keys being any integer subtype.
// // The map can store up to the given capacity before reallocation and rehashing occurs.
// func NewLocMapImpl(capacity int) *LocMapImpl {
// return &LocMapImpl{
// data: make([]locPair, arraySize(capacity, fillFactor64)),
// }
// }
// // Get returns the value if the key is found.
// func (m *LocMapImpl) Get(key Id) (entLoc, bool) {
// if key == InvalidEntity {
// if m.hasZeroKey {
// return m.zeroVal, true
// }
// var zero entLoc
// return zero, false
// }
// idx := m.startIndex(key)
// p := m.data[idx]
// if p.K == InvalidEntity { // end of chain already
// var zero entLoc
// return zero, false
// }
// if p.K == key { // we check zero prior to this call
// return p.V, true
// }
// // hash collision, seek next hash match, bailing on first empty
// for {
// idx = m.nextIndex(idx)
// p = m.data[idx]
// if p.K == InvalidEntity {
// var zero entLoc
// return zero, false
// }
// if p.K == key {
// return p.V, true
// }
// }
// }
// // Put adds or updates key with value val.
// func (m *LocMapImpl) Put(key Id, val entLoc) {
// if key == InvalidEntity {
// if !m.hasZeroKey {
// m.size++
// }
// m.zeroVal = val
// m.hasZeroKey = true
// return
// }
// idx := m.startIndex(key)
// p := &m.data[idx]
// if p.K == InvalidEntity { // end of chain already
// p.K = key
// p.V = val
// if m.size >= m.sizeThreshold() {
// m.rehash()
// } else {
// m.size++
// }
// return
// } else if p.K == key { // overwrite existing value
// p.V = val
// return
// }
// // hash collision, seek next empty or key match
// for {
// idx = m.nextIndex(idx)
// p = &m.data[idx]
// if p.K == InvalidEntity {
// p.K = key
// p.V = val
// if m.size >= m.sizeThreshold() {
// m.rehash()
// } else {
// m.size++
// }
// return
// } else if p.K == key {
// p.V = val
// return
// }
// }
// }
// // Clear removes all items from the map, but keeps the internal buffers for reuse.
// func (m *LocMapImpl) Clear() {
// var zero entLoc
// m.hasZeroKey = false
// m.zeroVal = zero
// // compiles down to runtime.memclr()
// for i := range m.data {
// m.data[i] = locPair{}
// }
// m.size = 0
// }
// func (m *LocMapImpl) rehash() {
// oldData := m.data
// m.data = make([]locPair, 2*len(m.data))
// // reset size
// if m.hasZeroKey {
// m.size = 1
// } else {
// m.size = 0
// }
// forEach64(oldData, m.Put)
// // for _, p := range oldData {
// // if p.K != InvalidEntity {
// // m.Put(p.K, p.V)
// // }
// // }
// }
// // Len returns the number of elements in the map.
// func (m *LocMapImpl) Len() int {
// return m.size
// }
// func (m *LocMapImpl) sizeThreshold() int {
// return int(math.Floor(float64(len(m.data)) * fillFactor64))
// }
// func (m *LocMapImpl) startIndex(key Id) int {
// return phiMix64(int(key)) & (len(m.data) - 1)
// }
// func (m *LocMapImpl) nextIndex(idx int) int {
// return (idx + 1) & (len(m.data) - 1)
// }
// func forEach64(pairs []locPair, f func(k Id, v entLoc)) {
// for _, p := range pairs {
// if p.K != InvalidEntity {
// f(p.K, p.V)
// }
// }
// }
// // Del deletes a key and its value, returning true iff the key was found
// func (m *LocMapImpl) Del(key Id) bool {
// if key == InvalidEntity {
// if m.hasZeroKey {
// m.hasZeroKey = false
// m.size--
// return true
// }
// return false
// }
// idx := m.startIndex(key)
// p := m.data[idx]
// if p.K == key {
// // any keys that were pushed back need to be shifted back into the empty slot
// // to avoid breaking the chain
// m.shiftKeys(idx)
// m.size--
// return true
// } else if p.K == InvalidEntity { // end of chain already
// return false
// }
// for {
// idx = m.nextIndex(idx)
// p = m.data[idx]
// if p.K == key {
// // any keys that were pushed back need to be shifted back into the empty slot
// // to avoid breaking the chain
// m.shiftKeys(idx)
// m.size--
// return true
// } else if p.K == InvalidEntity {
// return false
// }
// }
// }
// func (m *LocMapImpl) shiftKeys(idx int) int {
// // Shift entries with the same hash.
// // We need to do this on deletion to ensure we don't have zeroes in the hash chain
// for {
// var p locPair
// lastIdx := idx
// idx = m.nextIndex(idx)
// for {
// p = m.data[idx]
// if p.K == InvalidEntity {
// m.data[lastIdx] = locPair{}
// return lastIdx
// }
// slot := m.startIndex(p.K)
// if lastIdx <= idx {
// if lastIdx >= slot || slot > idx {
// break
// }
// } else {
// if lastIdx >= slot && slot > idx {
// break
// }
// }
// idx = m.nextIndex(idx)
// }
// m.data[lastIdx] = p
// }
// }
// func nextPowerOf2(x uint32) uint32 {
// if x == math.MaxUint32 {
// return x
// }
// if x == 0 {
// return 1
// }
// x--
// x |= x >> 1
// x |= x >> 2
// x |= x >> 4
// x |= x >> 8
// x |= x >> 16
// return x + 1
// }
// func arraySize(exp int, fill float64) int {
// s := nextPowerOf2(uint32(math.Ceil(float64(exp) / fill)))
// if s < 2 {
// s = 2
// }
// return int(s)
// }
package ecs
// OnAdd is the event payload emitted when a component is added to an entity.
type OnAdd struct {
	compId CompId // The component that was added
}

// Event id shared by every OnAdd event, registered once at package init.
var _onAddId = NewEvent[OnAdd]()

// EventId returns the registered event id for OnAdd events.
func (p OnAdd) EventId() EventId {
	return _onAddId
}
//--------------------------------------------------------------------------------
// runFinalizedHooks fires the queued onAdd hooks for entity id, then empties
// the queue (keeping its capacity for reuse).
func (e *archEngine) runFinalizedHooks(id Id) {
	for _, compId := range e.finalizeOnAdd {
		e.runAddHook(id, compId)
	}
	e.finalizeOnAdd = e.finalizeOnAdd[:0]
	// TODO: Run other hooks?
}
// runAddHook executes the registered onAdd handler for compId, if any.
func (e *archEngine) runAddHook(id Id, compId CompId) {
	if hook := e.onAddHooks[compId]; hook != nil {
		hook.Run(id, OnAdd{compId})
	}
}
// markComponents appends the CompId of every provided component to slice.
func markComponents(slice []CompId, comp ...Component) []CompId {
	for _, c := range comp {
		slice = append(slice, c.CompId())
	}
	return slice
}
// markNewComponents appends the CompId of each provided component to slice,
// skipping any component whose bit is already set in oldMask.
func markNewComponents(slice []CompId, oldMask archetypeMask, comp ...Component) []CompId {
	for _, c := range comp {
		id := c.CompId()
		if !oldMask.hasComponent(id) {
			slice = append(slice, id)
		}
	}
	return slice
}
// markComponentMask appends every CompId whose bit is set in mask, in ascending order.
func markComponentMask(slice []CompId, mask archetypeMask) []CompId {
	// TODO: Optimization: Technically this only has to loop to the max registered compId, not the max possible. Also see optimization note in archEngine
	for compId := CompId(0); compId <= maxComponentId; compId++ {
		if !mask.hasComponent(compId) {
			continue
		}
		slice = append(slice, compId)
	}
	return slice
}
// markComponentDiff appends the CompIds present in newMask but absent from oldMask.
func markComponentDiff(slice []CompId, newMask, oldMask archetypeMask) []CompId {
	diff := newMask.bitwiseClear(oldMask)
	return markComponentMask(slice, diff)
}
package ecs
import (
"reflect"
"runtime"
"time"
)
// GetInjectable2 resolves the injectable resource of type T.
// The t parameter is never read; it exists only so T can be inferred from the
// argument instead of being spelled out at the call site.
func GetInjectable2[T any](world *World, t T) T {
	return GetInjectable[T](world)
}
// GetInjectable resolves a resource of type T from the world.
// Resolution order: previously stored value, then Initializer-constructed
// value, then the zero value (which is also cached for next time).
func GetInjectable[T any](world *World) T {
	var zero T
	key := resourceName(zero)

	// 1. If already created, just use this variable
	if stored, found := world.resources[key]; found {
		return stored.(T)
	}

	// 2. If supports initialization, then make a new one and return it
	if init, canInit := any(zero).(Initializer); canInit {
		built := init.Initialize(world)
		world.resources[key] = built
		return built.(T)
	}

	// 3. Fallback: Just return the default value for whatever it is
	world.resources[key] = zero
	return zero
}
// Initializer lets a resource type construct itself when first injected
// (see GetInjectable step 2).
type Initializer interface {
	Initialize(*World) any
}

// SystemBuilder constructs a System against a specific world, typically by
// resolving injectable dependencies at Build time.
type SystemBuilder interface {
	Build(world *World) System
}
// System1 wraps a system function with one injectable dependency.
type System1[A any] struct {
	lambda func(dt time.Duration, a A)
}

// Build resolves the A dependency once, then captures it in the System closure.
// The system name is derived from the wrapped function's symbol name.
func (s System1[A]) Build(world *World) System {
	injectedA := GetInjectable[A](world)
	sysName := runtime.FuncForPC(reflect.ValueOf(any(s.lambda)).Pointer()).Name()
	step := func(dt time.Duration) {
		s.lambda(dt, injectedA)
	}
	return System{Name: sysName, Func: step}
}

// NewSystem1 creates a SystemBuilder for a function with one injected dependency.
func NewSystem1[A any](lambda func(dt time.Duration, a A)) System1[A] {
	return System1[A]{lambda: lambda}
}
// System2 wraps a system function with two injectable dependencies.
type System2[A, B any] struct {
	lambda func(dt time.Duration, a A, b B)
}

// Build resolves both dependencies once, then captures them in the System closure.
func (s System2[A, B]) Build(world *World) System {
	injectedA := GetInjectable[A](world)
	injectedB := GetInjectable[B](world)
	sysName := runtime.FuncForPC(reflect.ValueOf(any(s.lambda)).Pointer()).Name()
	step := func(dt time.Duration) {
		s.lambda(dt, injectedA, injectedB)
	}
	return System{Name: sysName, Func: step}
}

// NewSystem2 creates a SystemBuilder for a function with two injected dependencies.
func NewSystem2[A, B any](lambda func(dt time.Duration, a A, b B)) System2[A, B] {
	return System2[A, B]{lambda: lambda}
}
// System3 wraps a system function with three injectable dependencies.
type System3[A, B, C any] struct {
	lambda func(dt time.Duration, a A, b B, c C)
}

// Build resolves all three dependencies once, then captures them in the System closure.
func (s System3[A, B, C]) Build(world *World) System {
	injectedA := GetInjectable[A](world)
	injectedB := GetInjectable[B](world)
	injectedC := GetInjectable[C](world)
	sysName := runtime.FuncForPC(reflect.ValueOf(any(s.lambda)).Pointer()).Name()
	step := func(dt time.Duration) {
		s.lambda(dt, injectedA, injectedB, injectedC)
	}
	return System{Name: sysName, Func: step}
}

// NewSystem3 creates a SystemBuilder for a function with three injected dependencies.
func NewSystem3[A, B, C any](lambda func(dt time.Duration, a A, b B, c C)) System3[A, B, C] {
	return System3[A, B, C]{lambda: lambda}
}
// func NewSystem4[A, B, C, D any](world *World, lambda func(dt time.Duration, a A, b B, c C, d D)) System {
// aRes := GetInjectable[A](world)
// bRes := GetInjectable[B](world)
// cRes := GetInjectable[C](world)
// dRes := GetInjectable[D](world)
// systemName := runtime.FuncForPC(reflect.ValueOf(any(lambda)).Pointer()).Name()
// return System{
// Name: systemName,
// Func: func(dt time.Duration) {
// lambda(dt, aRes, bRes, cRes, dRes)
// },
// }
// }
package main
import (
"bytes"
_ "embed"
"go/format"
"io/fs"
"os"
"strings"
"text/template"
)
//go:embed view.tgo
var viewTemplate string // Raw text/template source for the generated view code

// viewData is the template input: one entry per generated view arity,
// each listing its generic type-parameter names.
type viewData struct {
	Views [][]string
}
// This is used to generate code for the ecs library.
// It renders view.tgo for every view arity and writes the (gofmt-ed) result
// to view_gen.go.
func main() {
	// Build the type-parameter lists {"A"}, {"A","B"}, ... {"A".."L"}
	// programmatically instead of hand-writing all twelve literals.
	letters := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"}
	views := make([][]string, 0, len(letters))
	for i := range letters {
		views = append(views, letters[:i+1])
	}
	data := viewData{Views: views}

	funcs := template.FuncMap{
		"join": strings.Join,
		"lower": func(val string) string {
			return strings.ToLower(val)
		},
		// nils renders n comma-separated "nil" values
		"nils": func(n int) string {
			val := make([]string, 0, n)
			for i := 0; i < n; i++ {
				val = append(val, "nil")
			}
			return strings.Join(val, ", ")
		},
		// retlist: "A", "B" -> "retA, retB"
		"retlist": func(val []string) string {
			ret := make([]string, len(val))
			for i := range val {
				ret[i] = "ret" + val[i]
			}
			return strings.Join(ret, ", ")
		},
		// lambdaArgs: "A" -> "a *A"
		"lambdaArgs": func(val []string) string {
			ret := make([]string, len(val))
			for i := range val {
				ret[i] = strings.ToLower(val[i]) + " *" + val[i]
			}
			return strings.Join(ret, ", ")
		},
		// sliceLambdaArgs: "A" -> "a []A"
		"sliceLambdaArgs": func(val []string) string {
			ret := make([]string, len(val))
			for i := range val {
				ret[i] = strings.ToLower(val[i]) + " []" + val[i]
			}
			return strings.Join(ret, ", ")
		},
		// parallelLambdaStructArgs: "A", "B" -> "a []A; b []B"
		"parallelLambdaStructArgs": func(val []string) string {
			ret := make([]string, len(val))
			for i := range val {
				ret[i] = strings.ToLower(val[i]) + " []" + val[i]
			}
			return strings.Join(ret, "; ")
		},
		// parallelLambdaArgsFromStruct: "A" -> "paramA"
		"parallelLambdaArgsFromStruct": func(val []string) string {
			ret := make([]string, len(val))
			for i := range val {
				ret[i] = "param" + val[i]
			}
			return strings.Join(ret, ", ")
		},
	}

	t := template.Must(template.New("ViewTemplate").Funcs(funcs).Parse(viewTemplate))

	var buf bytes.Buffer
	// BUG FIX: this error was previously ignored, which could silently
	// produce a truncated view_gen.go
	if err := t.Execute(&buf, data); err != nil {
		panic(err)
	}

	filename := "view_gen.go"
	// Attempt to write the file as formatted, falling back to writing the
	// unformatted output (useful for debugging the template), then failing loudly.
	formatted, err := format.Source(buf.Bytes())
	if err != nil {
		if werr := os.WriteFile(filename, buf.Bytes(), fs.ModePerm); werr != nil {
			panic(werr)
		}
		panic(err)
	}
	if err := os.WriteFile(filename, formatted, fs.ModePerm); err != nil {
		panic(err)
	}
}
// Package intmap contains a fast hashmap implementation for maps with keys of any integer type
package intmap
import (
"math"
)
// IntKey is a type constraint for values that can be used as keys in Map
type IntKey interface {
	~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr
}

// pair is one slot of the open-addressing table. A zero K marks an empty slot,
// which is why the zero key is stored out-of-band on Map.
type pair[K IntKey, V any] struct {
	K K
	V V
}
// Maximum load factor before the table doubles in size.
const fillFactor64 = 0.7

// phiMix64 scrambles an integer key into a hash value.
// Hashing Reference: https://gist.github.com/badboy/6267743
func phiMix64(x int) int {
	// int32 bit-pattern of the golden-ratio constant 0x9E3779B9.
	// Note: With this, we are only just a bit faster than swissmap
	const phi32 = -1_640_531_527
	mixed := x * phi32
	return mixed ^ (mixed >> 16)
	// TODO: track collision counts and compare before enabling this
	// Theory: Because ecs.Id is just incremented by 1 each time, it might be effective to just always take the next slot
	// return x + x
}
// Map is a hashmap where the keys are some any integer type.
// It uses open addressing with linear probing; a zero key marks an empty
// slot, so the zero key's value is stored out-of-band in zeroVal.
type Map[K IntKey, V any] struct {
	data []pair[K, V] // key-value pairs (length is always a power of two)
	size int          // number of stored entries, including the zero key
	zeroVal    V    // value of 'zero' key
	hasZeroKey bool // do we have 'zero' key in the map?
}

// New creates a new map with keys being any integer subtype.
// The map can store up to the given capacity before reallocation and rehashing occurs.
func New[K IntKey, V any](capacity int) *Map[K, V] {
	return &Map[K, V]{
		data: make([]pair[K, V], arraySize(capacity, fillFactor64)),
	}
}
// Get returns the value if the key is found.
// The zero key is handled out-of-band (hasZeroKey/zeroVal) because a zero K
// marks an empty slot in the probe table.
func (m *Map[K, V]) Get(key K) (V, bool) {
	if key == K(0) {
		if m.hasZeroKey {
			return m.zeroVal, true
		}
		var zero V
		return zero, false
	}

	// Linear probe from the key's home slot until we hit the key or an empty slot.
	idx := m.startIndex(key)
	p := m.data[idx]

	if p.K == K(0) { // end of chain already
		var zero V
		return zero, false
	}
	if p.K == key { // we check zero prior to this call
		return p.V, true
	}

	// hash collision, seek next hash match, bailing on first empty
	for {
		idx = m.nextIndex(idx)
		p = m.data[idx]

		if p.K == K(0) {
			var zero V
			return zero, false
		}
		if p.K == key {
			return p.V, true
		}
	}
}
// Put adds or updates key with value val.
// The zero key is tracked separately since K(0) marks an empty slot.
// When the load factor threshold is reached, the table is rehashed; note the
// new entry is written before rehash, and rehash recounts size itself.
func (m *Map[K, V]) Put(key K, val V) {
	if key == K(0) {
		if !m.hasZeroKey {
			m.size++
		}
		m.zeroVal = val
		m.hasZeroKey = true
		return
	}

	// Linear probe from the key's home slot.
	idx := m.startIndex(key)
	p := &m.data[idx]

	if p.K == K(0) { // end of chain already
		p.K = key
		p.V = val
		if m.size >= m.sizeThreshold() {
			m.rehash()
		} else {
			m.size++
		}
		return
	} else if p.K == key { // overwrite existing value
		p.V = val
		return
	}

	// hash collision, seek next empty or key match
	for {
		idx = m.nextIndex(idx)
		p = &m.data[idx]

		if p.K == K(0) {
			p.K = key
			p.V = val
			if m.size >= m.sizeThreshold() {
				m.rehash()
			} else {
				m.size++
			}
			return
		} else if p.K == key {
			p.V = val
			return
		}
	}
}
// ForEach calls f for every key/value pair in the map, zero key first.
// Iteration order of the remaining keys follows the internal table layout.
func (m *Map[K, V]) ForEach(f func(K, V)) {
	if m.hasZeroKey {
		var zeroKey K
		f(zeroKey, m.zeroVal)
	}
	forEach64(m.data, f)
}
// Clear removes all items from the map, but keeps the internal buffers for reuse.
func (m *Map[K, V]) Clear() {
	m.hasZeroKey = false
	var emptyVal V
	m.zeroVal = emptyVal

	// Zero every slot; this loop compiles down to runtime.memclr()
	for i := range m.data {
		m.data[i] = pair[K, V]{}
	}
	m.size = 0
}
// rehash doubles the table and re-inserts every non-zero key.
// size is reset first (1 if the zero key is present) because Put re-counts
// each entry as it is re-inserted.
func (m *Map[K, V]) rehash() {
	old := m.data
	m.data = make([]pair[K, V], len(old)*2)

	m.size = 0
	if m.hasZeroKey {
		m.size = 1
	}

	forEach64(old, m.Put)
}
// Len returns the number of elements in the map.
func (m *Map[K, V]) Len() int {
	return m.size
}

// sizeThreshold is the max entry count before the table must grow (load factor).
func (m *Map[K, V]) sizeThreshold() int {
	return int(math.Floor(float64(len(m.data)) * fillFactor64))
}

// startIndex is the home slot for a key. len(m.data) is a power of two,
// so the mask is equivalent to a modulo.
func (m *Map[K, V]) startIndex(key K) int {
	return phiMix64(int(key)) & (len(m.data) - 1)
}

// nextIndex advances one slot with wrap-around (linear probing).
func (m *Map[K, V]) nextIndex(idx int) int {
	return (idx + 1) & (len(m.data) - 1)
}
// forEach64 calls f for every occupied slot (non-zero key) in pairs.
func forEach64[K IntKey, V any](pairs []pair[K, V], f func(k K, v V)) {
	var zeroKey K
	for i := range pairs {
		if pairs[i].K != zeroKey {
			f(pairs[i].K, pairs[i].V)
		}
	}
}
// Del deletes a key and its value, returning true iff the key was found
func (m *Map[K, V]) Del(key K) bool {
	if key == K(0) {
		if m.hasZeroKey {
			m.hasZeroKey = false
			m.size--
			return true
		}
		return false
	}

	// Linear probe from the key's home slot.
	idx := m.startIndex(key)
	p := m.data[idx]

	if p.K == key {
		// any keys that were pushed back need to be shifted back into the empty slot
		// to avoid breaking the probe chain
		m.shiftKeys(idx)
		m.size--
		return true
	} else if p.K == K(0) { // end of chain already
		return false
	}

	for {
		idx = m.nextIndex(idx)
		p = m.data[idx]

		if p.K == key {
			// any keys that were pushed back need to be shifted back into the empty slot
			// to avoid breaking the probe chain
			m.shiftKeys(idx)
			m.size--
			return true
		} else if p.K == K(0) {
			return false
		}
	}
}
// shiftKeys performs backward-shift deletion starting at the freed slot idx.
// It walks the probe chain and moves any entry whose home slot precedes the
// hole back into the hole, repeating until an empty slot ends the chain.
// Returns the index of the slot that was ultimately left empty.
func (m *Map[K, V]) shiftKeys(idx int) int {
	// Shift entries with the same hash.
	// We need to do this on deletion to ensure we don't have zeroes in the hash chain
	for {
		var p pair[K, V]
		lastIdx := idx // lastIdx is the current hole
		idx = m.nextIndex(idx)
		for {
			p = m.data[idx]
			if p.K == K(0) {
				// Chain ended: clear the hole and stop.
				m.data[lastIdx] = pair[K, V]{}
				return lastIdx
			}

			// slot is this entry's home index; decide whether the entry may
			// move back into the hole without breaking its own probe chain.
			// The two branches handle the wrapped vs non-wrapped hole/probe order.
			slot := m.startIndex(p.K)
			if lastIdx <= idx {
				if lastIdx >= slot || slot > idx {
					break
				}
			} else {
				if lastIdx >= slot && slot > idx {
					break
				}
			}
			idx = m.nextIndex(idx)
		}
		// Move the entry into the hole; its old slot becomes the new hole.
		m.data[lastIdx] = p
	}
}
// nextPowerOf2 rounds x up to the next power of two.
// Special cases: 0 -> 1, MaxUint32 -> MaxUint32. For x just above 1<<31 the
// addition wraps to 0, matching the original bit-smearing implementation.
func nextPowerOf2(x uint32) uint32 {
	switch x {
	case math.MaxUint32:
		return x
	case 0:
		return 1
	}

	// Smear the highest set bit of x-1 downward, then add one.
	x--
	for shift := uint(1); shift <= 16; shift <<= 1 {
		x |= x >> shift
	}
	return x + 1
}

// arraySize returns the power-of-two table length needed to hold exp entries
// at the given fill factor, with a minimum size of 2.
func arraySize(exp int, fill float64) int {
	needed := uint32(math.Ceil(float64(exp) / fill))
	size := nextPowerOf2(needed)
	if size < 2 {
		size = 2
	}
	return int(size)
}
package ecs
// list is a minimal growable slice wrapper.
type list[T any] struct {
	list []T
}

// newList returns an empty (but non-nil) list.
func newList[T any]() list[T] {
	var l list[T]
	l.list = make([]T, 0)
	return l
}

// Add appends a single element to the end of the list.
func (l *list[T]) Add(t T) {
	l.list = append(l.list, t)
}
package ecs
import "fmt"
// Note: you can increase max component size by increasing maxComponentId and archetypeMask
// TODO: I should have some kind of panic if you go over maximum component size
const numMaskBlocks = 4

// Largest valid component id. 4 mask blocks = 256 component ids (0-255).
// (The previous comment said "255 components", which undercounted by one.)
const maxComponentId = (numMaskBlocks * 64) - 1

var blankArchMask archetypeMask

// archetypeMask is a bitset with one bit per possible component id.
// Supports maximum 256 unique component types
type archetypeMask [numMaskBlocks]uint64 // TODO: can/should I make this configurable?

// String renders the mask as hex for debugging. Each 64-bit block is
// zero-padded to 16 digits so the output is unambiguous, and the loop stays
// correct if numMaskBlocks ever changes (the old version hard-coded 4 blocks
// and used unpadded %x, which made e.g. {0x1,0x23} and {0x12,0x3} collide).
func (a archetypeMask) String() string {
	out := "0x"
	for _, block := range a {
		out += fmt.Sprintf("%016x", block)
	}
	return out
}
// buildArchMask returns a mask with the bit set for each provided component.
func buildArchMask(comps ...Component) archetypeMask {
	var mask archetypeMask
	for _, comp := range comps {
		mask.addComponent(comp.CompId())
	}
	return mask
}
// buildArchMaskFromAny returns a mask with the bit set for each provided
// value, resolving component ids via the global registry (name).
func buildArchMaskFromAny(comps ...any) archetypeMask {
	var mask archetypeMask
	for _, comp := range comps {
		mask.addComponent(name(comp))
	}
	return mask
}
// buildArchMaskFromId returns a mask with the bit set for each provided id.
func buildArchMaskFromId(compIds ...CompId) archetypeMask {
	var mask archetypeMask
	for _, compId := range compIds {
		mask.addComponent(compId)
	}
	return mask
}
// addComponent sets the bit for compId.
// Block ranges: [0, 64), [64, 128), [128, 192), [192, 256)
func (m *archetypeMask) addComponent(compId CompId) {
	block, bit := compId/64, compId%64
	m[block] |= 1 << bit
}
// removeComponent clears the bit for compId.
// Block ranges: [0, 64), [64, 128), [128, 192), [192, 256)
func (m *archetypeMask) removeComponent(compId CompId) {
	block, bit := compId/64, compId%64
	m[block] &^= 1 << bit
}
// bitwiseOr returns a copy of m with every bit from a also set (m | a).
func (m archetypeMask) bitwiseOr(a archetypeMask) archetypeMask {
	for i := range m {
		m[i] |= a[i]
	}
	return m
}
// bitwiseAnd returns the intersection of m and a (m & a).
func (m archetypeMask) bitwiseAnd(a archetypeMask) archetypeMask {
	for i := range m {
		m[i] &= a[i]
	}
	return m
}
// bitwiseClear returns a copy of m with every bit set in c cleared (m &^ c).
func (m archetypeMask) bitwiseClear(c archetypeMask) archetypeMask {
	for i := range m {
		m[i] &^= c[i]
	}
	return m
}
// Example: m: 0x1010, c: 0x1100, ^c: 0x0011 => result: 0x0010
// contains reports whether every bit set in m is also set in a,
// i.e. archetype a has at least all the components of m.
func (m archetypeMask) contains(a archetypeMask) bool {
	// If ANDing a block with a's block loses any bit, a is missing a component.
	for i := range m {
		if m[i]&a[i] != m[i] {
			return false
		}
	}
	return true
}
// hasComponent reports whether the bit for compId is set in the mask.
// Block ranges: [0, 64), [64, 128), [128, 192), [192, 256)
func (m archetypeMask) hasComponent(compId CompId) bool {
	return m[compId/64]&(1<<(compId%64)) != 0
}
// getComponentList returns every componentId set in the mask, in ascending order.
func (m archetypeMask) getComponentList() []CompId {
	ret := make([]CompId, 0)
	for block := 0; block < numMaskBlocks; block++ {
		for bit := 0; bit < 64; bit++ {
			if m[block]&(1<<bit) != 0 {
				ret = append(ret, CompId(block*64+bit))
			}
		}
	}
	return ret
}
package ecs
import (
"fmt"
"reflect"
"sync"
)
// nameTyped resolves the component id for T and ensures a typed storage
// builder is registered for it as a side effect.
func nameTyped[T any](comp T) CompId {
	id := name(comp)
	registerComponentStorage[T](id)
	return id
}
// storageBuilder creates a typed storage; used to build component storages
// from an untyped CompId (see newComponentStorage).
type storageBuilder interface {
	build() storage
}

// storageBuilderImp carries the concrete component type T for construction.
type storageBuilderImp[T any] struct {
}

// build creates an empty componentStorage for T.
func (s storageBuilderImp[T]) build() storage {
	return &componentStorage[T]{
		slice: newMap[archetypeId, *componentList[T]](DefaultAllocation),
	}
}
// Guards componentStorageLookup; registration may happen from multiple worlds.
var componentStorageLookupMut sync.RWMutex

// Maps a component id to the builder that knows its concrete type.
var componentStorageLookup = make(map[CompId]storageBuilder)

// registerComponentStorage records a typed storage builder for compId.
// The first registration wins; later calls for the same id are no-ops.
func registerComponentStorage[T any](compId CompId) {
	componentStorageLookupMut.Lock()
	defer componentStorageLookupMut.Unlock()

	if _, exists := componentStorageLookup[compId]; !exists {
		componentStorageLookup[compId] = storageBuilderImp[T]{}
	}
}
// newComponentStorage builds the storage for component id c.
// Panics if c was never registered via registerComponentStorage.
func newComponentStorage(c CompId) storage {
	componentStorageLookupMut.RLock()
	s, ok := componentStorageLookup[c]
	componentStorageLookupMut.RUnlock()
	// BUG FIX: previously the panic fired while still holding the read lock,
	// so a recovered panic would leave the RWMutex locked forever.
	if !ok {
		panic(fmt.Sprintf("tried to build component storage with unregistered componentId: %d", c))
	}
	return s.build()
}
//--------------------------------------------------------------------------------
// Guards the global component id registry; there may be multiple worlds.
var componentIdMutex sync.Mutex

// Maps a component's reflect.Type to its assigned id.
var registeredComponents = make(map[reflect.Type]CompId, maxComponentId)

// Id 0 is reserved as the invalid component id; real ids start at 1.
var invalidComponentId CompId = 0
var componentRegistryCounter CompId = 1
// name returns the component id for the dynamic type of t, assigning the
// next free id on first sight of the type.
func name(t any) CompId {
	// Note: We have to lock here in case there are multiple worlds
	// TODO!! - This probably causes some performance penalty
	componentIdMutex.Lock()
	defer componentIdMutex.Unlock()

	key := reflect.TypeOf(t)
	if id, ok := registeredComponents[key]; ok {
		return id
	}

	id := componentRegistryCounter
	registeredComponents[key] = id
	componentRegistryCounter++
	return id
}
// // Possible solution: Runs faster than reflection (mostly useful for potentially removing/reducing ecs.C(...) overhead
// import (
// "sync"
// "unsafe"
// )
// type emptyInterface struct {
// typ unsafe.Pointer
// ptr unsafe.Pointer
// }
// var componentIdMutex sync.Mutex
// var registeredComponents = make(map[uintptr]componentId, maxComponentId)
// var invalidComponentId componentId = 0
// var componentRegistryCounter componentId = 1
// func name(t any) componentId {
// // Note: We have to lock here in case there are multiple worlds
// // TODO!! - This probably causes some performance penalty
// componentIdMutex.Lock()
// defer componentIdMutex.Unlock()
// iface := (*emptyInterface)(unsafe.Pointer(&t))
// typeptr := uintptr(iface.typ)
// compId, ok := registeredComponents[typeptr]
// if !ok {
// compId = componentRegistryCounter
// registeredComponents[typeptr] = compId
// componentRegistryCounter++
// }
// return compId
// }
package ecs
// storage is the type-erased interface every componentStorage[T] implements;
// the archEngine holds one per component id.
type storage interface {
	ReadToEntity(*Entity, archetypeId, int) bool
	ReadToRawEntity(*RawEntity, archetypeId, int) bool
	Allocate(archetypeId, int) // Allocates the index, setting the data there to the zero value
	Delete(archetypeId, int)
	moveArchetype(entLoc, entLoc) // From -> To
}
// --------------------------------------------------------------------------------
// - Lookup List
// --------------------------------------------------------------------------------
// TODO: Rename, this is kind of like an archetype header
type lookupList struct {
	id    []Id    // An array of every id in the arch list (essentially a reverse mapping from index to Id)
	holes []int   // List of indexes that have been deleted
	mask  archetypeMask
	components []CompId // This is a list of all components that this archetype contains
}

// Len returns the number of live entities (total slots minus deleted holes).
func (l *lookupList) Len() int {
	return len(l.id) - len(l.holes)
}
// addToEasiestHole stores id in the most recently created hole if one exists,
// otherwise appends a new slot. Returns the index used.
func (l *lookupList) addToEasiestHole(id Id) int {
	holeCount := len(l.holes)
	if holeCount == 0 {
		// No holes: the Id hasn't been added to this arch, append to the end
		l.id = append(l.id, id)
		return len(l.id) - 1
	}

	// Reuse the last hole (cheapest to pop)
	index := l.holes[holeCount-1]
	l.holes = l.holes[:holeCount-1]
	l.id[index] = id
	return index
}
// --------------------------------------------------------------------------------
// - ComponentSlice
// --------------------------------------------------------------------------------
// componentList is a dense slice of component values for one archetype.
type componentList[T any] struct {
	comp []T
}

// Write sets index to val, growing by exactly one element when index equals
// the current length. Note: This will panic if you write past the buffer by
// more than 1.
func (s *componentList[T]) Write(index int, val T) {
	switch {
	case index == len(s.comp):
		// Case: index causes a single append (new element added)
		s.comp = append(s.comp, val)
	default:
		// Case: index is inside the length
		// Edge (Causes Panic): index is greater than length
		s.comp[index] = val
	}
}
// --------------------------------------------------------------------------------
// - ComponentSliceStorage
// --------------------------------------------------------------------------------
// componentStorage holds all values of component T, grouped per archetype.
type componentStorage[T any] struct {
	// TODO: Could these just increment rather than be a map lookup? I guess not every component type would have a storage slice for every archetype so we'd waste some memory. I guess at the very least we could use the faster lookup map
	slice *internalMap[archetypeId, *componentList[T]]
}
// ReadToEntity copies the component at (archId, index) into entity.
// Returns false if this storage has no slice for archId.
func (ss *componentStorage[T]) ReadToEntity(entity *Entity, archId archetypeId, index int) bool {
	cSlice, found := ss.slice.Get(archId)
	if found {
		entity.Add(C(cSlice.comp[index]))
	}
	return found
}
// ReadToRawEntity stores a pointer to the component at (archId, index) into
// entity. Returns false if this storage has no slice for archId.
func (ss *componentStorage[T]) ReadToRawEntity(entity *RawEntity, archId archetypeId, index int) bool {
	cSlice, found := ss.slice.Get(archId)
	if found {
		entity.Add(&cSlice.comp[index])
	}
	return found
}
// GetSlice returns the component list for archId, lazily creating it.
func (ss *componentStorage[T]) GetSlice(archId archetypeId) *componentList[T] {
	if existing, ok := ss.slice.Get(archId); ok {
		return existing
	}

	fresh := &componentList[T]{
		comp: make([]T, 0, DefaultAllocation),
	}
	ss.slice.Put(archId, fresh)
	return fresh
}
// Allocate writes the zero value of T at (archId, index), growing the slice
// by one when index equals its length.
func (ss *componentStorage[T]) Allocate(archId archetypeId, index int) {
	var zero T
	ss.GetSlice(archId).Write(index, zero)
}
// moveArchetype copies the component value from oldLoc to newLoc.
// NOTE(review): both Get results ignore the ok flag — this assumes slices for
// both archetypes already exist (a nil slice here would panic); confirm the
// callers always Allocate first.
func (ss *componentStorage[T]) moveArchetype(oldLoc, newLoc entLoc) {
	oldSlice, _ := ss.slice.Get(oldLoc.archId)
	newSlice, _ := ss.slice.Get(newLoc.archId)

	val := oldSlice.comp[oldLoc.index]
	newSlice.Write(int(newLoc.index), val)
}
// Delete is somewhat special because it deletes the index of the archId for
// the componentSlice but then plugs the hole by moving the last element of
// the componentSlice into index (swap-remove).
func (ss *componentStorage[T]) Delete(archId archetypeId, index int) {
	cSlice, ok := ss.slice.Get(archId)
	if !ok {
		return
	}

	last := len(cSlice.comp) - 1
	cSlice.comp[index] = cSlice.comp[last]
	cSlice.comp = cSlice.comp[:last]
}
package ecs
import (
"fmt"
"sync/atomic"
"time"
"runtime"
)
// Represents an individual system: a named function driven by the scheduler.
type System struct {
	Name string
	Func func(dt time.Duration)
}

// Build implements SystemBuilder: a System builds to itself unchanged.
func (s System) Build(world *World) System {
	return s
}
// NewSystem creates a new system. The system name is derived automatically
// from the name of the calling function.
func NewSystem(lambda func(dt time.Duration)) System {
	sysName := "UnknownSystemName"
	if pc, _, _, ok := runtime.Caller(1); ok {
		sysName = runtime.FuncForPC(pc).Name()
	}
	return System{
		Name: sysName,
		Func: lambda,
	}
}
// Executes the system once with the provided delta time.
// (Per-system timing was disabled here; runStage measures timing externally.)
func (s *System) step(dt time.Duration) {
	// Note: Disable timing
	s.Func(dt)
	// return 0
	// fmt.Println(s.Name) // Spew
	// start := time.Now()
	// s.Func(dt)
	// return time.Since(start)
}
// SystemLog records one system execution and how long it took.
type SystemLog struct {
	Name string
	Time time.Duration
}

// String formats the log entry as "Name: Duration".
func (s *SystemLog) String() string {
	return fmt.Sprintf("%s: %s", s.Name, s.Time)
}
// // TODO - Just use an atomic here?
// type signal struct {
// mu sync.Mutex
// value bool
// }
// func (s *signal) Set(val bool) {
// s.mu.Lock()
// s.value = val
// s.mu.Unlock()
// }
// func (s *signal) Get() bool {
// s.mu.Lock()
// ret := s.value
// s.mu.Unlock()
// return ret
// }
// Scheduler is a place to put your systems and have them run.
// There are two types of systems: Fixed time systems and dynamic time systems
// 1. Fixed time systems will execute on a fixed time step
// 2. Dynamic time systems will execute as quickly as they possibly can
// The scheduler may change in the future, but right now how it works is simple:
// Input: Execute input systems (Dynamic time systems)
// Physics: Execute physics systems (Fixed time systems)
// Render: Execute render systems (Dynamic time systems)
type Scheduler struct {
	world   *World
	systems [][]System // Indexed by Stage
	sysTimeFront, sysTimeBack [][]SystemLog // Rotating log of how long each system takes
	// stageTimingFront, stageTimingBack []SystemLog // Rotating log of how long each stage takes
	fixedTimeStep time.Duration // How much simulated time each fixed update represents
	accumulator   time.Duration // Unspent time waiting to be consumed by fixed updates
	gameSpeed     float64       // Multiplier applied to real elapsed time
	quit          atomic.Bool
	pauseRender   atomic.Bool
	maxLoopCount  int // Cap on accumulated fixed steps per frame (0 = unlimited)
}
// NewScheduler creates a scheduler with one system bucket and timing log per
// stage, a 16ms fixed time step, and normal (1x) game speed.
func NewScheduler(world *World) *Scheduler {
	stageCount := int(StageLast) + 1
	return &Scheduler{
		world:         world,
		systems:       make([][]System, stageCount),
		sysTimeFront:  make([][]SystemLog, stageCount),
		sysTimeBack:   make([][]SystemLog, stageCount),
		fixedTimeStep: 16 * time.Millisecond,
		gameSpeed:     1,
	}
}
// TODO make SetGameSpeed and SetFixedTimeStep thread safe.
// Sets the rate at which time accumulates. Also, you want them to only change at the end of a frame, else you might get some inconsistencies. Just use a mutex and a single temporary variable
func (s *Scheduler) SetGameSpeed(speed float64) {
	s.gameSpeed = speed
}
// SetQuit sets the scheduler's quit flag to value. The scheduler will finish
// executing its remaining tick before closing.
// BUG FIX: previously this stored true unconditionally, so SetQuit(false)
// could never un-set the flag despite taking a bool parameter.
func (s *Scheduler) SetQuit(value bool) {
	s.quit.Store(value)
}
// Returns the quit value of the scheduler
func (s *Scheduler) Quit() bool {
	return s.quit.Load()
}
// Pauses the set of render systems (ie they will be skipped).
// Deprecated: This API is tentative
func (s *Scheduler) PauseRender(value bool) {
	s.pauseRender.Store(value)
}
// Sets the amount of time required before the fixed time systems will execute
func (s *Scheduler) SetFixedTimeStep(t time.Duration) {
	s.fixedTimeStep = t
}
// Stage identifies one of the scheduler's execution phases.
type Stage uint8

// String returns the stage's name, or "Unknown" for out-of-range values.
func (s Stage) String() string {
	names := [...]string{
		"StageStartup",
		"StagePreUpdate",
		"StageFixedUpdate",
		"StageUpdate",
		"StageLast",
	}
	if int(s) < len(names) {
		return names[s]
	}
	return "Unknown"
}

const (
	// StagePreStartup
	StageStartup Stage = iota
	// StagePostStartup
	// StageFirst
	StagePreUpdate // Note: Used to be Input
	// StageStateTransition
	StageFixedUpdate
	// StagePostFixedUpdate
	StageUpdate
	// StagePostUpdate
	StageLast
)
// isFixedOnly reports whether every non-fixed stage is empty, i.e. the
// scheduler only has fixed-update systems.
func (s *Scheduler) isFixedOnly() bool {
	for _, stage := range []Stage{StagePreUpdate, StageUpdate, StageLast} {
		if len(s.systems[stage]) > 0 {
			return false
		}
	}
	return true
}
// ClearSystems removes every system from the stage.
func (s *Scheduler) ClearSystems(stage Stage) {
	// Note: Make a new slice so that any of the old system pointers get released
	s.systems[stage] = make([]System, 0)
}

// AddSystems builds each SystemBuilder against the world and appends the
// results to the stage.
func (s *Scheduler) AddSystems(stage Stage, systems ...SystemBuilder) {
	for _, builder := range systems {
		s.systems[stage] = append(s.systems[stage], builder.Build(s.world))
	}
}

// SetSystems replaces the stage's systems with the provided builders.
func (s *Scheduler) SetSystems(stage Stage, systems ...SystemBuilder) {
	s.ClearSystems(stage)
	s.AddSystems(stage, systems...)
}
// Sets the accumulator maximum point so that if the accumulator gets way too big, we will reset it and continue on, dropping all physics ticks that would have been executed. This is useful in a runtime like WASM where the browser may not let us run as frequently as we may need (for example, when the tab is hidden or minimized).
// Note: This must be set before you call scheduler.Run()
// Note: The default value is 0, which will force every physics tick to run. I highly recommend setting this to something if you plan to build for WASM!
func (s *Scheduler) SetMaxPhysicsLoopCount(count int) {
	s.maxLoopCount = count
}
// Syslog returns the most recently completed timing log for the stage
// (the front buffer of the rotating log filled by runStage).
func (s *Scheduler) Syslog(stage Stage) []SystemLog {
	return s.sysTimeFront[stage]
}

// Returns an interpolation value which represents how close we are to the next fixed time step execution. Can be useful for interpolating dynamic time systems to the fixed time systems. I might rename this
func (s *Scheduler) GetRenderInterp() float64 {
	return s.accumulator.Seconds() / s.fixedTimeStep.Seconds()
}
// runUntrackedStage executes each system in the stage (flushing world
// commands after each) without recording per-system timing.
func (s *Scheduler) runUntrackedStage(stage Stage, dt time.Duration) {
	for i := range s.systems[stage] {
		s.systems[stage][i].step(dt)
		s.world.cmd.Execute()
	}
}
// runStage executes each system in the stage, flushing world commands after
// each one, and records per-system timings into the back log buffer.
// The front/back buffers are swapped first, so Syslog always exposes the
// previous complete run while this run fills the (truncated) back buffer.
func (s *Scheduler) runStage(stage Stage, dt time.Duration) {
	// start := time.Now()

	// Rotate syslog: last run's back buffer becomes the readable front;
	// the old front is truncated (capacity kept) and reused as the new back.
	{
		tmp := s.sysTimeFront[stage]
		s.sysTimeFront[stage] = s.sysTimeBack[stage]
		s.sysTimeBack[stage] = tmp[:0]
	}

	// Append all stages
	for _, sys := range s.systems[stage] {
		sysStart := time.Now()

		sys.step(dt)
		s.world.cmd.Execute()

		s.sysTimeBack[stage] = append(s.sysTimeBack[stage], SystemLog{
			Name: sys.Name,
			Time: time.Since(sysStart),
		})
	}

	// // Track full stage timing
	// // TODO: This doesn't work, because it clears on every single stage run
	// {
	// 	tmp := s.stageTimingFront
	// 	s.stageTimingFront = s.stageTimingBack
	// 	s.stageTimingBack = tmp[:0]
	// }
	// s.stageTimingBack = append(s.stageTimingBack, SystemLog{
	// 	Name: "STAGE NAME TODO",
	// 	Time: time.Since(start),
	// })
}
// Performs a single step of the scheduler with the provided time
func (s *Scheduler) Step(dt time.Duration) {
	// Pre Update
	s.runStage(StagePreUpdate, dt)
	// If configured, cap the accumulator so that at most maxLoopCount fixed
	// updates could run this step; any excess backlog is dropped (see
	// SetMaxPhysicsLoopCount).
	maxLoopCount := time.Duration(s.maxLoopCount)
	if maxLoopCount > 0 {
		if s.accumulator > (maxLoopCount * s.fixedTimeStep) {
			s.accumulator = s.fixedTimeStep // Just run one loop
		}
	}
	// Physics Systems: one fixed update per whole fixedTimeStep accumulated
	for s.accumulator >= s.fixedTimeStep {
		s.runStage(StageFixedUpdate, s.fixedTimeStep)
		s.accumulator -= s.fixedTimeStep
	}
	// Render Systems (skipped while rendering is paused)
	if !s.pauseRender.Load() {
		s.runStage(StageUpdate, dt)
	}
}
// Note: Would be nice to sleep or something to prevent spinning while we wait for work to do
// Could also separate the render loop from the physics loop (requires some thread safety in ECS)
func (s *Scheduler) Run() {
	// Run all startup systems once before entering the main loop
	s.runUntrackedStage(StageStartup, 0)
	frameStart := time.Now()
	dt := s.fixedTimeStep
	s.accumulator = 0
	for !s.quit.Load() {
		s.Step(dt)
		// Edge case for schedules with only fixed time steps: sleep until the
		// next tick is due instead of spinning
		if s.isFixedOnly() {
			// Note: This is guaranteed to be positive because the physics execution loops until the accumulator is less than fixedtimestep
			time.Sleep(s.fixedTimeStep - s.accumulator)
		}
		// Capture Frame time
		now := time.Now()
		dt = now.Sub(frameStart)
		frameStart = now
		// Scale the elapsed frame time by gameSpeed before feeding the
		// fixed-step accumulator
		scaledDt := float64(dt.Nanoseconds()) * s.gameSpeed
		s.accumulator += time.Duration(scaledDt)
	}
}
// //Separates physics loop from render loop
// func (s *Scheduler) Run2() {
// var worldMu sync.Mutex
// frameStart := time.Now()
// dt := s.fixedTimeStep
// // var accumulator time.Duration
// s.accumulator = 0
// maxLoopCount := time.Duration(s.maxLoopCount)
// // physicsTicker := time.NewTicker(s.fixedTimeStep)
// // defer physicsTicker.Stop()
// go func() {
// // for {
// // _, more := <-physicsTicker.C
// // if !more { break } // Exit early, ticker channel is closed
// // // fmt.Println(phyTime)
// // worldMu.Lock()
// // for _, sys := range s.physics {
// // sys.Run(s.fixedTimeStep)
// // }
// // worldMu.Unlock()
// // }
// for !s.quit.Load() {
// worldMu.Lock()
// if maxLoopCount > 0 {
// if s.accumulator > (maxLoopCount * s.fixedTimeStep) {
// s.accumulator = s.fixedTimeStep // Just run one loop
// }
// }
// for s.accumulator >= s.fixedTimeStep {
// for _, sys := range s.physics {
// sys.Run(s.fixedTimeStep)
// }
// s.accumulator -= s.fixedTimeStep
// }
// worldMu.Unlock()
// time.Sleep(s.fixedTimeStep - s.accumulator)
// }
// }()
// for !s.quit.Load() {
// worldMu.Lock()
// for _, sys := range s.render {
// sys.Run(dt)
// }
// for _, sys := range s.input {
// sys.Run(dt)
// }
// // Capture Frame time
// now := time.Now()
// dt = now.Sub(frameStart)
// frameStart = now
// s.accumulator += dt
// worldMu.Unlock()
// }
// }
package ecs
import (
"runtime"
"sync"
)
// Warning: This is an autogenerated file. Do not modify!!
// --------------------------------------------------------------------------------
// - View 1
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View1[A any] struct {
	world *World // The world this view reads from
	filter filterList // Tracked component ids + user filters; caches matching archetype ids
	storageA *componentStorage[A] // Typed storage for component A
}
// implement the initializer interface so that it can be automatically created and injected into systems
// Initialize builds a fresh Query1 view for the supplied world; the result is
// returned as `any` per the initializer interface.
func (v *View1[A]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query1[A](world)
}
// Creates a View for the specified world with the specified component filters.
func Query1[A any](world *World, filters ...Filter) *View1[A] {
	storageA := getStorage[A](world.engine)
	// Record the component id tracked by this view
	var zeroA A
	tracked := []CompId{name(zeroA)}
	fl := newFilterList(tracked, filters...)
	fl.regenerate(world) // Warm the archetype id cache immediately
	return &View1[A]{
		world:    world,
		filter:   fl,
		storageA: storageA,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View1[A]) Read(id Id) *A {
	if id == InvalidEntity {
		return nil
	}
	// Resolve the entity's archetype and row
	loc, found := v.world.arch.Get(id)
	if !found {
		return nil
	}
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}
	row := int(loc.index)
	// Return a pointer only if this archetype actually stores component A
	if listA, has := v.storageA.slice.Get(loc.archId); has {
		return &listA.comp[row]
	}
	return nil
}
// Counts the number of entities that match this query
func (v *View1[A]) Count() int {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	count := 0
	for _, archId := range v.filter.archIds {
		list := v.world.engine.lookup[archId]
		if list == nil {
			panic("LookupList is missing!")
		}
		count += list.Len()
	}
	return count
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View1[A]) MapId(lambda func(id Id, a *A)) {
	v.filter.regenerate(v.world)
	for _, archId := range v.filter.archIds {
		listA, _ := v.storageA.slice.Get(archId)
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		var compA []A
		if listA != nil {
			compA = listA.comp
		}
		var ptrA *A
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue // Skip holes left by deleted entities
			}
			if compA != nil {
				ptrA = &compA[idx]
			}
			lambda(ids[idx], ptrA)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// Warning: the lambda is invoked concurrently from multiple goroutines, so it
// must be safe to execute in parallel.
func (v *View1[A]) MapIdParallel(lambda func(id Id, a *A)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work: every id slot (including holes) in each matched archetype
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())
	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A // nil when the archetype lacks storage for A
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					lambda(work.ids[idx], retA)
				}
			}
		}()
	}
	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork
	// Generate
	var compA []A
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: only subslice the component storage when it exists.
			// Previously compA[start:end] was taken unconditionally, which
			// panics ("slice bounds out of range") on a nil compA whenever
			// end > 0 — the serial MapId supports nil storage by passing a
			// nil pointer, and this now matches that behavior.
			if compA != nil {
				item.compA = compA[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
func (v *View1[A]) MapSlices(lambda func(id []Id, a []A)) {
	v.filter.regenerate(v.world)
	idBatches := make([][]Id, 0)
	batchesA := make([][]A, 0)
	for _, archId := range v.filter.archIds {
		// Archetypes with no storage for A are skipped entirely
		listA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		idBatches = append(idBatches, lookup.id)
		batchesA = append(batchesA, listA.comp)
	}
	for i := range idBatches {
		lambda(idBatches[i], batchesA[i])
	}
}
// --------------------------------------------------------------------------------
// - View 2
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View2[A, B any] struct {
	world *World // The world this view reads from
	filter filterList // Tracked component ids + user filters; caches matching archetype ids
	storageA *componentStorage[A] // Typed storage for component A
	storageB *componentStorage[B] // Typed storage for component B
}
// implement the initializer interface so that it can be automatically created and injected into systems
// Initialize builds a fresh Query2 view for the supplied world; the result is
// returned as `any` per the initializer interface.
func (v *View2[A, B]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query2[A, B](world)
}
// Creates a View for the specified world with the specified component filters.
func Query2[A, B any](world *World, filters ...Filter) *View2[A, B] {
	storageA := getStorage[A](world.engine)
	storageB := getStorage[B](world.engine)
	// Zero values used only to resolve the component ids tracked by this view
	var AA A
	var BB B
	comps := []CompId{
		name(AA),
		name(BB),
	}
	filterList := newFilterList(comps, filters...)
	filterList.regenerate(world) // Warm the archetype id cache immediately
	v := &View2[A, B]{
		world: world,
		filter: filterList,
		storageA: storageA,
		storageB: storageB,
	}
	return v
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View2[A, B]) Read(id Id) (*A, *B) {
	if id == InvalidEntity {
		return nil, nil
	}
	// Resolve the entity's archetype and row index
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil
	}
	lookup := v.world.engine.lookup[loc.archId]
	if lookup == nil {
		panic("LookupList is missing!")
	}
	index := int(loc.index)
	var retA *A
	var retB *B
	// Each pointer is returned only if this archetype stores that component
	sliceA, ok := v.storageA.slice.Get(loc.archId)
	if ok {
		retA = &sliceA.comp[index]
	}
	sliceB, ok := v.storageB.slice.Get(loc.archId)
	if ok {
		retB = &sliceB.comp[index]
	}
	return retA, retB
}
// Counts the number of entities that match this query
func (v *View2[A, B]) Count() int {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	total := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		total += lookup.Len()
	}
	return total
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View2[A, B]) MapId(lambda func(id Id, a *A, b *B)) {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	// Per-archetype state hoisted out of the loop
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	for _, archId := range v.filter.archIds {
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id
		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// A nil comp slice means this archetype lacks that storage; the lambda
		// then receives a nil pointer for that component
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		retA = nil
		retB = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			lambda(ids[idx], retA, retB)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// Warning: the lambda is invoked concurrently from multiple goroutines, so it
// must be safe to execute in parallel.
func (v *View2[A, B]) MapIdParallel(lambda func(id Id, a *A, b *B)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work: every id slot (including holes) in each matched archetype
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())
	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A // nil when the archetype lacks storage for A
		compB []B // nil when the archetype lacks storage for B
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					lambda(work.ids[idx], retA, retB)
				}
			}
		}()
	}
	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork
	// Generate
	var compA []A
	var compB []B
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: only subslice component storages that exist. Previously
			// compA[start:end]/compB[start:end] were taken unconditionally,
			// which panics on a nil slice whenever end > 0 — the serial MapId
			// supports nil storage by passing nil pointers, and this now
			// matches that behavior.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
func (v *View2[A, B]) MapSlices(lambda func(id []Id, a []A, b []B)) {
	v.filter.regenerate(v.world)
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	for _, archId := range v.filter.archIds {
		// Note: archetypes missing any of the storages are skipped entirely
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
	}
	// The id slices may contain InvalidEntity holes (see MapId); callers must skip them
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx],
		)
	}
}
// --------------------------------------------------------------------------------
// - View 3
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View3[A, B, C any] struct {
	world *World // The world this view reads from
	filter filterList // Tracked component ids + user filters; caches matching archetype ids
	storageA *componentStorage[A] // Typed storage for component A
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
}
// implement the initializer interface so that it can be automatically created and injected into systems
// Initialize builds a fresh Query3 view for the supplied world; the result is
// returned as `any` per the initializer interface.
func (v *View3[A, B, C]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query3[A, B, C](world)
}
// Creates a View for the specified world with the specified component filters.
func Query3[A, B, C any](world *World, filters ...Filter) *View3[A, B, C] {
	storageA := getStorage[A](world.engine)
	storageB := getStorage[B](world.engine)
	storageC := getStorage[C](world.engine)
	// Zero values used only to resolve the component ids tracked by this view
	var AA A
	var BB B
	var CC C
	comps := []CompId{
		name(AA),
		name(BB),
		name(CC),
	}
	filterList := newFilterList(comps, filters...)
	filterList.regenerate(world) // Warm the archetype id cache immediately
	v := &View3[A, B, C]{
		world: world,
		filter: filterList,
		storageA: storageA,
		storageB: storageB,
		storageC: storageC,
	}
	return v
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View3[A, B, C]) Read(id Id) (*A, *B, *C) {
	if id == InvalidEntity {
		return nil, nil, nil
	}
	// Resolve the entity's archetype and row index
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil
	}
	lookup := v.world.engine.lookup[loc.archId]
	if lookup == nil {
		panic("LookupList is missing!")
	}
	index := int(loc.index)
	var retA *A
	var retB *B
	var retC *C
	// Each pointer is returned only if this archetype stores that component
	sliceA, ok := v.storageA.slice.Get(loc.archId)
	if ok {
		retA = &sliceA.comp[index]
	}
	sliceB, ok := v.storageB.slice.Get(loc.archId)
	if ok {
		retB = &sliceB.comp[index]
	}
	sliceC, ok := v.storageC.slice.Get(loc.archId)
	if ok {
		retC = &sliceC.comp[index]
	}
	return retA, retB, retC
}
// Counts the number of entities that match this query
func (v *View3[A, B, C]) Count() int {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	total := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		total += lookup.Len()
	}
	return total
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View3[A, B, C]) MapId(lambda func(id Id, a *A, b *B, c *C)) {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	// Per-archetype state hoisted out of the loop
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	for _, archId := range v.filter.archIds {
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id
		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// A nil comp slice means this archetype lacks that storage; the lambda
		// then receives a nil pointer for that component
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		retA = nil
		retB = nil
		retC = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			lambda(ids[idx], retA, retB, retC)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// Warning: the lambda is invoked concurrently from multiple goroutines, so it
// must be safe to execute in parallel.
func (v *View3[A, B, C]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work: every id slot (including holes) in each matched archetype
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())
	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A // nil when the archetype lacks storage for A
		compB []B // nil when the archetype lacks storage for B
		compC []C // nil when the archetype lacks storage for C
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					lambda(work.ids[idx], retA, retB, retC)
				}
			}
		}()
	}
	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork
	// Generate
	var compA []A
	var compB []B
	var compC []C
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: only subslice component storages that exist. Previously
			// the subslices were taken unconditionally, which panics on a nil
			// slice whenever end > 0 — the serial MapId supports nil storage
			// by passing nil pointers, and this now matches that behavior.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
func (v *View3[A, B, C]) MapSlices(lambda func(id []Id, a []A, b []B, c []C)) {
	v.filter.regenerate(v.world)
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	sliceListC := make([][]C, 0)
	for _, archId := range v.filter.archIds {
		// Note: archetypes missing any of the storages are skipped entirely
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
		sliceListC = append(sliceListC, sliceC.comp)
	}
	// The id slices may contain InvalidEntity holes (see MapId); callers must skip them
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx], sliceListC[idx],
		)
	}
}
// --------------------------------------------------------------------------------
// - View 4
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View4[A, B, C, D any] struct {
	world *World // The world this view reads from
	filter filterList // Tracked component ids + user filters; caches matching archetype ids
	storageA *componentStorage[A] // Typed storage for component A
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
	storageD *componentStorage[D] // Typed storage for component D
}
// implement the initializer interface so that it can be automatically created and injected into systems
// Initialize builds a fresh Query4 view for the supplied world; the result is
// returned as `any` per the initializer interface.
func (v *View4[A, B, C, D]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query4[A, B, C, D](world)
}
// Creates a View for the specified world with the specified component filters.
func Query4[A, B, C, D any](world *World, filters ...Filter) *View4[A, B, C, D] {
	storageA := getStorage[A](world.engine)
	storageB := getStorage[B](world.engine)
	storageC := getStorage[C](world.engine)
	storageD := getStorage[D](world.engine)
	// Zero values used only to resolve the component ids tracked by this view
	var AA A
	var BB B
	var CC C
	var DD D
	comps := []CompId{
		name(AA),
		name(BB),
		name(CC),
		name(DD),
	}
	filterList := newFilterList(comps, filters...)
	filterList.regenerate(world) // Warm the archetype id cache immediately
	v := &View4[A, B, C, D]{
		world: world,
		filter: filterList,
		storageA: storageA,
		storageB: storageB,
		storageC: storageC,
		storageD: storageD,
	}
	return v
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View4[A, B, C, D]) Read(id Id) (*A, *B, *C, *D) {
	if id == InvalidEntity {
		return nil, nil, nil, nil
	}
	// Resolve the entity's archetype and row index
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil
	}
	lookup := v.world.engine.lookup[loc.archId]
	if lookup == nil {
		panic("LookupList is missing!")
	}
	index := int(loc.index)
	var retA *A
	var retB *B
	var retC *C
	var retD *D
	// Each pointer is returned only if this archetype stores that component
	sliceA, ok := v.storageA.slice.Get(loc.archId)
	if ok {
		retA = &sliceA.comp[index]
	}
	sliceB, ok := v.storageB.slice.Get(loc.archId)
	if ok {
		retB = &sliceB.comp[index]
	}
	sliceC, ok := v.storageC.slice.Get(loc.archId)
	if ok {
		retC = &sliceC.comp[index]
	}
	sliceD, ok := v.storageD.slice.Get(loc.archId)
	if ok {
		retD = &sliceD.comp[index]
	}
	return retA, retB, retC, retD
}
// Counts the number of entities that match this query
func (v *View4[A, B, C, D]) Count() int {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	total := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		total += lookup.Len()
	}
	return total
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View4[A, B, C, D]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D)) {
	v.filter.regenerate(v.world) // Pick up any newly created archetypes
	// Per-archetype state hoisted out of the loop
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	for _, archId := range v.filter.archIds {
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id
		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// A nil comp slice means this archetype lacks that storage; the lambda
		// then receives a nil pointer for that component
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// Warning: the lambda is invoked concurrently from multiple goroutines, so it
// must be safe to execute in parallel.
func (v *View4[A, B, C, D]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work: every id slot (including holes) in each matched archetype
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())
	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A // nil when the archetype lacks storage for A
		compB []B // nil when the archetype lacks storage for B
		compC []C // nil when the archetype lacks storage for C
		compD []D // nil when the archetype lacks storage for D
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD)
				}
			}
		}()
	}
	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork
	// Generate
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: only subslice component storages that exist. Previously
			// the subslices were taken unconditionally, which panics on a nil
			// slice whenever end > 0 — the serial MapId supports nil storage
			// by passing nil pointers, and this now matches that behavior.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
func (v *View4[A, B, C, D]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D)) {
	v.filter.regenerate(v.world)
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	sliceListC := make([][]C, 0)
	sliceListD := make([][]D, 0)
	for _, archId := range v.filter.archIds {
		// Note: archetypes missing any of the storages are skipped entirely
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
		sliceListC = append(sliceListC, sliceC.comp)
		sliceListD = append(sliceListD, sliceD.comp)
	}
	// The id slices may contain InvalidEntity holes (see MapId); callers must skip them
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx], sliceListC[idx], sliceListD[idx],
		)
	}
}
// --------------------------------------------------------------------------------
// - View 5
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View5[A, B, C, D, E any] struct {
	world *World // The world whose entity/component data this view reads
	filter filterList // Cached list of matching archetype ids; regenerated before each operation
	storageA *componentStorage[A] // Typed storage for component A, looked up per archetypeId
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
	storageD *componentStorage[D] // Typed storage for component D
	storageE *componentStorage[E] // Typed storage for component E
}
// implement the initializer interface so that it can be automatically created and injected into systems
func (v *View5[A, B, C, D, E]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	// Builds a fresh query with no extra filters; the returned value is injected into the system.
	return Query5[A, B, C, D, E](world)
}
// Creates a View for the specified world with the specified component filters.
func Query5[A, B, C, D, E any](world *World, filters ...Filter) *View5[A, B, C, D, E] {
	// Resolve (and, if needed, register) the typed storage for each component first.
	sA := getStorage[A](world.engine)
	sB := getStorage[B](world.engine)
	sC := getStorage[C](world.engine)
	sD := getStorage[D](world.engine)
	sE := getStorage[E](world.engine)
	// Zero values are only used to resolve each component's CompId via name().
	var zeroA A
	var zeroB B
	var zeroC C
	var zeroD D
	var zeroE E
	fl := newFilterList([]CompId{
		name(zeroA),
		name(zeroB),
		name(zeroC),
		name(zeroD),
		name(zeroE),
	}, filters...)
	fl.regenerate(world)
	return &View5[A, B, C, D, E]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View5[A, B, C, D, E]) Read(id Id) (*A, *B, *C, *D, *E) {
	if id == InvalidEntity {
		return nil, nil, nil, nil, nil
	}
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil, nil
	}
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}
	idx := int(loc.index)
	// Each component resolves independently; missing ones stay nil.
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	return a, b, c, d, e
}
// Counts the number of entities that match this query
func (v *View5[A, B, C, D, E]) Count() int {
	v.filter.regenerate(v.world)
	// Sum the sizes of every matching archetype.
	sum := 0
	for _, archId := range v.filter.archIds {
		ll := v.world.engine.lookup[archId]
		if ll == nil {
			panic("LookupList is missing!")
		}
		sum += ll.Len()
	}
	return sum
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View5[A, B, C, D, E]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E)) {
	v.filter.regenerate(v.world)
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		// Resolve each component slice for this archetype; a nil slice means
		// the archetype does not hold that component.
		var compA []A
		if s, _ := v.storageA.slice.Get(archId); s != nil {
			compA = s.comp
		}
		var compB []B
		if s, _ := v.storageB.slice.Get(archId); s != nil {
			compB = s.comp
		}
		var compC []C
		if s, _ := v.storageC.slice.Get(archId); s != nil {
			compC = s.comp
		}
		var compD []D
		if s, _ := v.storageD.slice.Get(archId); s != nil {
			compD = s.comp
		}
		var compE []E
		if s, _ := v.storageE.slice.Get(archId); s != nil {
			compE = s.comp
		}
		for idx := range ids {
			// InvalidEntity entries are holes left by deletions; skip them.
			if ids[idx] == InvalidEntity {
				continue
			}
			var retA *A
			var retB *B
			var retC *C
			var retD *D
			var retE *E
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// The lambda may be invoked concurrently from multiple goroutines, so it must be safe for parallel use.
func (v *View5[A, B, C, D, E]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	// 1. Calculate the total amount of work (entity slots to scan, holes included)
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate the number of workers; never more workers than units of work
	numThreads := min(totalWork, runtime.NumCPU())
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
	}
	// 3. Spawn workers that consume work items until the channel is closed
	var waitGroup sync.WaitGroup
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip holes left by deleted entities
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE)
				}
			}
		}()
	}
	// 4. Generate work: split every archetype into evenly balanced chunks
	workPerThread := totalWork / numThreads
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		if len(ids) == 0 {
			continue // Nothing to schedule for an empty archetype
		}
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := min((i+1)*actualWorkPerThread, len(ids))
			if start >= end {
				continue // Skip empty/out-of-range chunks instead of sending them
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: a component slice is nil when this archetype lacks that
			// component (eg with optional filters). Slicing a nil slice with a
			// nonzero end index panics, so only slice the slices that exist.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// MapSlices calls lambda once per matching archetype, handing over the raw
// id and component slices for that archetype. Archetypes missing any of the
// five components are skipped entirely.
func (v *View5[A, B, C, D, E]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E)) {
	v.filter.regenerate(v.world)
	// Collect the per-archetype slices first, then invoke the lambda for each.
	var idLists [][]Id
	var listsA [][]A
	var listsB [][]B
	var listsC [][]C
	var listsD [][]D
	var listsE [][]E
	for _, archId := range v.filter.archIds {
		sA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		idLists = append(idLists, lookup.id)
		listsA = append(listsA, sA.comp)
		listsB = append(listsB, sB.comp)
		listsC = append(listsC, sC.comp)
		listsD = append(listsD, sD.comp)
		listsE = append(listsE, sE.comp)
	}
	for i := range idLists {
		lambda(idLists[i], listsA[i], listsB[i], listsC[i], listsD[i], listsE[i])
	}
}
// --------------------------------------------------------------------------------
// - View 6
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View6[A, B, C, D, E, F any] struct {
	world *World // The world whose entity/component data this view reads
	filter filterList // Cached list of matching archetype ids; regenerated before each operation
	storageA *componentStorage[A] // Typed storage for component A, looked up per archetypeId
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
	storageD *componentStorage[D] // Typed storage for component D
	storageE *componentStorage[E] // Typed storage for component E
	storageF *componentStorage[F] // Typed storage for component F
}
// implement the initializer interface so that it can be automatically created and injected into systems
func (v *View6[A, B, C, D, E, F]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	// Builds a fresh query with no extra filters; the returned value is injected into the system.
	return Query6[A, B, C, D, E, F](world)
}
// Creates a View for the specified world with the specified component filters.
func Query6[A, B, C, D, E, F any](world *World, filters ...Filter) *View6[A, B, C, D, E, F] {
	// Resolve (and, if needed, register) the typed storage for each component first.
	sA := getStorage[A](world.engine)
	sB := getStorage[B](world.engine)
	sC := getStorage[C](world.engine)
	sD := getStorage[D](world.engine)
	sE := getStorage[E](world.engine)
	sF := getStorage[F](world.engine)
	// Zero values are only used to resolve each component's CompId via name().
	var zeroA A
	var zeroB B
	var zeroC C
	var zeroD D
	var zeroE E
	var zeroF F
	fl := newFilterList([]CompId{
		name(zeroA),
		name(zeroB),
		name(zeroC),
		name(zeroD),
		name(zeroE),
		name(zeroF),
	}, filters...)
	fl.regenerate(world)
	return &View6[A, B, C, D, E, F]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
		storageF: sF,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View6[A, B, C, D, E, F]) Read(id Id) (*A, *B, *C, *D, *E, *F) {
	if id == InvalidEntity {
		return nil, nil, nil, nil, nil, nil
	}
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil, nil, nil
	}
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}
	idx := int(loc.index)
	// Each component resolves independently; missing ones stay nil.
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	var f *F
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		f = &s.comp[idx]
	}
	return a, b, c, d, e, f
}
// Counts the number of entities that match this query
func (v *View6[A, B, C, D, E, F]) Count() int {
	v.filter.regenerate(v.world)
	// Sum the sizes of every matching archetype.
	sum := 0
	for _, archId := range v.filter.archIds {
		ll := v.world.engine.lookup[archId]
		if ll == nil {
			panic("LookupList is missing!")
		}
		sum += ll.Len()
	}
	return sum
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View6[A, B, C, D, E, F]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F)) {
	v.filter.regenerate(v.world)
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		// Resolve each component slice for this archetype; a nil slice means
		// the archetype does not hold that component.
		var compA []A
		if s, _ := v.storageA.slice.Get(archId); s != nil {
			compA = s.comp
		}
		var compB []B
		if s, _ := v.storageB.slice.Get(archId); s != nil {
			compB = s.comp
		}
		var compC []C
		if s, _ := v.storageC.slice.Get(archId); s != nil {
			compC = s.comp
		}
		var compD []D
		if s, _ := v.storageD.slice.Get(archId); s != nil {
			compD = s.comp
		}
		var compE []E
		if s, _ := v.storageE.slice.Get(archId); s != nil {
			compE = s.comp
		}
		var compF []F
		if s, _ := v.storageF.slice.Get(archId); s != nil {
			compF = s.comp
		}
		for idx := range ids {
			// InvalidEntity entries are holes left by deletions; skip them.
			if ids[idx] == InvalidEntity {
				continue
			}
			var retA *A
			var retB *B
			var retC *C
			var retD *D
			var retE *E
			var retF *F
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// The lambda may be invoked concurrently from multiple goroutines, so it must be safe for parallel use.
func (v *View6[A, B, C, D, E, F]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	// 1. Calculate the total amount of work (entity slots to scan, holes included)
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate the number of workers; never more workers than units of work
	numThreads := min(totalWork, runtime.NumCPU())
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
	}
	// 3. Spawn workers that consume work items until the channel is closed
	var waitGroup sync.WaitGroup
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip holes left by deleted entities
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF)
				}
			}
		}()
	}
	// 4. Generate work: split every archetype into evenly balanced chunks
	workPerThread := totalWork / numThreads
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		if len(ids) == 0 {
			continue // Nothing to schedule for an empty archetype
		}
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := min((i+1)*actualWorkPerThread, len(ids))
			if start >= end {
				continue // Skip empty/out-of-range chunks instead of sending them
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: a component slice is nil when this archetype lacks that
			// component (eg with optional filters). Slicing a nil slice with a
			// nonzero end index panics, so only slice the slices that exist.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// MapSlices calls lambda once per matching archetype, handing over the raw
// id and component slices for that archetype. Archetypes missing any of the
// six components are skipped entirely.
func (v *View6[A, B, C, D, E, F]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F)) {
	v.filter.regenerate(v.world)
	// Collect the per-archetype slices first, then invoke the lambda for each.
	var idLists [][]Id
	var listsA [][]A
	var listsB [][]B
	var listsC [][]C
	var listsD [][]D
	var listsE [][]E
	var listsF [][]F
	for _, archId := range v.filter.archIds {
		sA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		idLists = append(idLists, lookup.id)
		listsA = append(listsA, sA.comp)
		listsB = append(listsB, sB.comp)
		listsC = append(listsC, sC.comp)
		listsD = append(listsD, sD.comp)
		listsE = append(listsE, sE.comp)
		listsF = append(listsF, sF.comp)
	}
	for i := range idLists {
		lambda(idLists[i], listsA[i], listsB[i], listsC[i], listsD[i], listsE[i], listsF[i])
	}
}
// --------------------------------------------------------------------------------
// - View 7
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View7[A, B, C, D, E, F, G any] struct {
	world *World // The world whose entity/component data this view reads
	filter filterList // Cached list of matching archetype ids; regenerated before each operation
	storageA *componentStorage[A] // Typed storage for component A, looked up per archetypeId
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
	storageD *componentStorage[D] // Typed storage for component D
	storageE *componentStorage[E] // Typed storage for component E
	storageF *componentStorage[F] // Typed storage for component F
	storageG *componentStorage[G] // Typed storage for component G
}
// implement the initializer interface so that it can be automatically created and injected into systems
func (v *View7[A, B, C, D, E, F, G]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	// Builds a fresh query with no extra filters; the returned value is injected into the system.
	return Query7[A, B, C, D, E, F, G](world)
}
// Creates a View for the specified world with the specified component filters.
func Query7[A, B, C, D, E, F, G any](world *World, filters ...Filter) *View7[A, B, C, D, E, F, G] {
	// Resolve (and, if needed, register) the typed storage for each component first.
	sA := getStorage[A](world.engine)
	sB := getStorage[B](world.engine)
	sC := getStorage[C](world.engine)
	sD := getStorage[D](world.engine)
	sE := getStorage[E](world.engine)
	sF := getStorage[F](world.engine)
	sG := getStorage[G](world.engine)
	// Zero values are only used to resolve each component's CompId via name().
	var zeroA A
	var zeroB B
	var zeroC C
	var zeroD D
	var zeroE E
	var zeroF F
	var zeroG G
	fl := newFilterList([]CompId{
		name(zeroA),
		name(zeroB),
		name(zeroC),
		name(zeroD),
		name(zeroE),
		name(zeroF),
		name(zeroG),
	}, filters...)
	fl.regenerate(world)
	return &View7[A, B, C, D, E, F, G]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
		storageF: sF,
		storageG: sG,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View7[A, B, C, D, E, F, G]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G) {
	if id == InvalidEntity {
		return nil, nil, nil, nil, nil, nil, nil
	}
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil, nil, nil, nil
	}
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}
	idx := int(loc.index)
	// Each component resolves independently; missing ones stay nil.
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	var f *F
	var g *G
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		f = &s.comp[idx]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		g = &s.comp[idx]
	}
	return a, b, c, d, e, f, g
}
// Counts the number of entities that match this query
func (v *View7[A, B, C, D, E, F, G]) Count() int {
	v.filter.regenerate(v.world)
	// Sum the sizes of every matching archetype.
	sum := 0
	for _, archId := range v.filter.archIds {
		ll := v.world.engine.lookup[archId]
		if ll == nil {
			panic("LookupList is missing!")
		}
		sum += ll.Len()
	}
	return sum
}
// Maps the lambda function across every entity which matched the specified filters.
func (v *View7[A, B, C, D, E, F, G]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G)) {
	v.filter.regenerate(v.world)
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		// Resolve each component slice for this archetype; a nil slice means
		// the archetype does not hold that component.
		var compA []A
		if s, _ := v.storageA.slice.Get(archId); s != nil {
			compA = s.comp
		}
		var compB []B
		if s, _ := v.storageB.slice.Get(archId); s != nil {
			compB = s.comp
		}
		var compC []C
		if s, _ := v.storageC.slice.Get(archId); s != nil {
			compC = s.comp
		}
		var compD []D
		if s, _ := v.storageD.slice.Get(archId); s != nil {
			compD = s.comp
		}
		var compE []E
		if s, _ := v.storageE.slice.Get(archId); s != nil {
			compE = s.comp
		}
		var compF []F
		if s, _ := v.storageF.slice.Get(archId); s != nil {
			compF = s.comp
		}
		var compG []G
		if s, _ := v.storageG.slice.Get(archId); s != nil {
			compG = s.comp
		}
		for idx := range ids {
			// InvalidEntity entries are holes left by deletions; skip them.
			if ids[idx] == InvalidEntity {
				continue
			}
			var retA *A
			var retB *B
			var retC *C
			var retD *D
			var retE *E
			var retF *F
			var retG *G
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
// The lambda may be invoked concurrently from multiple goroutines, so it must be safe for parallel use.
func (v *View7[A, B, C, D, E, F, G]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	var sliceG *componentList[G]
	// 1. Calculate the total amount of work (entity slots to scan, holes included)
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}
	// 2. Calculate the number of workers; never more workers than units of work
	numThreads := min(totalWork, runtime.NumCPU())
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
		compG []G
	}
	// 3. Spawn workers that consume work items until the channel is closed
	var waitGroup sync.WaitGroup
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				var retG *G
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip holes left by deleted entities
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					if work.compG != nil {
						retG = &work.compG[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG)
				}
			}
		}()
	}
	// 4. Generate work: split every archetype into evenly balanced chunks
	workPerThread := totalWork / numThreads
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	var compG []G
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		if len(ids) == 0 {
			continue // Nothing to schedule for an empty archetype
		}
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := min((i+1)*actualWorkPerThread, len(ids))
			if start >= end {
				continue // Skip empty/out-of-range chunks instead of sending them
			}
			item := workItem{ids: ids[start:end]}
			// Bugfix: a component slice is nil when this archetype lacks that
			// component (eg with optional filters). Slicing a nil slice with a
			// nonzero end index panics, so only slice the slices that exist.
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			if compG != nil {
				item.compG = compG[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// MapSlices calls lambda once per matching archetype, handing over the raw
// id and component slices for that archetype. Archetypes missing any of the
// seven components are skipped entirely.
func (v *View7[A, B, C, D, E, F, G]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G)) {
	v.filter.regenerate(v.world)
	// Collect the per-archetype slices first, then invoke the lambda for each.
	var idLists [][]Id
	var listsA [][]A
	var listsB [][]B
	var listsC [][]C
	var listsD [][]D
	var listsE [][]E
	var listsF [][]F
	var listsG [][]G
	for _, archId := range v.filter.archIds {
		sA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		idLists = append(idLists, lookup.id)
		listsA = append(listsA, sA.comp)
		listsB = append(listsB, sB.comp)
		listsC = append(listsC, sC.comp)
		listsD = append(listsD, sD.comp)
		listsE = append(listsE, sE.comp)
		listsF = append(listsF, sF.comp)
		listsG = append(listsG, sG.comp)
	}
	for i := range idLists {
		lambda(idLists[i], listsA[i], listsB[i], listsC[i], listsD[i], listsE[i], listsF[i], listsG[i])
	}
}
// --------------------------------------------------------------------------------
// - View 8
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View8[A, B, C, D, E, F, G, H any] struct {
	world *World // The world whose entity/component data this view reads
	filter filterList // Cached list of matching archetype ids; regenerated before each operation
	storageA *componentStorage[A] // Typed storage for component A, looked up per archetypeId
	storageB *componentStorage[B] // Typed storage for component B
	storageC *componentStorage[C] // Typed storage for component C
	storageD *componentStorage[D] // Typed storage for component D
	storageE *componentStorage[E] // Typed storage for component E
	storageF *componentStorage[F] // Typed storage for component F
	storageG *componentStorage[G] // Typed storage for component G
	storageH *componentStorage[H] // Typed storage for component H
}
// Initialize implements the initializer interface so that a View8 can be
// automatically created and injected into systems. It returns a fresh,
// unfiltered query for this view's component types.
func (v *View8[A, B, C, D, E, F, G, H]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query8[A, B, C, D, E, F, G, H](world)
}
// Creates a View for the specified world with the specified component filters.
func Query8[A, B, C, D, E, F, G, H any](world *World, filters ...Filter) *View8[A, B, C, D, E, F, G, H] {
	// Resolve the backing storage for each component type once, up front.
	engine := world.engine
	sA := getStorage[A](engine)
	sB := getStorage[B](engine)
	sC := getStorage[C](engine)
	sD := getStorage[D](engine)
	sE := getStorage[E](engine)
	sF := getStorage[F](engine)
	sG := getStorage[G](engine)
	sH := getStorage[H](engine)

	// Zero values are used solely to resolve each component's id.
	comps := []CompId{
		name(*new(A)),
		name(*new(B)),
		name(*new(C)),
		name(*new(D)),
		name(*new(E)),
		name(*new(F)),
		name(*new(G)),
		name(*new(H)),
	}
	fl := newFilterList(comps, filters...)
	fl.regenerate(world)

	return &View8[A, B, C, D, E, F, G, H]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
		storageF: sF,
		storageG: sG,
		storageH: sH,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list.
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View8[A, B, C, D, E, F, G, H]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G, *H) {
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	var f *F
	var g *G
	var h *H

	if id == InvalidEntity {
		return a, b, c, d, e, f, g, h
	}
	loc, found := v.world.arch.Get(id)
	if !found {
		return a, b, c, d, e, f, g, h
	}
	// The entity's archetype must always have a lookup list.
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}

	idx := int(loc.index)
	// For each component type, point into the archetype's column when that
	// archetype actually stores the component; otherwise leave the result nil.
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		f = &s.comp[idx]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		g = &s.comp[idx]
	}
	if s, ok := v.storageH.slice.Get(loc.archId); ok {
		h = &s.comp[idx]
	}
	return a, b, c, d, e, f, g, h
}
// Counts the number of entities that match this query.
func (v *View8[A, B, C, D, E, F, G, H]) Count() int {
	// Refresh the cached archetype list before counting.
	v.filter.regenerate(v.world)
	count := 0
	for _, archetype := range v.filter.archIds {
		list := v.world.engine.lookup[archetype]
		if list == nil {
			panic("LookupList is missing!")
		}
		count += list.Len()
	}
	return count
}
// Maps the lambda function across every entity which matched the specified filters.
// Iteration proceeds archetype by archetype. For any requested component that a
// matched archetype does not store, the corresponding pointer passed to the
// lambda is nil. Holes left by deleted entities (ids equal to InvalidEntity)
// are skipped.
func (v *View8[A, B, C, D, E, F, G, H]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H)) {
	v.filter.regenerate(v.world) // Refresh the cached archetype id list first

	// Scratch variables hoisted out of the archetype loop.
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	var sliceE *componentList[E]
	var compE []E
	var retE *E
	var sliceF *componentList[F]
	var compF []F
	var retF *F
	var sliceG *componentList[G]
	var compG []G
	var retG *G
	var sliceH *componentList[H]
	var compH []H
	var retH *H
	for _, archId := range v.filter.archIds {
		// A storage slice may be missing when this archetype does not hold
		// that component; the resulting nil slice is handled below.
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)

		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id

		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// Unpack each column (or nil) for this archetype.
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		// Reset the per-entity pointers so nil columns yield nil arguments.
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		retE = nil
		retF = nil
		retG = nil
		retH = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			if compH != nil {
				retH = &compH[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG, retH)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
//
// For any requested component that a matched archetype does not store, the
// corresponding pointer passed to the lambda is nil (matching MapId's
// semantics). The lambda may be invoked concurrently from multiple
// goroutines, so it must be safe for parallel execution.
func (v *View8[A, B, C, D, E, F, G, H]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	var sliceG *componentList[G]
	var sliceH *componentList[H]

	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}

	// 2. Calculate number of threads to execute with
	numThreads := runtime.NumCPU()
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads = min(totalWork, numThreads)

	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
		compG []G
		compH []H
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			// Consume work items until the channel is closed by the producer.
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				var retG *G
				var retH *H
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue
					} // Skip if its a hole
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					if work.compG != nil {
						retG = &work.compG[idx]
					}
					if work.compH != nil {
						retH = &work.compH[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG, retH)
				}
			}
		}()
	}

	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork

	// Generate
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	var compG []G
	var compH []H
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := min((i+1)*actualWorkPerThread, len(ids))
			// BUGFIX: only slice component columns that exist for this
			// archetype. The previous code sliced every column
			// unconditionally, which panics when a column is nil (the
			// archetype doesn't store that component) and end > 0 —
			// MapId handles this nil-column case, so this path must too.
			item := workItem{ids: ids[start:end]}
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			if compG != nil {
				item.compG = compG[start:end]
			}
			if compH != nil {
				item.compH = compH[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// Calls the lambda once per matching archetype, passing the raw id slice and
// component column slices for that archetype. NOTE: unlike MapId, archetypes
// that are missing ANY of the requested component storages are skipped
// entirely, and id slices may contain holes (InvalidEntity entries) that the
// caller must skip.
func (v *View8[A, B, C, D, E, F, G, H]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H)) {
	v.filter.regenerate(v.world)
	// Collect one entry per archetype that has every requested column.
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	sliceListC := make([][]C, 0)
	sliceListD := make([][]D, 0)
	sliceListE := make([][]E, 0)
	sliceListF := make([][]F, 0)
	sliceListG := make([][]G, 0)
	sliceListH := make([][]H, 0)
	for _, archId := range v.filter.archIds {
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sliceE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sliceF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sliceG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		sliceH, ok := v.storageH.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
		sliceListC = append(sliceListC, sliceC.comp)
		sliceListD = append(sliceListD, sliceD.comp)
		sliceListE = append(sliceListE, sliceE.comp)
		sliceListF = append(sliceListF, sliceF.comp)
		sliceListG = append(sliceListG, sliceG.comp)
		sliceListH = append(sliceListH, sliceH.comp)
	}
	// Invoke the lambda once per collected archetype.
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx], sliceListC[idx], sliceListD[idx], sliceListE[idx], sliceListF[idx], sliceListG[idx], sliceListH[idx],
		)
	}
}
// --------------------------------------------------------------------------------
// - View 9
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View9[A, B, C, D, E, F, G, H, I any] struct {
	world  *World     // The world whose entities this view reads
	filter filterList // Requested component ids plus the cached list of matching archetype ids
	// Storage handles for each requested component type, resolved once at query creation.
	storageA *componentStorage[A]
	storageB *componentStorage[B]
	storageC *componentStorage[C]
	storageD *componentStorage[D]
	storageE *componentStorage[E]
	storageF *componentStorage[F]
	storageG *componentStorage[G]
	storageH *componentStorage[H]
	storageI *componentStorage[I]
}
// Initialize implements the initializer interface so that a View9 can be
// automatically created and injected into systems. It returns a fresh,
// unfiltered query for this view's component types.
func (v *View9[A, B, C, D, E, F, G, H, I]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query9[A, B, C, D, E, F, G, H, I](world)
}
// Creates a View for the specified world with the specified component filters.
func Query9[A, B, C, D, E, F, G, H, I any](world *World, filters ...Filter) *View9[A, B, C, D, E, F, G, H, I] {
	// Resolve the backing storage for each component type once, up front.
	engine := world.engine
	sA := getStorage[A](engine)
	sB := getStorage[B](engine)
	sC := getStorage[C](engine)
	sD := getStorage[D](engine)
	sE := getStorage[E](engine)
	sF := getStorage[F](engine)
	sG := getStorage[G](engine)
	sH := getStorage[H](engine)
	sI := getStorage[I](engine)

	// Zero values are used solely to resolve each component's id.
	comps := []CompId{
		name(*new(A)),
		name(*new(B)),
		name(*new(C)),
		name(*new(D)),
		name(*new(E)),
		name(*new(F)),
		name(*new(G)),
		name(*new(H)),
		name(*new(I)),
	}
	fl := newFilterList(comps, filters...)
	fl.regenerate(world)

	return &View9[A, B, C, D, E, F, G, H, I]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
		storageF: sF,
		storageG: sG,
		storageH: sH,
		storageI: sI,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list.
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View9[A, B, C, D, E, F, G, H, I]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G, *H, *I) {
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	var f *F
	var g *G
	var h *H
	var i *I

	if id == InvalidEntity {
		return a, b, c, d, e, f, g, h, i
	}
	loc, found := v.world.arch.Get(id)
	if !found {
		return a, b, c, d, e, f, g, h, i
	}
	// The entity's archetype must always have a lookup list.
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}

	idx := int(loc.index)
	// For each component type, point into the archetype's column when that
	// archetype actually stores the component; otherwise leave the result nil.
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		f = &s.comp[idx]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		g = &s.comp[idx]
	}
	if s, ok := v.storageH.slice.Get(loc.archId); ok {
		h = &s.comp[idx]
	}
	if s, ok := v.storageI.slice.Get(loc.archId); ok {
		i = &s.comp[idx]
	}
	return a, b, c, d, e, f, g, h, i
}
// Counts the number of entities that match this query.
func (v *View9[A, B, C, D, E, F, G, H, I]) Count() int {
	// Refresh the cached archetype list before counting.
	v.filter.regenerate(v.world)
	count := 0
	for _, archetype := range v.filter.archIds {
		list := v.world.engine.lookup[archetype]
		if list == nil {
			panic("LookupList is missing!")
		}
		count += list.Len()
	}
	return count
}
// Maps the lambda function across every entity which matched the specified filters.
// Iteration proceeds archetype by archetype. For any requested component that a
// matched archetype does not store, the corresponding pointer passed to the
// lambda is nil. Holes left by deleted entities (ids equal to InvalidEntity)
// are skipped.
func (v *View9[A, B, C, D, E, F, G, H, I]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I)) {
	v.filter.regenerate(v.world) // Refresh the cached archetype id list first

	// Scratch variables hoisted out of the archetype loop.
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	var sliceE *componentList[E]
	var compE []E
	var retE *E
	var sliceF *componentList[F]
	var compF []F
	var retF *F
	var sliceG *componentList[G]
	var compG []G
	var retG *G
	var sliceH *componentList[H]
	var compH []H
	var retH *H
	var sliceI *componentList[I]
	var compI []I
	var retI *I
	for _, archId := range v.filter.archIds {
		// A storage slice may be missing when this archetype does not hold
		// that component; the resulting nil slice is handled below.
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)

		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id

		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// Unpack each column (or nil) for this archetype.
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		// Reset the per-entity pointers so nil columns yield nil arguments.
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		retE = nil
		retF = nil
		retG = nil
		retH = nil
		retI = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			if compH != nil {
				retH = &compH[idx]
			}
			if compI != nil {
				retI = &compI[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
//
// For any requested component that a matched archetype does not store, the
// corresponding pointer passed to the lambda is nil (matching MapId's
// semantics). The lambda may be invoked concurrently from multiple
// goroutines, so it must be safe for parallel execution.
func (v *View9[A, B, C, D, E, F, G, H, I]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I)) {
	v.filter.regenerate(v.world)
	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	var sliceG *componentList[G]
	var sliceH *componentList[H]
	var sliceI *componentList[I]

	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}

	// 2. Calculate number of threads to execute with
	numThreads := runtime.NumCPU()
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads = min(totalWork, numThreads)

	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
		compG []G
		compH []H
		compI []I
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			// Consume work items until the channel is closed by the producer.
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				var retG *G
				var retH *H
				var retI *I
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue
					} // Skip if its a hole
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					if work.compG != nil {
						retG = &work.compG[idx]
					}
					if work.compH != nil {
						retH = &work.compH[idx]
					}
					if work.compI != nil {
						retI = &work.compI[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI)
				}
			}
		}()
	}

	// 3. Greedy divide work among N threads
	// Simple algorithm:
	// a. Find an evenly balanced distribution per thread
	// b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads // >= 1 because numThreads <= totalWork

	// Generate
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	var compG []G
	var compH []H
	var compI []I
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := min((i+1)*actualWorkPerThread, len(ids))
			// BUGFIX: only slice component columns that exist for this
			// archetype. The previous code sliced every column
			// unconditionally, which panics when a column is nil (the
			// archetype doesn't store that component) and end > 0 —
			// MapId handles this nil-column case, so this path must too.
			item := workItem{ids: ids[start:end]}
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			if compG != nil {
				item.compG = compG[start:end]
			}
			if compH != nil {
				item.compH = compH[start:end]
			}
			if compI != nil {
				item.compI = compI[start:end]
			}
			workChannel <- item
		}
	}
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// Calls the lambda once per matching archetype, passing the raw id slice and
// component column slices for that archetype. NOTE: unlike MapId, archetypes
// that are missing ANY of the requested component storages are skipped
// entirely, and id slices may contain holes (InvalidEntity entries) that the
// caller must skip.
func (v *View9[A, B, C, D, E, F, G, H, I]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I)) {
	v.filter.regenerate(v.world)
	// Collect one entry per archetype that has every requested column.
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	sliceListC := make([][]C, 0)
	sliceListD := make([][]D, 0)
	sliceListE := make([][]E, 0)
	sliceListF := make([][]F, 0)
	sliceListG := make([][]G, 0)
	sliceListH := make([][]H, 0)
	sliceListI := make([][]I, 0)
	for _, archId := range v.filter.archIds {
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sliceE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sliceF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sliceG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		sliceH, ok := v.storageH.slice.Get(archId)
		if !ok {
			continue
		}
		sliceI, ok := v.storageI.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
		sliceListC = append(sliceListC, sliceC.comp)
		sliceListD = append(sliceListD, sliceD.comp)
		sliceListE = append(sliceListE, sliceE.comp)
		sliceListF = append(sliceListF, sliceF.comp)
		sliceListG = append(sliceListG, sliceG.comp)
		sliceListH = append(sliceListH, sliceH.comp)
		sliceListI = append(sliceListI, sliceI.comp)
	}
	// Invoke the lambda once per collected archetype.
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx], sliceListC[idx], sliceListD[idx], sliceListE[idx], sliceListF[idx], sliceListG[idx], sliceListH[idx], sliceListI[idx],
		)
	}
}
// --------------------------------------------------------------------------------
// - View 10
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View10[A, B, C, D, E, F, G, H, I, J any] struct {
	world  *World     // The world whose entities this view reads
	filter filterList // Requested component ids plus the cached list of matching archetype ids
	// Storage handles for each requested component type, resolved once at query creation.
	storageA *componentStorage[A]
	storageB *componentStorage[B]
	storageC *componentStorage[C]
	storageD *componentStorage[D]
	storageE *componentStorage[E]
	storageF *componentStorage[F]
	storageG *componentStorage[G]
	storageH *componentStorage[H]
	storageI *componentStorage[I]
	storageJ *componentStorage[J]
}
// Initialize implements the initializer interface so that a View10 can be
// automatically created and injected into systems. It returns a fresh,
// unfiltered query for this view's component types.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	return Query10[A, B, C, D, E, F, G, H, I, J](world)
}
// Creates a View for the specified world with the specified component filters.
func Query10[A, B, C, D, E, F, G, H, I, J any](world *World, filters ...Filter) *View10[A, B, C, D, E, F, G, H, I, J] {
	// Resolve the backing storage for each component type once, up front.
	engine := world.engine
	sA := getStorage[A](engine)
	sB := getStorage[B](engine)
	sC := getStorage[C](engine)
	sD := getStorage[D](engine)
	sE := getStorage[E](engine)
	sF := getStorage[F](engine)
	sG := getStorage[G](engine)
	sH := getStorage[H](engine)
	sI := getStorage[I](engine)
	sJ := getStorage[J](engine)

	// Zero values are used solely to resolve each component's id.
	comps := []CompId{
		name(*new(A)),
		name(*new(B)),
		name(*new(C)),
		name(*new(D)),
		name(*new(E)),
		name(*new(F)),
		name(*new(G)),
		name(*new(H)),
		name(*new(I)),
		name(*new(J)),
	}
	fl := newFilterList(comps, filters...)
	fl.regenerate(world)

	return &View10[A, B, C, D, E, F, G, H, I, J]{
		world:    world,
		filter:   fl,
		storageA: sA,
		storageB: sB,
		storageC: sC,
		storageD: sD,
		storageE: sE,
		storageF: sF,
		storageG: sG,
		storageH: sH,
		storageI: sI,
		storageJ: sJ,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list.
// Read will return the value if it exists, else returns nil.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G, *H, *I, *J) {
	var a *A
	var b *B
	var c *C
	var d *D
	var e *E
	var f *F
	var g *G
	var h *H
	var i *I
	var j *J

	if id == InvalidEntity {
		return a, b, c, d, e, f, g, h, i, j
	}
	loc, found := v.world.arch.Get(id)
	if !found {
		return a, b, c, d, e, f, g, h, i, j
	}
	// The entity's archetype must always have a lookup list.
	if v.world.engine.lookup[loc.archId] == nil {
		panic("LookupList is missing!")
	}

	idx := int(loc.index)
	// For each component type, point into the archetype's column when that
	// archetype actually stores the component; otherwise leave the result nil.
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		a = &s.comp[idx]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		b = &s.comp[idx]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		c = &s.comp[idx]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		d = &s.comp[idx]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		e = &s.comp[idx]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		f = &s.comp[idx]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		g = &s.comp[idx]
	}
	if s, ok := v.storageH.slice.Get(loc.archId); ok {
		h = &s.comp[idx]
	}
	if s, ok := v.storageI.slice.Get(loc.archId); ok {
		i = &s.comp[idx]
	}
	if s, ok := v.storageJ.slice.Get(loc.archId); ok {
		j = &s.comp[idx]
	}
	return a, b, c, d, e, f, g, h, i, j
}
// Counts the number of entities that match this query.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) Count() int {
	// Refresh the cached archetype list before counting.
	v.filter.regenerate(v.world)
	count := 0
	for _, archetype := range v.filter.archIds {
		list := v.world.engine.lookup[archetype]
		if list == nil {
			panic("LookupList is missing!")
		}
		count += list.Len()
	}
	return count
}
// Maps the lambda function across every entity which matched the specified filters.
// Iteration proceeds archetype by archetype. For any requested component that a
// matched archetype does not store, the corresponding pointer passed to the
// lambda is nil. Holes left by deleted entities (ids equal to InvalidEntity)
// are skipped.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J)) {
	v.filter.regenerate(v.world) // Refresh the cached archetype id list first

	// Scratch variables hoisted out of the archetype loop.
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	var sliceE *componentList[E]
	var compE []E
	var retE *E
	var sliceF *componentList[F]
	var compF []F
	var retF *F
	var sliceG *componentList[G]
	var compG []G
	var retG *G
	var sliceH *componentList[H]
	var compH []H
	var retH *H
	var sliceI *componentList[I]
	var compI []I
	var retI *I
	var sliceJ *componentList[J]
	var compJ []J
	var retJ *J
	for _, archId := range v.filter.archIds {
		// A storage slice may be missing when this archetype does not hold
		// that component; the resulting nil slice is handled below.
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		sliceJ, _ = v.storageJ.slice.Get(archId)

		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id

		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// Unpack each column (or nil) for this archetype.
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		compJ = nil
		if sliceJ != nil {
			compJ = sliceJ.comp
		}
		// Reset the per-entity pointers so nil columns yield nil arguments.
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		retE = nil
		retF = nil
		retG = nil
		retH = nil
		retI = nil
		retJ = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			if compH != nil {
				retH = &compH[idx]
			}
			if compI != nil {
				retI = &compI[idx]
			}
			if compJ != nil {
				retJ = &compJ[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters.
// Components are split based on the number of OS threads available.
// The matched entities are chunked and fed to a worker pool, so the lambda MUST be
// safe to call concurrently from multiple goroutines. Components missing from an
// entity's archetype are passed to the lambda as nil pointers.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J)) {
	v.filter.regenerate(v.world)

	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	var sliceG *componentList[G]
	var sliceH *componentList[H]
	var sliceI *componentList[I]
	var sliceJ *componentList[J]

	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}

	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())

	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
		compG []G
		compH []H
		compI []I
		compJ []J
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			// Drain work items until the channel is closed by the producer below.
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				var retG *G
				var retH *H
				var retI *I
				var retJ *J
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					if work.compG != nil {
						retG = &work.compG[idx]
					}
					if work.compH != nil {
						retH = &work.compH[idx]
					}
					if work.compI != nil {
						retI = &work.compI[idx]
					}
					if work.compJ != nil {
						retJ = &work.compJ[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ)
				}
			}
		}()
	}

	// 3. Greedy divide work among N threads
	// Simple algorithm:
	//  a. Find an evenly balanced distribution per thread
	//  b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads

	// Generate
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	var compG []G
	var compH []H
	var compI []I
	var compJ []J
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id

		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		sliceJ, _ = v.storageJ.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		compJ = nil
		if sliceJ != nil {
			compJ = sliceJ.comp
		}

		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}

			// Bugfix: only reslice component slices that actually exist for this
			// archetype. Reslicing a nil slice with a nonzero upper bound panics,
			// and nil must be preserved so the workers' nil checks above can pass
			// nil pointers through for missing components.
			item := workItem{ids: ids[start:end]}
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			if compG != nil {
				item.compG = compG[start:end]
			}
			if compH != nil {
				item.compH = compH[start:end]
			}
			if compI != nil {
				item.compI = compI[start:end]
			}
			if compJ != nil {
				item.compJ = compJ[start:end]
			}
			workChannel <- item
		}
	}

	// Signal workers that no more work is coming, then wait for them to finish.
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// Invokes the lambda once per matched archetype, handing it the raw parallel
// slices (ids plus one component slice per generic type). Archetypes missing
// any of the required component storages are skipped.
func (v *View10[A, B, C, D, E, F, G, H, I, J]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I, j []J)) {
	v.filter.regenerate(v.world)

	// Collect one batch of parallel slices per archetype before invoking the
	// lambda, mirroring the two-phase gather/apply structure.
	type batch struct {
		ids []Id
		a   []A
		b   []B
		c   []C
		d   []D
		e   []E
		f   []F
		g   []G
		h   []H
		i   []I
		j   []J
	}
	batches := make([]batch, 0)
	for _, archId := range v.filter.archIds {
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sliceE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sliceF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sliceG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		sliceH, ok := v.storageH.slice.Get(archId)
		if !ok {
			continue
		}
		sliceI, ok := v.storageI.slice.Get(archId)
		if !ok {
			continue
		}
		sliceJ, ok := v.storageJ.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		batches = append(batches, batch{
			ids: lookup.id,
			a:   sliceA.comp,
			b:   sliceB.comp,
			c:   sliceC.comp,
			d:   sliceD.comp,
			e:   sliceE.comp,
			f:   sliceF.comp,
			g:   sliceG.comp,
			h:   sliceH.comp,
			i:   sliceI.comp,
			j:   sliceJ.comp,
		})
	}

	for _, bt := range batches {
		lambda(bt.ids,
			bt.a, bt.b, bt.c, bt.d, bt.e, bt.f, bt.g, bt.h, bt.i, bt.j,
		)
	}
}
// --------------------------------------------------------------------------------
// - View 11
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View11[A, B, C, D, E, F, G, H, I, J, K any] struct {
	world  *World     // The world whose archetype engine this view reads from
	filter filterList // Matched archetype ids; regenerated before each query operation
	// One typed component storage per generic type parameter, resolved once at construction
	storageA *componentStorage[A]
	storageB *componentStorage[B]
	storageC *componentStorage[C]
	storageD *componentStorage[D]
	storageE *componentStorage[E]
	storageF *componentStorage[F]
	storageG *componentStorage[G]
	storageH *componentStorage[H]
	storageI *componentStorage[I]
	storageJ *componentStorage[J]
	storageK *componentStorage[K]
}
// implement the initializer interface so that it can be automatically created and injected into systems
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	query := Query11[A, B, C, D, E, F, G, H, I, J, K](world)
	return query
}
// Creates a View for the specified world with the specified component filters.
func Query11[A, B, C, D, E, F, G, H, I, J, K any](world *World, filters ...Filter) *View11[A, B, C, D, E, F, G, H, I, J, K] {
	// Resolve (or lazily create) the typed storage for each component first,
	// before the filter list is built.
	storageA := getStorage[A](world.engine)
	storageB := getStorage[B](world.engine)
	storageC := getStorage[C](world.engine)
	storageD := getStorage[D](world.engine)
	storageE := getStorage[E](world.engine)
	storageF := getStorage[F](world.engine)
	storageG := getStorage[G](world.engine)
	storageH := getStorage[H](world.engine)
	storageI := getStorage[I](world.engine)
	storageJ := getStorage[J](world.engine)
	storageK := getStorage[K](world.engine)

	// Zero values exist only so that name() can resolve each component id.
	var zeroA A
	var zeroB B
	var zeroC C
	var zeroD D
	var zeroE E
	var zeroF F
	var zeroG G
	var zeroH H
	var zeroI I
	var zeroJ J
	var zeroK K
	comps := []CompId{
		name(zeroA), name(zeroB), name(zeroC), name(zeroD),
		name(zeroE), name(zeroF), name(zeroG), name(zeroH),
		name(zeroI), name(zeroJ), name(zeroK),
	}

	filterList := newFilterList(comps, filters...)
	filterList.regenerate(world)

	return &View11[A, B, C, D, E, F, G, H, I, J, K]{
		world:    world,
		filter:   filterList,
		storageA: storageA,
		storageB: storageB,
		storageC: storageC,
		storageD: storageD,
		storageE: storageE,
		storageF: storageF,
		storageG: storageG,
		storageH: storageH,
		storageI: storageI,
		storageJ: storageJ,
		storageK: storageK,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list.
// Each returned pointer is non-nil only if that component exists for the id;
// otherwise nil is returned in its place.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G, *H, *I, *J, *K) {
	if id == InvalidEntity {
		return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil
	}

	// Locate the entity's archetype and slot within it.
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil
	}

	lookup := v.world.engine.lookup[loc.archId]
	if lookup == nil {
		panic("LookupList is missing!")
	}
	index := int(loc.index)

	var retA *A
	var retB *B
	var retC *C
	var retD *D
	var retE *E
	var retF *F
	var retG *G
	var retH *H
	var retI *I
	var retJ *J
	var retK *K
	// For each storage, only take a pointer when the archetype carries that component.
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		retA = &s.comp[index]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		retB = &s.comp[index]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		retC = &s.comp[index]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		retD = &s.comp[index]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		retE = &s.comp[index]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		retF = &s.comp[index]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		retG = &s.comp[index]
	}
	if s, ok := v.storageH.slice.Get(loc.archId); ok {
		retH = &s.comp[index]
	}
	if s, ok := v.storageI.slice.Get(loc.archId); ok {
		retI = &s.comp[index]
	}
	if s, ok := v.storageJ.slice.Get(loc.archId); ok {
		retJ = &s.comp[index]
	}
	if s, ok := v.storageK.slice.Get(loc.archId); ok {
		retK = &s.comp[index]
	}

	return retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK
}
// Counts the number of entities that match this query
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) Count() (total int) {
	v.filter.regenerate(v.world)
	// Sum the live-entity count of every matched archetype.
	for _, archId := range v.filter.archIds {
		list := v.world.engine.lookup[archId]
		if list == nil {
			panic("LookupList is missing!")
		}
		total += list.Len()
	}
	return total
}
// Maps the lambda function across every entity which matched the specified filters.
// For each matched entity the lambda receives the entity id plus one pointer per
// component; a pointer is nil when that component's slice is absent from the
// entity's archetype. Pointers are only valid for the duration of the call.
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J, k *K)) {
	v.filter.regenerate(v.world)

	// Per-component working state, hoisted out of the archetype loop:
	// sliceX is the archetype's component list, compX its raw slice, retX the
	// pointer handed to the lambda.
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	var sliceE *componentList[E]
	var compE []E
	var retE *E
	var sliceF *componentList[F]
	var compF []F
	var retF *F
	var sliceG *componentList[G]
	var compG []G
	var retG *G
	var sliceH *componentList[H]
	var compH []H
	var retH *H
	var sliceI *componentList[I]
	var compI []I
	var retI *I
	var sliceJ *componentList[J]
	var compJ []J
	var retJ *J
	var sliceK *componentList[K]
	var compK []K
	var retK *K
	for _, archId := range v.filter.archIds {
		// Fetch each component list for this archetype; a nil result means the
		// archetype does not carry that component.
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		sliceJ, _ = v.storageJ.slice.Get(archId)
		sliceK, _ = v.storageK.slice.Get(archId)

		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id

		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// Unpack raw component slices; nil is preserved so the inner loop can
		// pass nil pointers through for missing components.
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		compJ = nil
		if sliceJ != nil {
			compJ = sliceJ.comp
		}
		compK = nil
		if sliceK != nil {
			compK = sliceK.comp
		}

		// Reset return pointers for this archetype; they stay nil for any
		// component whose comp slice is nil.
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		retE = nil
		retF = nil
		retG = nil
		retH = nil
		retI = nil
		retJ = nil
		retK = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			if compH != nil {
				retH = &compH[idx]
			}
			if compI != nil {
				retI = &compI[idx]
			}
			if compJ != nil {
				retJ = &compJ[idx]
			}
			if compK != nil {
				retK = &compK[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters.
// Components are split based on the number of OS threads available.
// The matched entities are chunked and fed to a worker pool, so the lambda MUST be
// safe to call concurrently from multiple goroutines. Components missing from an
// entity's archetype are passed to the lambda as nil pointers.
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J, k *K)) {
	v.filter.regenerate(v.world)

	var sliceA *componentList[A]
	var sliceB *componentList[B]
	var sliceC *componentList[C]
	var sliceD *componentList[D]
	var sliceE *componentList[E]
	var sliceF *componentList[F]
	var sliceG *componentList[G]
	var sliceH *componentList[H]
	var sliceI *componentList[I]
	var sliceJ *componentList[J]
	var sliceK *componentList[K]

	// 1. Calculate work
	// 2. Calculate number of threads to execute with
	// 3. Greedy divide work among N threads
	// 4. Execute for each in its own goroutine

	// 1. Calculate work
	totalWork := 0
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Each id represents an entity that holds the requested component(s)
		// Each hole represents a deleted entity that used to hold the requested component(s)
		totalWork += len(lookup.id) // - len(lookup.holes)
	}
	// Nothing to do if there is no work
	if totalWork == 0 {
		return
	}

	// 2. Calculate number of threads to execute with.
	// Ensure that the number of threads we plan to use is <= total amount of work
	numThreads := min(totalWork, runtime.NumCPU())

	var waitGroup sync.WaitGroup
	type workItem struct {
		ids   []Id
		compA []A
		compB []B
		compC []C
		compD []D
		compE []E
		compF []F
		compG []G
		compH []H
		compI []I
		compJ []J
		compK []K
	}
	workChannel := make(chan workItem)
	for i := 0; i < numThreads; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			// Drain work items until the channel is closed by the producer below.
			for work := range workChannel {
				var retA *A
				var retB *B
				var retC *C
				var retD *D
				var retE *E
				var retF *F
				var retG *G
				var retH *H
				var retI *I
				var retJ *J
				var retK *K
				for idx := range work.ids {
					if work.ids[idx] == InvalidEntity {
						continue // Skip if its a hole
					}
					if work.compA != nil {
						retA = &work.compA[idx]
					}
					if work.compB != nil {
						retB = &work.compB[idx]
					}
					if work.compC != nil {
						retC = &work.compC[idx]
					}
					if work.compD != nil {
						retD = &work.compD[idx]
					}
					if work.compE != nil {
						retE = &work.compE[idx]
					}
					if work.compF != nil {
						retF = &work.compF[idx]
					}
					if work.compG != nil {
						retG = &work.compG[idx]
					}
					if work.compH != nil {
						retH = &work.compH[idx]
					}
					if work.compI != nil {
						retI = &work.compI[idx]
					}
					if work.compJ != nil {
						retJ = &work.compJ[idx]
					}
					if work.compK != nil {
						retK = &work.compK[idx]
					}
					lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK)
				}
			}
		}()
	}

	// 3. Greedy divide work among N threads
	// Simple algorithm:
	//  a. Find an evenly balanced distribution per thread
	//  b. Generate all work until it gets consumed
	workPerThread := totalWork / numThreads

	// Generate
	var compA []A
	var compB []B
	var compC []C
	var compD []D
	var compE []E
	var compF []F
	var compG []G
	var compH []H
	var compI []I
	var compJ []J
	var compK []K
	for _, archId := range v.filter.archIds {
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		ids := lookup.id

		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		sliceJ, _ = v.storageJ.slice.Get(archId)
		sliceK, _ = v.storageK.slice.Get(archId)
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		compJ = nil
		if sliceJ != nil {
			compJ = sliceJ.comp
		}
		compK = nil
		if sliceK != nil {
			compK = sliceK.comp
		}

		numWorkItems := (len(ids) / workPerThread) + 1
		actualWorkPerThread := (len(ids) / numWorkItems) + 1
		for i := 0; i < numWorkItems; i++ {
			start := i * actualWorkPerThread
			end := (i + 1) * actualWorkPerThread
			if end > len(ids) {
				end = len(ids)
			}

			// Bugfix: only reslice component slices that actually exist for this
			// archetype. Reslicing a nil slice with a nonzero upper bound panics,
			// and nil must be preserved so the workers' nil checks above can pass
			// nil pointers through for missing components.
			item := workItem{ids: ids[start:end]}
			if compA != nil {
				item.compA = compA[start:end]
			}
			if compB != nil {
				item.compB = compB[start:end]
			}
			if compC != nil {
				item.compC = compC[start:end]
			}
			if compD != nil {
				item.compD = compD[start:end]
			}
			if compE != nil {
				item.compE = compE[start:end]
			}
			if compF != nil {
				item.compF = compF[start:end]
			}
			if compG != nil {
				item.compG = compG[start:end]
			}
			if compH != nil {
				item.compH = compH[start:end]
			}
			if compI != nil {
				item.compI = compI[start:end]
			}
			if compJ != nil {
				item.compJ = compJ[start:end]
			}
			if compK != nil {
				item.compK = compK[start:end]
			}
			workChannel <- item
		}
	}

	// Signal workers that no more work is coming, then wait for them to finish.
	close(workChannel)
	waitGroup.Wait()
}
// Deprecated: This API is a tentative alternative way to map
//
// Invokes the lambda once per matched archetype, handing it the raw parallel
// slices (ids plus one component slice per generic type). Archetypes missing
// any of the required component storages are skipped.
func (v *View11[A, B, C, D, E, F, G, H, I, J, K]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I, j []J, k []K)) {
	v.filter.regenerate(v.world)

	// Collect one batch of parallel slices per archetype before invoking the
	// lambda, mirroring the two-phase gather/apply structure.
	type batch struct {
		ids []Id
		a   []A
		b   []B
		c   []C
		d   []D
		e   []E
		f   []F
		g   []G
		h   []H
		i   []I
		j   []J
		k   []K
	}
	batches := make([]batch, 0)
	for _, archId := range v.filter.archIds {
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sliceE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sliceF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sliceG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		sliceH, ok := v.storageH.slice.Get(archId)
		if !ok {
			continue
		}
		sliceI, ok := v.storageI.slice.Get(archId)
		if !ok {
			continue
		}
		sliceJ, ok := v.storageJ.slice.Get(archId)
		if !ok {
			continue
		}
		sliceK, ok := v.storageK.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		batches = append(batches, batch{
			ids: lookup.id,
			a:   sliceA.comp,
			b:   sliceB.comp,
			c:   sliceC.comp,
			d:   sliceD.comp,
			e:   sliceE.comp,
			f:   sliceF.comp,
			g:   sliceG.comp,
			h:   sliceH.comp,
			i:   sliceI.comp,
			j:   sliceJ.comp,
			k:   sliceK.comp,
		})
	}

	for _, bt := range batches {
		lambda(bt.ids,
			bt.a, bt.b, bt.c, bt.d, bt.e, bt.f, bt.g, bt.h, bt.i, bt.j, bt.k,
		)
	}
}
// --------------------------------------------------------------------------------
// - View 12
// --------------------------------------------------------------------------------
// Represents a view of data in a specific world. Provides access to the components specified in the generic block
type View12[A, B, C, D, E, F, G, H, I, J, K, L any] struct {
	world  *World     // The world whose archetype engine this view reads from
	filter filterList // Matched archetype ids; regenerated before each query operation
	// One typed component storage per generic type parameter, resolved once at construction
	storageA *componentStorage[A]
	storageB *componentStorage[B]
	storageC *componentStorage[C]
	storageD *componentStorage[D]
	storageE *componentStorage[E]
	storageF *componentStorage[F]
	storageG *componentStorage[G]
	storageH *componentStorage[H]
	storageI *componentStorage[I]
	storageJ *componentStorage[J]
	storageK *componentStorage[K]
	storageL *componentStorage[L]
}
// implement the initializer interface so that it can be automatically created and injected into systems
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) Initialize(world *World) any {
	// TODO: filters need to be a part of the query type
	query := Query12[A, B, C, D, E, F, G, H, I, J, K, L](world)
	return query
}
// Creates a View for the specified world with the specified component filters.
func Query12[A, B, C, D, E, F, G, H, I, J, K, L any](world *World, filters ...Filter) *View12[A, B, C, D, E, F, G, H, I, J, K, L] {
	// Resolve (or lazily create) the typed storage for each component first,
	// before the filter list is built.
	storageA := getStorage[A](world.engine)
	storageB := getStorage[B](world.engine)
	storageC := getStorage[C](world.engine)
	storageD := getStorage[D](world.engine)
	storageE := getStorage[E](world.engine)
	storageF := getStorage[F](world.engine)
	storageG := getStorage[G](world.engine)
	storageH := getStorage[H](world.engine)
	storageI := getStorage[I](world.engine)
	storageJ := getStorage[J](world.engine)
	storageK := getStorage[K](world.engine)
	storageL := getStorage[L](world.engine)

	// Zero values exist only so that name() can resolve each component id.
	var zeroA A
	var zeroB B
	var zeroC C
	var zeroD D
	var zeroE E
	var zeroF F
	var zeroG G
	var zeroH H
	var zeroI I
	var zeroJ J
	var zeroK K
	var zeroL L
	comps := []CompId{
		name(zeroA), name(zeroB), name(zeroC), name(zeroD),
		name(zeroE), name(zeroF), name(zeroG), name(zeroH),
		name(zeroI), name(zeroJ), name(zeroK), name(zeroL),
	}

	filterList := newFilterList(comps, filters...)
	filterList.regenerate(world)

	return &View12[A, B, C, D, E, F, G, H, I, J, K, L]{
		world:    world,
		filter:   filterList,
		storageA: storageA,
		storageB: storageB,
		storageC: storageC,
		storageD: storageD,
		storageE: storageE,
		storageF: storageF,
		storageG: storageG,
		storageH: storageH,
		storageI: storageI,
		storageJ: storageJ,
		storageK: storageK,
		storageL: storageL,
	}
}
// Reads a pointer to the underlying component at the specified id.
// Read will return even if the specified id doesn't match the filter list.
// Each returned pointer is non-nil only if that component exists for the id;
// otherwise nil is returned in its place.
// If you execute any ecs.Write(...) or ecs.Delete(...) this pointer may become invalid.
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) Read(id Id) (*A, *B, *C, *D, *E, *F, *G, *H, *I, *J, *K, *L) {
	if id == InvalidEntity {
		return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil
	}

	// Locate the entity's archetype and slot within it.
	loc, ok := v.world.arch.Get(id)
	if !ok {
		return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil
	}

	lookup := v.world.engine.lookup[loc.archId]
	if lookup == nil {
		panic("LookupList is missing!")
	}
	index := int(loc.index)

	var retA *A
	var retB *B
	var retC *C
	var retD *D
	var retE *E
	var retF *F
	var retG *G
	var retH *H
	var retI *I
	var retJ *J
	var retK *K
	var retL *L
	// For each storage, only take a pointer when the archetype carries that component.
	if s, ok := v.storageA.slice.Get(loc.archId); ok {
		retA = &s.comp[index]
	}
	if s, ok := v.storageB.slice.Get(loc.archId); ok {
		retB = &s.comp[index]
	}
	if s, ok := v.storageC.slice.Get(loc.archId); ok {
		retC = &s.comp[index]
	}
	if s, ok := v.storageD.slice.Get(loc.archId); ok {
		retD = &s.comp[index]
	}
	if s, ok := v.storageE.slice.Get(loc.archId); ok {
		retE = &s.comp[index]
	}
	if s, ok := v.storageF.slice.Get(loc.archId); ok {
		retF = &s.comp[index]
	}
	if s, ok := v.storageG.slice.Get(loc.archId); ok {
		retG = &s.comp[index]
	}
	if s, ok := v.storageH.slice.Get(loc.archId); ok {
		retH = &s.comp[index]
	}
	if s, ok := v.storageI.slice.Get(loc.archId); ok {
		retI = &s.comp[index]
	}
	if s, ok := v.storageJ.slice.Get(loc.archId); ok {
		retJ = &s.comp[index]
	}
	if s, ok := v.storageK.slice.Get(loc.archId); ok {
		retK = &s.comp[index]
	}
	if s, ok := v.storageL.slice.Get(loc.archId); ok {
		retL = &s.comp[index]
	}

	return retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK, retL
}
// Counts the number of entities that match this query
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) Count() (total int) {
	v.filter.regenerate(v.world)
	// Sum the live-entity count of every matched archetype.
	for _, archId := range v.filter.archIds {
		list := v.world.engine.lookup[archId]
		if list == nil {
			panic("LookupList is missing!")
		}
		total += list.Len()
	}
	return total
}
// Maps the lambda function across every entity which matched the specified filters.
// For each matched entity the lambda receives the entity id plus one pointer per
// component; a pointer is nil when that component's slice is absent from the
// entity's archetype. Pointers are only valid for the duration of the call.
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) MapId(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J, k *K, l *L)) {
	v.filter.regenerate(v.world)

	// Per-component working state, hoisted out of the archetype loop:
	// sliceX is the archetype's component list, compX its raw slice, retX the
	// pointer handed to the lambda.
	var sliceA *componentList[A]
	var compA []A
	var retA *A
	var sliceB *componentList[B]
	var compB []B
	var retB *B
	var sliceC *componentList[C]
	var compC []C
	var retC *C
	var sliceD *componentList[D]
	var compD []D
	var retD *D
	var sliceE *componentList[E]
	var compE []E
	var retE *E
	var sliceF *componentList[F]
	var compF []F
	var retF *F
	var sliceG *componentList[G]
	var compG []G
	var retG *G
	var sliceH *componentList[H]
	var compH []H
	var retH *H
	var sliceI *componentList[I]
	var compI []I
	var retI *I
	var sliceJ *componentList[J]
	var compJ []J
	var retJ *J
	var sliceK *componentList[K]
	var compK []K
	var retK *K
	var sliceL *componentList[L]
	var compL []L
	var retL *L
	for _, archId := range v.filter.archIds {
		// Fetch each component list for this archetype; a nil result means the
		// archetype does not carry that component.
		sliceA, _ = v.storageA.slice.Get(archId)
		sliceB, _ = v.storageB.slice.Get(archId)
		sliceC, _ = v.storageC.slice.Get(archId)
		sliceD, _ = v.storageD.slice.Get(archId)
		sliceE, _ = v.storageE.slice.Get(archId)
		sliceF, _ = v.storageF.slice.Get(archId)
		sliceG, _ = v.storageG.slice.Get(archId)
		sliceH, _ = v.storageH.slice.Get(archId)
		sliceI, _ = v.storageI.slice.Get(archId)
		sliceJ, _ = v.storageJ.slice.Get(archId)
		sliceK, _ = v.storageK.slice.Get(archId)
		sliceL, _ = v.storageL.slice.Get(archId)

		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// lookup, ok := v.world.engine.lookup[archId]
		// if !ok { panic("LookupList is missing!") }
		ids := lookup.id

		// TODO - this flattened version causes a mild performance hit. But the other one combinatorially explodes. I also cant get BCE to work with it. See option 2 for higher performance.
		// Unpack raw component slices; nil is preserved so the inner loop can
		// pass nil pointers through for missing components.
		compA = nil
		if sliceA != nil {
			compA = sliceA.comp
		}
		compB = nil
		if sliceB != nil {
			compB = sliceB.comp
		}
		compC = nil
		if sliceC != nil {
			compC = sliceC.comp
		}
		compD = nil
		if sliceD != nil {
			compD = sliceD.comp
		}
		compE = nil
		if sliceE != nil {
			compE = sliceE.comp
		}
		compF = nil
		if sliceF != nil {
			compF = sliceF.comp
		}
		compG = nil
		if sliceG != nil {
			compG = sliceG.comp
		}
		compH = nil
		if sliceH != nil {
			compH = sliceH.comp
		}
		compI = nil
		if sliceI != nil {
			compI = sliceI.comp
		}
		compJ = nil
		if sliceJ != nil {
			compJ = sliceJ.comp
		}
		compK = nil
		if sliceK != nil {
			compK = sliceK.comp
		}
		compL = nil
		if sliceL != nil {
			compL = sliceL.comp
		}

		// Reset return pointers for this archetype; they stay nil for any
		// component whose comp slice is nil.
		retA = nil
		retB = nil
		retC = nil
		retD = nil
		retE = nil
		retF = nil
		retG = nil
		retH = nil
		retI = nil
		retJ = nil
		retK = nil
		retL = nil
		for idx := range ids {
			if ids[idx] == InvalidEntity {
				continue
			} // Skip if its a hole
			if compA != nil {
				retA = &compA[idx]
			}
			if compB != nil {
				retB = &compB[idx]
			}
			if compC != nil {
				retC = &compC[idx]
			}
			if compD != nil {
				retD = &compD[idx]
			}
			if compE != nil {
				retE = &compE[idx]
			}
			if compF != nil {
				retF = &compF[idx]
			}
			if compG != nil {
				retG = &compG[idx]
			}
			if compH != nil {
				retH = &compH[idx]
			}
			if compI != nil {
				retI = &compI[idx]
			}
			if compJ != nil {
				retJ = &compJ[idx]
			}
			if compK != nil {
				retK = &compK[idx]
			}
			if compL != nil {
				retL = &compL[idx]
			}
			lambda(ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK, retL)
		}
	}
}
// Maps the lambda function across every entity which matched the specified filters. Components are split based on the number of OS threads available.
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) MapIdParallel(lambda func(id Id, a *A, b *B, c *C, d *D, e *E, f *F, g *G, h *H, i *I, j *J, k *K, l *L)) {
v.filter.regenerate(v.world)
var sliceA *componentList[A]
var sliceB *componentList[B]
var sliceC *componentList[C]
var sliceD *componentList[D]
var sliceE *componentList[E]
var sliceF *componentList[F]
var sliceG *componentList[G]
var sliceH *componentList[H]
var sliceI *componentList[I]
var sliceJ *componentList[J]
var sliceK *componentList[K]
var sliceL *componentList[L]
// 1. Calculate work
// 2. Calculate number of threads to execute with
// 3. Greedy divide work among N threads
// 4. Execute for each in its own goroutine
// 1. Calculate work
totalWork := 0
for _, archId := range v.filter.archIds {
lookup := v.world.engine.lookup[archId]
if lookup == nil {
panic("LookupList is missing!")
}
// Each id represents an entity that holds the requested component(s)
// Each hole represents a deleted entity that used to hold the requested component(s)
totalWork += len(lookup.id) // - len(lookup.holes)
}
// Nothing to do if there is no work
if totalWork == 0 {
return
}
// 2. Calculate number of threads to execute with
numThreads := runtime.NumCPU()
// Ensure that the number of threads we plan to use is <= total amount of work
numThreads = min(totalWork, numThreads)
var waitGroup sync.WaitGroup
type workItem struct {
ids []Id
compA []A
compB []B
compC []C
compD []D
compE []E
compF []F
compG []G
compH []H
compI []I
compJ []J
compK []K
compL []L
}
workChannel := make(chan workItem)
for i := 0; i < numThreads; i++ {
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
for {
work, ok := <-workChannel
if !ok {
return
}
var retA *A
var retB *B
var retC *C
var retD *D
var retE *E
var retF *F
var retG *G
var retH *H
var retI *I
var retJ *J
var retK *K
var retL *L
for idx := range work.ids {
if work.ids[idx] == InvalidEntity {
continue
} // Skip if its a hole
if work.compA != nil {
retA = &work.compA[idx]
}
if work.compB != nil {
retB = &work.compB[idx]
}
if work.compC != nil {
retC = &work.compC[idx]
}
if work.compD != nil {
retD = &work.compD[idx]
}
if work.compE != nil {
retE = &work.compE[idx]
}
if work.compF != nil {
retF = &work.compF[idx]
}
if work.compG != nil {
retG = &work.compG[idx]
}
if work.compH != nil {
retH = &work.compH[idx]
}
if work.compI != nil {
retI = &work.compI[idx]
}
if work.compJ != nil {
retJ = &work.compJ[idx]
}
if work.compK != nil {
retK = &work.compK[idx]
}
if work.compL != nil {
retL = &work.compL[idx]
}
lambda(work.ids[idx], retA, retB, retC, retD, retE, retF, retG, retH, retI, retJ, retK, retL)
}
}
}()
}
// 3. Greedy divide work among N threads
// Simple algorithm:
// a. Find an evenly balanced distribution per thread
// b. Generate all work until it gets consumed
workPerThread := totalWork / numThreads
// Generate
var compA []A
var compB []B
var compC []C
var compD []D
var compE []E
var compF []F
var compG []G
var compH []H
var compI []I
var compJ []J
var compK []K
var compL []L
for _, archId := range v.filter.archIds {
lookup := v.world.engine.lookup[archId]
if lookup == nil {
panic("LookupList is missing!")
}
ids := lookup.id
sliceA, _ = v.storageA.slice.Get(archId)
sliceB, _ = v.storageB.slice.Get(archId)
sliceC, _ = v.storageC.slice.Get(archId)
sliceD, _ = v.storageD.slice.Get(archId)
sliceE, _ = v.storageE.slice.Get(archId)
sliceF, _ = v.storageF.slice.Get(archId)
sliceG, _ = v.storageG.slice.Get(archId)
sliceH, _ = v.storageH.slice.Get(archId)
sliceI, _ = v.storageI.slice.Get(archId)
sliceJ, _ = v.storageJ.slice.Get(archId)
sliceK, _ = v.storageK.slice.Get(archId)
sliceL, _ = v.storageL.slice.Get(archId)
compA = nil
if sliceA != nil {
compA = sliceA.comp
}
compB = nil
if sliceB != nil {
compB = sliceB.comp
}
compC = nil
if sliceC != nil {
compC = sliceC.comp
}
compD = nil
if sliceD != nil {
compD = sliceD.comp
}
compE = nil
if sliceE != nil {
compE = sliceE.comp
}
compF = nil
if sliceF != nil {
compF = sliceF.comp
}
compG = nil
if sliceG != nil {
compG = sliceG.comp
}
compH = nil
if sliceH != nil {
compH = sliceH.comp
}
compI = nil
if sliceI != nil {
compI = sliceI.comp
}
compJ = nil
if sliceJ != nil {
compJ = sliceJ.comp
}
compK = nil
if sliceK != nil {
compK = sliceK.comp
}
compL = nil
if sliceL != nil {
compL = sliceL.comp
}
// workPerformed := 0
start := 0
end := 0
numWorkItems := (len(ids) / workPerThread) + 1
actualWorkPerThread := (len(ids) / numWorkItems) + 1
for i := 0; i < numWorkItems; i++ {
start = i * actualWorkPerThread
end = (i + 1) * actualWorkPerThread
if end > len(ids) {
end = len(ids)
}
// workPerformed += len(ids[start:end])
workChannel <- workItem{
ids: ids[start:end],
compA: compA[start:end],
compB: compB[start:end],
compC: compC[start:end],
compD: compD[start:end],
compE: compE[start:end],
compF: compF[start:end],
compG: compG[start:end],
compH: compH[start:end],
compI: compI[start:end],
compJ: compJ[start:end],
compK: compK[start:end],
compL: compL[start:end],
}
}
// if workPerformed != len(ids) {
// panic("wrong")
// }
}
close(workChannel)
waitGroup.Wait()
}
// MapSlices iterates every archetype matched by the view's filter and calls
// lambda once per archetype, passing the archetype's raw entity-id slice and
// the parallel component slices (index i of every slice refers to the same
// entity). NOTE(review): the id slice may contain holes (InvalidEntity) for
// deleted entities — the parallel map above skips them — so callers presumably
// must tolerate holes; confirm before relying on every id being live.
// Deprecated: This API is a tentative alternative way to map
func (v *View12[A, B, C, D, E, F, G, H, I, J, K, L]) MapSlices(lambda func(id []Id, a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I, j []J, k []K, l []L)) {
	// Refresh the cached list of matching archetype ids before iterating
	v.filter.regenerate(v.world)
	// Per-archetype slices collected below; id[k] is parallel to sliceListX[k]
	id := make([][]Id, 0)
	sliceListA := make([][]A, 0)
	sliceListB := make([][]B, 0)
	sliceListC := make([][]C, 0)
	sliceListD := make([][]D, 0)
	sliceListE := make([][]E, 0)
	sliceListF := make([][]F, 0)
	sliceListG := make([][]G, 0)
	sliceListH := make([][]H, 0)
	sliceListI := make([][]I, 0)
	sliceListJ := make([][]J, 0)
	sliceListK := make([][]K, 0)
	sliceListL := make([][]L, 0)
	for _, archId := range v.filter.archIds {
		// Fetch every component storage slice; skip the archetype entirely
		// if any of the view's component storages is missing for it
		sliceA, ok := v.storageA.slice.Get(archId)
		if !ok {
			continue
		}
		sliceB, ok := v.storageB.slice.Get(archId)
		if !ok {
			continue
		}
		sliceC, ok := v.storageC.slice.Get(archId)
		if !ok {
			continue
		}
		sliceD, ok := v.storageD.slice.Get(archId)
		if !ok {
			continue
		}
		sliceE, ok := v.storageE.slice.Get(archId)
		if !ok {
			continue
		}
		sliceF, ok := v.storageF.slice.Get(archId)
		if !ok {
			continue
		}
		sliceG, ok := v.storageG.slice.Get(archId)
		if !ok {
			continue
		}
		sliceH, ok := v.storageH.slice.Get(archId)
		if !ok {
			continue
		}
		sliceI, ok := v.storageI.slice.Get(archId)
		if !ok {
			continue
		}
		sliceJ, ok := v.storageJ.slice.Get(archId)
		if !ok {
			continue
		}
		sliceK, ok := v.storageK.slice.Get(archId)
		if !ok {
			continue
		}
		sliceL, ok := v.storageL.slice.Get(archId)
		if !ok {
			continue
		}
		lookup := v.world.engine.lookup[archId]
		if lookup == nil {
			panic("LookupList is missing!")
		}
		// Record the parallel slices for this archetype
		id = append(id, lookup.id)
		sliceListA = append(sliceListA, sliceA.comp)
		sliceListB = append(sliceListB, sliceB.comp)
		sliceListC = append(sliceListC, sliceC.comp)
		sliceListD = append(sliceListD, sliceD.comp)
		sliceListE = append(sliceListE, sliceE.comp)
		sliceListF = append(sliceListF, sliceF.comp)
		sliceListG = append(sliceListG, sliceG.comp)
		sliceListH = append(sliceListH, sliceH.comp)
		sliceListI = append(sliceListI, sliceI.comp)
		sliceListJ = append(sliceListJ, sliceJ.comp)
		sliceListK = append(sliceListK, sliceK.comp)
		sliceListL = append(sliceListL, sliceL.comp)
	}
	// Invoke the callback once per collected archetype
	for idx := range id {
		lambda(id[idx],
			sliceListA[idx], sliceListB[idx], sliceListC[idx], sliceListD[idx], sliceListE[idx], sliceListF[idx], sliceListG[idx], sliceListH[idx], sliceListI[idx], sliceListJ[idx], sliceListK[idx], sliceListL[idx],
		)
	}
}
package ecs
import (
"fmt"
"math"
"sync/atomic"
"time"
"reflect" // For resourceName
)
var (
	// DefaultAllocation is the initial capacity hint used when allocating
	// world-internal collections (0 lets Go pick the default).
	DefaultAllocation = 0
)
const (
	InvalidEntity Id = 0              // Represents the default entity Id, which is invalid
	firstEntity   Id = 1              // The first reserved entity Id; NewWorld starts minting ids above this
	MaxEntity     Id = math.MaxUint32 // The largest representable entity Id
)
// World is the main data-holder. You usually pass it to other functions to do things.
type World struct {
	idCounter    atomic.Uint64                        // Monotonic counter backing NewId (threadsafe)
	nextId       Id                                   // NOTE(review): not read by the current NewId implementation (see its commented-out code)
	minId, maxId Id                                   // This is the range of Ids returned by NewId
	arch         locMap                               // Maps an entity Id to its current location (archetype + index)
	engine       *archEngine                          // Generic storage for all archetypes
	resources    map[reflect.Type]any                 // Singleton resources keyed by their reflect.Type
	observers    *internalMap[EventId, list[Handler]] // TODO: SliceMap instead of map
	cmd          *CommandQueue                        // Deferred command queue, executed after each system step
}
// NewWorld creates a new world
func NewWorld() *World {
	world := &World{
		nextId: firstEntity + 1, // Skip 0 (InvalidEntity) and 1 (firstEntity)
		minId:  firstEntity + 1,
		maxId:  MaxEntity,
		arch:   newLocMap(DefaultAllocation),
		engine: newArchEngine(),
		resources: make(map[reflect.Type]any),
		observers: newMap[EventId, list[Handler]](0),
	}
	// The command queue is itself an injectable resource tied to this world
	world.cmd = GetInjectable[*CommandQueue](world)
	return world
}
// print dumps the world and its archetype engine state to stdout (debug helper).
func (w *World) print() {
	fmt.Printf("%+v\n", *w)
	w.engine.print()
}
// Cmd returns the command queue for the world
func (w *World) Cmd() *CommandQueue {
	return w.cmd
}
// SetIdRange sets the range of Ids [min, max] that the world will use when
// creating new Ids. Potentially helpful when you have multiple worlds and
// don't want their Id space to collide.
// Both bounds must be greater than firstEntity (1), and min must not exceed max.
// Deprecated: This API is tentative. It may be better to just have the user create Ids as they see fit
func (w *World) SetIdRange(min, max Id) {
	// Bug fix: this branch previously panicked with the "max" message even
	// though it validates min.
	if min <= firstEntity {
		panic("min must be greater than 1")
	}
	if max <= firstEntity {
		panic("max must be greater than 1")
	}
	if min > max {
		panic("min must be less than max!")
	}
	w.minId = min
	w.maxId = max
}
// NewId creates a new Id which can then be used to create an entity. This is threadsafe.
//
// Ids are drawn from a monotonically increasing atomic counter mapped into the
// configured [minId, maxId] range (inclusive), so ids wrap around and repeat
// once the counter exceeds the size of the range.
func (w *World) NewId() Id {
	// atomic Add replaces the old CompareAndSwap retry loop: it atomically
	// increments and returns the new value, so the value this caller claimed
	// is Add(1)-1. Same semantics, no spinning.
	val := w.idCounter.Add(1) - 1
	// The +1 makes the range inclusive of maxId (the previous modulo could
	// never yield maxId, contradicting the documented range). No overflow:
	// minId is always > 1 (enforced by SetIdRange and NewWorld), so
	// maxId-minId+1 <= MaxUint32-1.
	return (Id(val) % (w.maxId - w.minId + 1)) + w.minId
}
// Spawn creates a new entity holding the supplied components and returns its Id.
func (world *World) Spawn(comp ...Component) Id {
	id := world.NewId()
	world.spawn(id, comp...)
	return id
}
// spawn writes comp to a brand-new entity with the given id.
// Precondition: id must not already exist in the world (no check is made here).
func (world *World) spawn(id Id, comp ...Component) {
	// Id does not yet exist, we need to add it for the first time
	mask := buildArchMask(comp...)
	archId := world.engine.getArchetypeId(mask)
	// Write all components to that archetype
	index := world.engine.spawn(archId, id, comp...)
	world.arch.Put(id, entLoc{archId, uint32(index)})
	// Run any onAdd hooks queued for the components just written
	world.engine.runFinalizedHooks(id)
}
// hasCompId reports whether the entity identified by id currently holds the
// component identified by compId. Unknown entities report false.
func (world *World) hasCompId(id Id, compId CompId) bool {
	if loc, found := world.arch.Get(id); found {
		return world.engine.lookup[loc.archId].mask.hasComponent(compId)
	}
	return false
}
// Write writes components to the entity specified at id. This API can potentially break if you call it inside of a loop. Specifically, if you cause the archetype of the entity to change by writing a new component, then the loop may act in mysterious ways.
// Deprecated: This API is tentative, I might replace it with something similar to bevy commands to alleviate the above concern
func Write(world *World, id Id, comp ...Component) {
	world.Write(id, comp...)
}
// Write attaches (or overwrites) the supplied components on the entity id.
// If the entity does not exist yet, it is spawned fresh with those components.
// Runs any pending onAdd hooks afterwards.
func (world *World) Write(id Id, comp ...Component) {
	if len(comp) == 0 {
		return // No components supplied, nothing to write
	}
	if loc, exists := world.arch.Get(id); exists {
		// Entity already exists: rewrite (and possibly move) its archetype
		world.arch.Put(id, world.engine.rewriteArch(loc, id, comp...))
	} else {
		// Entity is new: allocate it for the first time
		world.spawn(id, comp...)
	}
	world.engine.runFinalizedHooks(id)
}
// writeBundler applies every component collected in the Bundler to the entity
// id, first moving/allocating the entity to the archetype for b.archMask.
func (w *World) writeBundler(id Id, b *Bundler) {
	newLoc := w.allocateMove(id, b.archMask)
	// W captures the destination slot so each component can write itself there
	wd := W{
		engine: w.engine,
		archId: newLoc.archId,
		index:  int(newLoc.index),
	}
	// Write only the components that were actually set on the bundler
	for i := CompId(0); i <= b.maxComponentIdAdded; i++ {
		if !b.Set[i] {
			continue
		}
		b.Components[i].CompWrite(wd)
	}
	w.engine.runFinalizedHooks(id)
}
// func (world *World) GetArchetype(comp ...Component) archetypeId {
// mask := buildArchMask(comp...)
// return world.engine.getArchetypeId(mask)
// }
// // Note: This returns the index of the location allocated
// func (world *World) Allocate(id Id, archId archetypeId) int {
// return world.allocate(id, world.engine.dcr.revArchMask[archId])
// }
// allocateMove allocates an index for the id at the specified addMask location:
//  1. If the id already exists, an archetype move will happen
//  2. If the id doesn't exist, then the addMask is the newMask and the entity will be allocated there
// Returns the entity's (possibly new) location. A blank addMask is a no-op.
func (world *World) allocateMove(id Id, addMask archetypeMask) entLoc {
	if addMask == blankArchMask {
		// Nothing to allocate, aka do nothing
		loc, _ := world.arch.Get(id)
		// TODO: Technically this is some kind of error if id isn't set
		return loc
	}
	loc, ok := world.arch.Get(id)
	if ok {
		// Calculate the new mask based on the bitwise or of the old and added masks
		lookup := world.engine.lookup[loc.archId]
		oldMask := lookup.mask
		newMask := oldMask.bitwiseOr(addMask)
		// If the new mask matches the old mask, then we don't need to move anything
		if oldMask == newMask {
			return loc
		}
		newLoc := world.engine.moveArchetype(loc, newMask, id)
		world.arch.Put(id, newLoc)
		// Queue onAdd hooks only for components that are genuinely new (diff vs oldMask)
		world.engine.finalizeOnAdd = markComponentDiff(world.engine.finalizeOnAdd, addMask, oldMask)
		return newLoc
	} else {
		// Id does not yet exist, we need to add it for the first time
		archId := world.engine.getArchetypeId(addMask)
		// Write all components to that archetype
		newIndex := world.engine.allocate(archId, id)
		newLoc := entLoc{archId, uint32(newIndex)}
		world.arch.Put(id, newLoc)
		// Every component in addMask is new here, so queue hooks for all of them
		world.engine.finalizeOnAdd = markComponentMask(world.engine.finalizeOnAdd, addMask)
		return newLoc
	}
}
// deleteMask removes the components named in deleteMask from the entity.
// No-op if the entity doesn't exist or already lacks all of those components.
// If no components would remain, the whole entity is deleted instead.
// (Note: despite the old comment, this function returns nothing.)
func (world *World) deleteMask(id Id, deleteMask archetypeMask) {
	loc, ok := world.arch.Get(id)
	if !ok {
		return
	}
	// 1. calculate the destination mask
	lookup := world.engine.lookup[loc.archId]
	oldMask := lookup.mask
	newMask := oldMask.bitwiseClear(deleteMask)
	// If the new mask requires the removal of all components, then just delete the current entity
	if newMask == blankArchMask {
		Delete(world, id)
		return
	}
	// If the new mask matches the old mask, then we don't need to move anything
	if oldMask == newMask {
		return
	}
	// 2. Move all components from source arch to dest arch
	newLoc := world.engine.moveArchetypeDown(loc, newMask, id)
	world.arch.Put(id, newLoc)
}
// Delete removes the entire entity specified by id from the world.
// This is safe to call inside maps and loops; it takes effect immediately:
//  1. The high-level id -> location lookup is deleted right away.
//  2. A "hole" is left in the archetype's storage, cleaned up later.
// Returns true if the entity existed and was deleted, false otherwise.
func Delete(world *World, id Id) bool {
	loc, exists := world.arch.Get(id)
	if !exists {
		return false
	}
	world.arch.Delete(id)
	world.engine.TagForDeletion(loc, id)
	return true
}
// DeleteComponent removes the specified components from an entity.
// It does nothing if the entity doesn't exist, silently skips components the
// entity doesn't hold, and deletes the entire entity if nothing remains.
func DeleteComponent(world *World, id Id, comp ...Component) {
	if len(comp) == 0 {
		return
	}
	world.deleteMask(id, buildArchMask(comp...))
}
// Exists returns true if the entity exists in the world else it returns false
func (world *World) Exists(id Id) bool {
	return world.arch.Has(id)
}
// --------------------------------------------------------------------------------
// - Observers
// --------------------------------------------------------------------------------
// Trigger runs every observer registered for the event's EventId, passing the
// supplied entity id along. Does nothing when no observers are registered.
func (w *World) Trigger(event Event, id Id) {
	if handlers, registered := w.observers.Get(event.EventId()); registered {
		for _, h := range handlers.list {
			h.Run(id, event)
		}
	}
}
// AddObserver registers a handler to run whenever its trigger event fires.
// Multiple observers may be registered for the same event.
func (w *World) AddObserver(handler Handler) {
	handlers, found := w.observers.Get(handler.EventTrigger())
	if !found {
		// First observer for this event: start a fresh list
		handlers = newList[Handler]()
	}
	handlers.Add(handler)
	w.observers.Put(handler.EventTrigger(), handlers)
}
// SetHookOnAdd registers a hook that fires when comp is added to an entity.
// You may only register one hook per component, else it will panic.
func (w *World) SetHookOnAdd(comp Component, handler Handler) {
	if w.engine.onAddHooks[comp.CompId()] != nil {
		panic("AddHook: You may only register one hook per component")
	}
	w.engine.onAddHooks[comp.CompId()] = handler
}
// --------------------------------------------------------------------------------
// - Resources
// --------------------------------------------------------------------------------
// resourceName derives the map key under which a resource is stored: the
// dynamic reflect.Type of the supplied value (a pointer type in practice,
// since PutResource/GetResource always pass pointers).
func resourceName(t any) reflect.Type {
	key := reflect.TypeOf(t)
	return key
}
// PutResource stores resource in the world, keyed by its pointer type.
// Storing a second resource of the same type overwrites the first.
// TODO: Should I force people to do pointers?
func PutResource[T any](world *World, resource *T) {
	world.resources[resourceName(resource)] = resource
}
// GetResource fetches the resource of type T from the world, or nil when no
// resource of that type has been stored.
func GetResource[T any](world *World) *T {
	var zero T
	stored, found := world.resources[resourceName(&zero)]
	if !found {
		return nil
	}
	return stored.(*T)
}
// --------------------------------------------------------------------------------
// - Systems
// --------------------------------------------------------------------------------
// StepSystemList steps each system once with dt, flushing the world's command
// queue after every system, and returns the total elapsed wall-clock time.
func (w *World) StepSystemList(dt time.Duration, systems ...System) time.Duration {
	start := time.Now()
	for _, sys := range systems {
		sys.step(dt)
		// Apply any commands the system queued before the next system runs
		w.cmd.Execute()
	}
	return time.Since(start)
}