Compare commits


No commits in common. '4451fe4224b5e3c89e6a15295bf50630e2ac97f9' and '6bfa6ef65af6c2501c4fa509f418ea0ca5c593e0' have entirely different histories.

.gitmodules

@@ -1,3 +1,7 @@
[submodule "raylib-zig"]
path = raylib-zig
url = https://github.com/Not-Nik/raylib-zig.git
[submodule "zig-ecs"]
path = zig-ecs
url = https://github.com/prime31/zig-ecs.git
branch = Patch

@@ -0,0 +1 @@
Subproject commit abf785549393a14fd5d0350899e0786fa32d88c5

@@ -1,6 +0,0 @@
zig-cache
zig-arm-cache
/docs/
.DS_Store

@@ -1,13 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "lldb Debug ecs binary",
"type": "lldb",
"request": "launch",
"program": "${workspaceFolder}/zig-cache/bin/ecs",
"args": [],
"preLaunchTask": "Build Project",
}
]
}

@@ -1,122 +0,0 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"options": {
"env": {
"ZIG_SYSTEM_LINKER_HACK": "1"
}
},
"tasks": [
{
"label": "Build Project",
"type": "shell",
"command": "zig build",
"problemMatcher": [
"$gcc"
]
},
{
"label": "Build and Run Project",
"type": "shell",
"command": "zig build run",
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"clear": true
}
},
{
"label": "Build and Run Project (x64 on arm)",
"type": "shell",
"command": "~/zig/zig-x64/zig build run",
"problemMatcher": [
"$gcc"
],
"group": "build",
"presentation": {
"clear": true
}
},
{
"label": "Build and Run Project (release-fast)",
"type": "shell",
"command": "zig build run -Drelease-fast",
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"clear": true
}
},
{
"label": "Build and Run Project (release-small)",
"type": "shell",
"command": "zig build run -Drelease-small",
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"clear": true
}
},
{
"label": "Test Project",
"type": "shell",
"command": "zig build test",
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"clear": true
}
},
{
"label": "Build and Run Current File",
"type": "shell",
"command": "zig run ${file}",
"problemMatcher": [
"$gcc"
],
"presentation": {
"clear": true
},
"group": {
"kind": "build",
"isDefault": true
}
},
{
"label": "Build and Run Tests in Current File",
"type": "shell",
"command": "zig test ${file}",
"problemMatcher": [
"$gcc"
],
"presentation": {
"clear": true
},
"group": {
"kind": "build",
"isDefault": true
}
}
]
}

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 prime31
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,46 +0,0 @@
# Zig ECS
Zig ECS is a Zig port of the fantastic [Entt](https://github.com/skypjack/entt). Entt is _highly_ templated C++ code which, depending on your opinion, is either a good thing or Satan itself in code form. Zig doesn't have the same concept as C++ templates (thank goodness!), so the templated code was changed over to use Zig's generics and compile-time metaprogramming.
## What does a zigified Entt look like?
Below are examples of a View and a Group, the two main ways to work with entities in the ECS, along with the scaffolding code.
Declare some structs to work with:
```zig
pub const Velocity = struct { x: f32, y: f32 };
pub const Position = struct { x: f32, y: f32 };
```
Set up the Registry, which holds the entity data and is where we run our queries:
```zig
var reg = ecs.Registry.init(std.testing.allocator);
```
Create a couple of entities and add some components to them:
```zig
var entity = reg.create();
reg.add(entity, Position{ .x = 0, .y = 0 });
reg.add(entity, Velocity{ .x = 5, .y = 7 });
...
```
Create and iterate a View that matches all entities with a `Velocity` and `Position` component:
```zig
var view = reg.view(.{ Velocity, Position }, .{});
var iter = view.iterator();
while (iter.next()) |entity| {
const pos = view.getConst(Position, entity); // readonly copy
var vel = view.get(Velocity, entity); // mutable
}
```
The same example using an owning Group:
```zig
var group = reg.group(.{ Velocity, Position }, .{}, .{});
group.each(each);
fn each(e: struct { vel: *Velocity, pos: *Position }) void {
e.pos.*.x += e.vel.x;
e.pos.*.y += e.vel.y;
}
```
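An owning group can also be iterated with a typed iterator that yields pointers to the owned components. A minimal sketch (the field names in the struct are arbitrary):
```zig
var group_iter = group.iterator(struct { vel: *Velocity, pos: *Position });
while (group_iter.next()) |e| {
// group_iter.entity() returns the entity currently being iterated
e.pos.*.x += e.vel.x;
e.pos.*.y += e.vel.y;
}
```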

@@ -1,104 +0,0 @@
const std = @import("std");
const Builder = std.build.Builder;
const builtin = @import("builtin");
pub fn build(b: *Builder) void {
const optimize = b.standardOptimizeOption(.{});
const ecs_module = b.addModule("zig-ecs", .{
.source_file = std.build.FileSource{ .path = "src/ecs.zig" },
});
// use a different cache folder for macos arm builds
b.cache_root = .{
.handle = std.fs.cwd(),
.path = if (builtin.os.tag == .macos and builtin.target.cpu.arch == .aarch64) "zig-arm-cache" else "zig-cache",
};
const examples = [_][2][]const u8{
[_][]const u8{ "view_vs_group", "examples/view_vs_group.zig" },
[_][]const u8{ "group_sort", "examples/group_sort.zig" },
[_][]const u8{ "simple", "examples/simple.zig" },
};
for (examples, 0..) |example, i| {
const name = if (i == 0) "ecs" else example[0];
const source = example[1];
var exe = b.addExecutable(.{
.name = name,
.root_source_file = std.build.FileSource{ .path = source },
.optimize = optimize,
});
// exe.setOutputDir(std.fs.path.join(b.allocator, &[_][]const u8{ b.cache_root, "bin" }) catch unreachable);
// exe.output_dirname_source = .{ .path = std.fs.path.join(b.allocator, &[_][]const u8{ b.cache_root.path.?, "bin" }) catch unreachable, .step = &exe.step };
exe.addModule("ecs", ecs_module);
exe.linkLibC();
const docs = exe;
// docs.emit_docs = .emit;
const doc = b.step(b.fmt("{s}-docs", .{name}), "Generate documentation");
doc.dependOn(&docs.step);
const run_cmd = b.addRunArtifact(exe);
const exe_step = b.step(name, b.fmt("run {s}.zig", .{name}));
exe_step.dependOn(&run_cmd.step);
// first element in the list is added as "run" so "zig build run" works
if (i == 0) {
const run_exe_step = b.step("run", b.fmt("run {s}.zig", .{name}));
run_exe_step.dependOn(&run_cmd.step);
}
}
// internal tests
const internal_test_step = b.addTest(.{
.root_source_file = std.build.FileSource{ .path = "src/tests.zig" },
.optimize = optimize,
});
// public api tests
const test_step = b.addTest(.{
.root_source_file = std.build.FileSource{ .path = "tests/tests.zig" },
.optimize = optimize,
});
test_step.addModule("ecs", ecs_module);
const test_cmd = b.step("test", "Run the tests");
test_cmd.dependOn(&internal_test_step.step);
test_cmd.dependOn(&test_step.step);
}
pub const LibType = enum(i32) {
static,
dynamic, // requires DYLD_LIBRARY_PATH to point to the dylib path
exe_compiled,
};
pub fn getModule(b: *std.Build, comptime prefix_path: []const u8) *std.build.Module {
return b.addModule("zig-ecs", .{
.source_file = .{ .path = prefix_path ++ "/src/ecs.zig" },
});
}
/// prefix_path is used to add package paths. It should be the same path used to include this build file
pub fn linkArtifact(b: *Builder, artifact: *std.build.LibExeObjStep, _: std.build.Target, lib_type: LibType, comptime prefix_path: []const u8) void {
const optimize = b.standardOptimizeOption(.{});
switch (lib_type) {
.static => {
const lib = b.addStaticLibrary(.{ .name = "ecs", .root_source_file = .{ .path = "ecs.zig" }, .optimize = optimize });
lib.install();
artifact.linkLibrary(lib);
},
.dynamic => {
const lib = b.addSharedLibrary(.{ .name = "ecs", .root_source_file = .{ .path = "ecs.zig" }, .optimize = optimize });
lib.install();
artifact.linkLibrary(lib);
},
else => {},
}
artifact.addModule("ecs", getModule(b, prefix_path));
}
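// A hedged sketch (not part of this file) of how a consuming project's build.zig could wire up the
// module via getModule above. The "libs/zig-ecs" prefix path, executable name, and root source file
// are assumptions for illustration only.
//
//     const std = @import("std");
//     const ecs_build = @import("libs/zig-ecs/build.zig");
//
//     pub fn build(b: *std.Build) void {
//         const exe = b.addExecutable(.{
//             .name = "game",
//             .root_source_file = .{ .path = "src/main.zig" },
//             .optimize = b.standardOptimizeOption(.{}),
//         });
//         // makes `@import("ecs")` resolve to zig-ecs' src/ecs.zig
//         exe.addModule("ecs", ecs_build.getModule(b, "libs/zig-ecs"));
//     }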

@@ -1,64 +0,0 @@
const std = @import("std");
const ecs = @import("ecs");
// override the EntityTraits used by ecs
pub const EntityTraits = ecs.EntityTraitsType(.medium);
pub const Velocity = struct { x: f32, y: f32 };
pub const Position = struct { x: f32, y: f32 };
const total_entities: usize = 10000;
/// logs the timing for sorting an owning group with 10,000 entities
pub fn main() !void {
var reg = ecs.Registry.init(std.heap.c_allocator);
defer reg.deinit();
createEntities(&reg);
owningGroup(&reg);
}
fn createEntities(reg: *ecs.Registry) void {
var r = std.rand.DefaultPrng.init(666);
var timer = std.time.Timer.start() catch unreachable;
var i: usize = 0;
while (i < total_entities) : (i += 1) {
var e1 = reg.create();
reg.add(e1, Position{ .x = 1, .y = r.random().float(f32) * 100 });
reg.add(e1, Velocity{ .x = 1, .y = r.random().float(f32) * 100 });
}
var end = timer.lap();
std.debug.print("create {d} entities: {d}\n", .{ total_entities, @as(f64, @floatFromInt(end)) / 1000000000 });
}
fn owningGroup(reg: *ecs.Registry) void {
var group = reg.group(.{ Velocity, Position }, .{}, .{});
// var group_iter = group.iterator(struct { vel: *Velocity, pos: *Position });
// while (group_iter.next()) |e| {
// std.debug.print("pos.y {d:.3}, ent: {}\n", .{e.pos.y, group_iter.entity()});
// }
const SortContext = struct {
fn sort(_: void, a: Position, b: Position) bool {
return a.y < b.y;
}
};
var timer = std.time.Timer.start() catch unreachable;
group.sort(Position, {}, SortContext.sort);
var end = timer.lap();
std.debug.print("group (sort): {d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
timer.reset();
group.sort(Position, {}, SortContext.sort);
end = timer.lap();
std.debug.print("group (sort 2): {d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
// var group_iter2 = group.iterator(struct { vel: *Velocity, pos: *Position });
// while (group_iter2.next()) |e| {
// std.debug.print("pos.y {d:.3}, ent: {}\n", .{e.pos.y, group_iter2.entity()});
// }
}

@@ -1,47 +0,0 @@
const std = @import("std");
const ecs = @import("ecs");
// override the EntityTraits used by ecs
pub const EntityTraits = ecs.EntityTraitsType(.small);
pub const Velocity = struct { x: f32, y: f32 };
pub const Position = struct { x: f32, y: f32 };
pub fn main() !void {
var reg = ecs.Registry.init(std.heap.c_allocator);
defer reg.deinit();
var e1 = reg.create();
reg.add(e1, Position{ .x = 0, .y = 0 });
reg.add(e1, Velocity{ .x = 5, .y = 7 });
var e2 = reg.create();
reg.add(e2, Position{ .x = 10, .y = 10 });
reg.add(e2, Velocity{ .x = 15, .y = 17 });
var view = reg.view(.{ Velocity, Position }, .{});
var iter = view.iterator();
while (iter.next()) |entity| {
var pos = view.get(Position, entity);
const vel = view.getConst(Velocity, entity);
std.debug.print(
"entity: {}, pos: (x = {d}, y = {d}), vel: (x = {d}, y = {d})\n",
.{ entity, pos.x, pos.y, vel.x, vel.y },
);
pos.*.x += vel.x;
pos.*.y += vel.y;
}
std.debug.print("---- resetting iter\n", .{});
iter.reset();
while (iter.next()) |entity| {
const pos = view.getConst(Position, entity);
const vel = view.getConst(Velocity, entity);
std.debug.print(
"entity: {}, pos: (x = {d}, y = {d}), vel: (x = {d}, y = {d})\n",
.{ entity, pos.x, pos.y, vel.x, vel.y },
);
}
}

@@ -1,119 +0,0 @@
const std = @import("std");
const ecs = @import("ecs");
// override the EntityTraits used by ecs
pub const EntityTraits = ecs.EntityTraitsType(.medium);
pub const Velocity = struct { x: f32, y: f32 };
pub const Position = struct { x: f32, y: f32 };
/// logs the timing for views vs non-owning groups vs owning groups with 1,000,000 entities
pub fn main() !void {
var reg = ecs.Registry.init(std.heap.c_allocator);
defer reg.deinit();
// var timer = try std.time.Timer.start();
createEntities(&reg);
iterateView(&reg);
nonOwningGroup(&reg);
owningGroup(&reg);
}
fn createEntities(reg: *ecs.Registry) void {
var timer = std.time.Timer.start() catch unreachable;
var i: usize = 0;
while (i < 1000000) : (i += 1) {
var e1 = reg.create();
reg.add(e1, Position{ .x = 1, .y = 1 });
reg.add(e1, Velocity{ .x = 1, .y = 1 });
}
var end = timer.lap();
std.debug.print("create entities: \t{d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
}
fn iterateView(reg: *ecs.Registry) void {
std.debug.print("--- multi-view ---\n", .{});
var view = reg.view(.{ Velocity, Position }, .{});
var timer = std.time.Timer.start() catch unreachable;
var iter = view.entityIterator();
while (iter.next()) |entity| {
var pos = view.get(Position, entity);
const vel = view.getConst(Velocity, entity);
pos.*.x += vel.x;
pos.*.y += vel.y;
}
var end = timer.lap();
std.debug.print("view (iter): \t{d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
}
fn nonOwningGroup(reg: *ecs.Registry) void {
std.debug.print("--- non-owning ---\n", .{});
var timer = std.time.Timer.start() catch unreachable;
var group = reg.group(.{}, .{ Velocity, Position }, .{});
var end = timer.lap();
std.debug.print("group (create): {d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
timer.reset();
var group_iter = group.iterator();
while (group_iter.next()) |entity| {
var pos = group.get(Position, entity);
const vel = group.getConst(Velocity, entity);
pos.*.x += vel.x;
pos.*.y += vel.y;
}
end = timer.lap();
std.debug.print("group (iter): \t{d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
}
fn owningGroup(reg: *ecs.Registry) void {
std.debug.print("--- owning ---\n", .{});
var timer = std.time.Timer.start() catch unreachable;
var group = reg.group(.{ Velocity, Position }, .{}, .{});
var end = timer.lap();
std.debug.print("group (create): {d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
timer.reset();
var group_iter = group.iterator(struct { vel: *Velocity, pos: *Position });
while (group_iter.next()) |e| {
e.pos.*.x += e.vel.x;
e.pos.*.y += e.vel.y;
}
end = timer.lap();
std.debug.print("group (iter): \t{d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
timer.reset();
group.each(each);
end = timer.lap();
std.debug.print("group (each): \t{d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
timer.reset();
// var storage = reg.assure(Velocity);
// var vel = storage.instances.items;
var pos = reg.assure(Position).instances.items;
var index: usize = group.group_data.current;
while (true) {
if (index == 0) break;
index -= 1;
pos[index].x += pos[index].x;
pos[index].y += pos[index].y;
}
end = timer.lap();
std.debug.print("group (direct): {d}\n", .{@as(f64, @floatFromInt(end)) / 1000000000});
}
fn each(e: struct { vel: *Velocity, pos: *Position }) void {
e.pos.*.x += e.vel.x;
e.pos.*.y += e.vel.y;
}

@@ -1,19 +0,0 @@
// ecs
pub const EntityTraitsType = @import("ecs/entity.zig").EntityTraitsType;
// TODO: remove me. this is just for testing
pub const ComponentStorage = @import("ecs/component_storage.zig").ComponentStorage;
pub const Entity = @import("ecs/registry.zig").Entity;
pub const Registry = @import("ecs/registry.zig").Registry;
pub const EntityHandles = @import("ecs/registry.zig").EntityHandles;
pub const BasicView = @import("ecs/views.zig").BasicView;
pub const BasicMultiView = @import("ecs/views.zig").BasicMultiView;
pub const BasicGroup = @import("ecs/groups.zig").BasicGroup;
pub const OwningGroup = @import("ecs/groups.zig").OwningGroup;
pub const SparseSet = @import("ecs/sparse_set.zig").SparseSet;
pub const utils = @import("ecs/utils.zig");
// signals
pub const Signal = @import("signals/signal.zig").Signal;
pub const Dispatcher = @import("signals/dispatcher.zig").Dispatcher;

@@ -1,88 +0,0 @@
const std = @import("std");
const Registry = @import("registry.zig").Registry;
const Entity = @import("registry.zig").Entity;
pub const Actor = struct {
registry: *Registry,
entity: Entity = undefined,
pub fn init(registry: *Registry) Actor {
var reg = registry;
return .{
.registry = registry,
.entity = reg.create(),
};
}
pub fn deinit(self: *Actor) void {
self.registry.destroy(self.entity);
}
pub fn add(self: *Actor, value: anytype) void {
self.registry.add(self.entity, value);
}
pub fn addTyped(self: *Actor, comptime T: type, value: T) void {
self.registry.addTyped(T, self.entity, value);
}
pub fn remove(self: *Actor, comptime T: type) void {
self.registry.remove(T, self.entity);
}
pub fn has(self: *Actor, comptime T: type) bool {
return self.registry.has(T, self.entity);
}
pub fn get(self: *Actor, comptime T: type) *T {
return self.registry.get(T, self.entity);
}
pub fn tryGet(self: *Actor, comptime T: type) ?*T {
return self.registry.tryGet(T, self.entity);
}
};
test "actor" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var actor = Actor.init(&reg);
defer actor.deinit();
std.debug.assert(!actor.has(f32));
actor.addTyped(f32, 67.45);
if (actor.tryGet(f32)) |val| {
try std.testing.expectEqual(val.*, 67.45);
}
actor.addTyped(u64, 8888);
try std.testing.expectEqual(actor.get(u64).*, 8888);
std.debug.assert(actor.has(u64));
actor.remove(u64);
std.debug.assert(!actor.has(u64));
}
test "actor structs" {
const Velocity = struct { x: f32, y: f32 };
const Position = struct { x: f32 = 0, y: f32 = 0 };
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var actor = Actor.init(&reg);
defer actor.deinit();
actor.add(Velocity{ .x = 5, .y = 10 });
actor.add(Position{});
var vel = actor.get(Velocity);
var pos = actor.get(Position);
pos.*.x += vel.x;
pos.*.y += vel.y;
try std.testing.expectEqual(actor.get(Position).*.x, 5);
try std.testing.expectEqual(actor.get(Position).*.y, 10);
}

@@ -1,451 +0,0 @@
const std = @import("std");
const utils = @import("utils.zig");
const SparseSet = @import("sparse_set.zig").SparseSet;
const Signal = @import("../signals/signal.zig").Signal;
const Sink = @import("../signals/sink.zig").Sink;
/// Stores an ArrayList of components along with a SparseSet of entities
pub fn ComponentStorage(comptime Component: type, comptime Entity: type) type {
std.debug.assert(!utils.isComptime(Component));
// empty (zero-sized) structs will not have an array created
const is_empty_struct = @sizeOf(Component) == 0;
// HACK: due to this being stored as untyped ptrs, when deinit is called we are cast to a Component of some random
// non-zero-sized type. That will always make is_empty_struct false in deinit, so we can't use it. Instead, we stick
// a small dummy struct in the instances ArrayList so it can safely be deallocated.
// Perhaps we should just allocate instances with a dummy allocator or the tmp allocator?
comptime var ComponentOrDummy = if (is_empty_struct) struct { dummy: u1 } else Component;
return struct {
const Self = @This();
set: *SparseSet(Entity),
instances: std.ArrayList(ComponentOrDummy),
allocator: ?std.mem.Allocator,
/// doesn't really belong here...used to denote group ownership
super: usize = 0,
safeDeinit: *const fn (*Self) void,
safeSwap: *const fn (*Self, Entity, Entity, bool) void,
safeRemoveIfContains: *const fn (*Self, Entity) void,
construction: Signal(Entity),
update: Signal(Entity),
destruction: Signal(Entity),
pub fn init(allocator: std.mem.Allocator) Self {
var store = Self{
.set = SparseSet(Entity).initPtr(allocator),
.instances = undefined,
.safeDeinit = struct {
fn deinit(self: *Self) void {
if (!is_empty_struct) {
self.instances.deinit();
}
}
}.deinit,
.safeSwap = struct {
fn swap(self: *Self, lhs: Entity, rhs: Entity, instances_only: bool) void {
if (!is_empty_struct) {
std.mem.swap(Component, &self.instances.items[self.set.index(lhs)], &self.instances.items[self.set.index(rhs)]);
}
if (!instances_only) self.set.swap(lhs, rhs);
}
}.swap,
.safeRemoveIfContains = struct {
fn removeIfContains(self: *Self, entity: Entity) void {
if (self.contains(entity)) {
self.remove(entity);
}
}
}.removeIfContains,
.allocator = null,
.construction = Signal(Entity).init(allocator),
.update = Signal(Entity).init(allocator),
.destruction = Signal(Entity).init(allocator),
};
if (!is_empty_struct) {
store.instances = std.ArrayList(ComponentOrDummy).init(allocator);
}
return store;
}
pub fn initPtr(allocator: std.mem.Allocator) *Self {
var store = allocator.create(Self) catch unreachable;
store.set = SparseSet(Entity).initPtr(allocator);
if (!is_empty_struct) {
store.instances = std.ArrayList(ComponentOrDummy).init(allocator);
}
store.allocator = allocator;
store.super = 0;
store.construction = Signal(Entity).init(allocator);
store.update = Signal(Entity).init(allocator);
store.destruction = Signal(Entity).init(allocator);
// since we are stored as a pointer, we need to capture this
store.safeDeinit = struct {
fn deinit(self: *Self) void {
if (!is_empty_struct) {
self.instances.deinit();
}
}
}.deinit;
store.safeSwap = struct {
fn swap(self: *Self, lhs: Entity, rhs: Entity, instances_only: bool) void {
if (!is_empty_struct) {
std.mem.swap(Component, &self.instances.items[self.set.index(lhs)], &self.instances.items[self.set.index(rhs)]);
}
if (!instances_only) self.set.swap(lhs, rhs);
}
}.swap;
store.safeRemoveIfContains = struct {
fn removeIfContains(self: *Self, entity: Entity) void {
if (self.contains(entity)) {
self.remove(entity);
}
}
}.removeIfContains;
return store;
}
pub fn deinit(self: *Self) void {
// great care must be taken here. Due to how Registry keeps this struct as untyped pointers, anything touching a type
// will be wrong since it has to cast to a random struct when deiniting. Because of all that, is_empty_struct
// will always be false here, so we have to deinit the instances no matter what.
self.safeDeinit(self);
self.set.deinit();
self.construction.deinit();
self.update.deinit();
self.destruction.deinit();
if (self.allocator) |allocator| {
allocator.destroy(self);
}
}
pub fn onConstruct(self: *Self) Sink(Entity) {
return self.construction.sink();
}
pub fn onUpdate(self: *Self) Sink(Entity) {
return self.update.sink();
}
pub fn onDestruct(self: *Self) Sink(Entity) {
return self.destruction.sink();
}
/// Increases the capacity of a component storage
pub fn reserve(self: *Self, cap: usize) void {
self.set.reserve(cap);
if (!is_empty_struct) {
self.instances.ensureTotalCapacity(cap) catch unreachable;
}
}
/// Assigns an entity to a storage and assigns its object
pub fn add(self: *Self, entity: Entity, value: Component) void {
if (!is_empty_struct) {
_ = self.instances.append(value) catch unreachable;
}
self.set.add(entity);
self.construction.publish(entity);
}
/// Removes an entity from a storage
pub fn remove(self: *Self, entity: Entity) void {
self.destruction.publish(entity);
if (!is_empty_struct) {
_ = self.instances.swapRemove(self.set.index(entity));
}
self.set.remove(entity);
}
/// Checks if the storage contains an entity
pub fn contains(self: Self, entity: Entity) bool {
return self.set.contains(entity);
}
pub fn removeIfContains(self: *Self, entity: Entity) void {
if (Component == u1) {
self.safeRemoveIfContains(self, entity);
} else if (self.contains(entity)) {
self.remove(entity);
}
}
pub fn len(self: Self) usize {
return self.set.len();
}
pub usingnamespace if (is_empty_struct)
struct {
/// Sort Entities according to the given comparison function. Only T == Entity is allowed. The constraint param only exists for
/// parity with non-empty Components
pub fn sort(self: Self, comptime T: type, context: anytype, comptime lessThan: *const fn (@TypeOf(context), T, T) bool) void {
std.debug.assert(T == Entity);
self.set.sort(context, lessThan);
}
}
else
struct {
/// Direct access to the array of objects
pub fn raw(self: Self) []Component {
return self.instances.items;
}
/// Replaces the given component for an entity
pub fn replace(self: *Self, entity: Entity, value: Component) void {
self.get(entity).* = value;
self.update.publish(entity);
}
/// Returns the object associated with an entity
pub fn get(self: *Self, entity: Entity) *Component {
std.debug.assert(self.contains(entity));
return &self.instances.items[self.set.index(entity)];
}
pub fn getConst(self: *Self, entity: Entity) Component {
return self.instances.items[self.set.index(entity)];
}
/// Returns a pointer to the object associated with an entity, if any.
pub fn tryGet(self: *Self, entity: Entity) ?*Component {
return if (self.set.contains(entity)) &self.instances.items[self.set.index(entity)] else null;
}
pub fn tryGetConst(self: *Self, entity: Entity) ?Component {
return if (self.set.contains(entity)) self.instances.items[self.set.index(entity)] else null;
}
/// Sort Entities or Components according to the given comparison function. Valid types for T are Entity or Component.
pub fn sort(self: *Self, comptime T: type, length: usize, context: anytype, comptime lessThan: *const fn (@TypeOf(context), T, T) bool) void {
std.debug.assert(T == Entity or T == Component);
// we have to perform a swap after the sort for all moved entities so we make a helper struct for that. In the
// case of a Component sort we also wrap that into the struct so we can get the Component data to pass to the
// lessThan method passed in.
if (T == Entity) {
const SortContext = struct {
storage: *Self,
pub fn swap(this: @This(), a: Entity, b: Entity) void {
this.storage.safeSwap(this.storage, a, b, true);
}
};
const swap_context = SortContext{ .storage = self };
self.set.arrange(length, context, lessThan, swap_context);
} else {
const SortContext = struct {
storage: *Self,
wrapped_context: @TypeOf(context),
lessThan: *const fn (@TypeOf(context), T, T) bool,
fn sort(this: @This(), a: Entity, b: Entity) bool {
const real_a = this.storage.getConst(a);
const real_b = this.storage.getConst(b);
return this.lessThan(this.wrapped_context, real_a, real_b);
}
pub fn swap(this: @This(), a: Entity, b: Entity) void {
this.storage.safeSwap(this.storage, a, b, true);
}
};
const swap_context = SortContext{ .storage = self, .wrapped_context = context, .lessThan = lessThan };
self.set.arrange(length, swap_context, SortContext.sort, swap_context);
}
}
};
/// Direct access to the array of entities
pub fn data(self: Self) []const Entity {
return self.set.data();
}
/// Direct access to the array of entities
pub fn dataPtr(self: Self) *const []Entity {
return self.set.dataPtr();
}
/// Swaps entities and objects in the internal packed arrays
pub fn swap(self: *Self, lhs: Entity, rhs: Entity) void {
self.safeSwap(self, lhs, rhs, false);
}
pub fn clear(self: *Self) void {
if (!is_empty_struct) {
self.instances.items.len = 0;
}
self.set.clear();
}
};
}
test "add/try-get/remove/clear" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
try std.testing.expectEqual(store.tryGetConst(3).?, 66.45);
if (store.tryGet(3)) |found| {
try std.testing.expectEqual(@as(f32, 66.45), found.*);
}
store.remove(3);
var val_null = store.tryGet(3);
try std.testing.expectEqual(val_null, null);
store.clear();
}
test "add/get/remove" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
if (store.tryGet(3)) |found| try std.testing.expectEqual(@as(f32, 66.45), found.*);
try std.testing.expectEqual(store.tryGetConst(3).?, 66.45);
store.remove(3);
try std.testing.expectEqual(store.tryGet(3), null);
}
test "iterate" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(3, 66.45);
store.add(5, 66.45);
store.add(7, 66.45);
for (store.data(), 0..) |entity, i| {
if (i == 0) {
try std.testing.expectEqual(entity, 3);
}
if (i == 1) {
try std.testing.expectEqual(entity, 5);
}
if (i == 2) {
try std.testing.expectEqual(entity, 7);
}
}
}
test "empty component" {
const Empty = struct {};
var store = ComponentStorage(Empty, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(3, Empty{});
store.remove(3);
}
fn construct(e: u32) void {
std.debug.assert(e == 3);
}
fn update(e: u32) void {
std.debug.assert(e == 3);
}
fn destruct(e: u32) void {
std.debug.assert(e == 3);
}
test "signals" {
var store = ComponentStorage(f32, u32).init(std.testing.allocator);
defer store.deinit();
store.onConstruct().connect(construct);
store.onUpdate().connect(update);
store.onDestruct().connect(destruct);
store.add(3, 66.45);
store.replace(3, 45.64);
store.remove(3);
store.onConstruct().disconnect(construct);
store.onUpdate().disconnect(update);
store.onDestruct().disconnect(destruct);
store.add(4, 66.45);
store.replace(4, 45.64);
store.remove(4);
}
test "sort empty component" {
const Empty = struct {};
var store = ComponentStorage(Empty, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(1, Empty{});
store.add(2, Empty{});
store.add(0, Empty{});
const asc_u32 = comptime std.sort.asc(u32);
store.sort(u32, {}, asc_u32);
for (store.data(), 0..) |e, i| {
try std.testing.expectEqual(@as(u32, @intCast(i)), e);
}
const desc_u32 = comptime std.sort.desc(u32);
store.sort(u32, {}, desc_u32);
var counter: u32 = 2;
for (store.data()) |e| {
try std.testing.expectEqual(counter, e);
if (counter > 0) counter -= 1;
}
}
test "sort by entity" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(22, @as(f32, 2.2));
store.add(11, @as(f32, 1.1));
store.add(33, @as(f32, 3.3));
const SortContext = struct {
store: *ComponentStorage(f32, u32),
fn sort(this: @This(), a: u32, b: u32) bool {
const real_a = this.store.getConst(a);
const real_b = this.store.getConst(b);
return real_a > real_b;
}
};
const context = SortContext{ .store = store };
store.sort(u32, store.len(), context, SortContext.sort);
var compare: f32 = 5;
for (store.raw()) |val| {
try std.testing.expect(compare > val);
compare = val;
}
}
test "sort by component" {
var store = ComponentStorage(f32, u32).initPtr(std.testing.allocator);
defer store.deinit();
store.add(22, @as(f32, 2.2));
store.add(11, @as(f32, 1.1));
store.add(33, @as(f32, 3.3));
const desc_f32 = comptime std.sort.desc(f32);
store.sort(f32, store.len(), {}, desc_f32);
var compare: f32 = 5;
for (store.raw()) |val| {
try std.testing.expect(compare > val);
compare = val;
}
}

@@ -1,56 +0,0 @@
const std = @import("std");
/// default EntityTraitsDefinition with reasonable sizes suitable for most situations
pub const EntityTraits = EntityTraitsType(.medium);
pub const EntityTraitsSize = enum { small, medium, large };
pub fn EntityTraitsType(comptime size: EntityTraitsSize) type {
return switch (size) {
.small => EntityTraitsDefinition(u16, u12, u4),
.medium => EntityTraitsDefinition(u32, u20, u12),
.large => EntityTraitsDefinition(u64, u32, u32),
};
}
fn EntityTraitsDefinition(comptime EntityType: type, comptime IndexType: type, comptime VersionType: type) type {
std.debug.assert(std.meta.trait.isUnsignedInt(EntityType));
std.debug.assert(std.meta.trait.isUnsignedInt(IndexType));
std.debug.assert(std.meta.trait.isUnsignedInt(VersionType));
const sizeOfIndexType = @bitSizeOf(IndexType);
const sizeOfVersionType = @bitSizeOf(VersionType);
const entityShift = sizeOfIndexType;
if (sizeOfIndexType + sizeOfVersionType != @bitSizeOf(EntityType))
@compileError("IndexType and VersionType must sum to EntityType's bit count");
const entityMask = std.math.maxInt(IndexType);
const versionMask = std.math.maxInt(VersionType);
return struct {
entity_type: type = EntityType,
index_type: type = IndexType,
version_type: type = VersionType,
/// Mask to use to get the entity index number out of an identifier
entity_mask: EntityType = entityMask,
/// Mask to use to get the version out of an identifier
version_mask: EntityType = versionMask,
/// Bit size of entity in entity_type
entity_shift: EntityType = entityShift,
pub fn init() @This() {
return @This(){};
}
};
}
test "entity traits" {
const sm = EntityTraitsType(.small).init();
const m = EntityTraitsType(.medium).init();
const l = EntityTraitsType(.large).init();
try std.testing.expectEqual(sm.entity_mask, std.math.maxInt(sm.index_type));
try std.testing.expectEqual(m.entity_mask, std.math.maxInt(m.index_type));
try std.testing.expectEqual(l.entity_mask, std.math.maxInt(l.index_type));
}

@@ -1,469 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const utils = @import("utils.zig");
const Registry = @import("registry.zig").Registry;
const Storage = @import("registry.zig").Storage;
const SparseSet = @import("sparse_set.zig").SparseSet;
const Entity = @import("registry.zig").Entity;
/// BasicGroups do not own any components. Internally, they keep a SparseSet that is always kept up-to-date with the matching
/// entities.
pub const BasicGroup = struct {
registry: *Registry,
group_data: *Registry.GroupData,
pub fn init(registry: *Registry, group_data: *Registry.GroupData) BasicGroup {
return .{
.registry = registry,
.group_data = group_data,
};
}
pub fn len(self: BasicGroup) usize {
return self.group_data.entity_set.len();
}
/// Direct access to the array of entities
pub fn data(self: BasicGroup) []const Entity {
return self.group_data.entity_set.data();
}
pub fn get(self: BasicGroup, comptime T: type, entity: Entity) *T {
return self.registry.assure(T).get(entity);
}
pub fn getConst(self: BasicGroup, comptime T: type, entity: Entity) T {
return self.registry.assure(T).getConst(entity);
}
/// iterates the matched entities backwards, so the current entity can always be removed safely
/// and newly added entities won't affect it.
pub fn iterator(self: BasicGroup) utils.ReverseSliceIterator(Entity) {
return self.group_data.entity_set.reverseIterator();
}
pub fn sort(self: BasicGroup, comptime T: type, context: anytype, comptime lessThan: *const fn (@TypeOf(context), T, T) bool) void {
if (T == Entity) {
self.group_data.entity_set.sort(context, lessThan);
} else {
// TODO: in debug mode, validate that T is present in the group
const SortContext = struct {
group: BasicGroup,
wrapped_context: @TypeOf(context),
lessThan: *const fn (@TypeOf(context), T, T) bool,
fn sort(this: @This(), a: Entity, b: Entity) bool {
const real_a = this.group.getConst(T, a);
const real_b = this.group.getConst(T, b);
return this.lessThan(this.wrapped_context, real_a, real_b);
}
};
var wrapper = SortContext{ .group = self, .wrapped_context = context, .lessThan = lessThan };
self.group_data.entity_set.sort(wrapper, SortContext.sort);
}
}
};
pub const OwningGroup = struct {
registry: *Registry,
group_data: *Registry.GroupData,
super: *usize,
/// iterator that provides the data from all the requested owned components in a single struct. Access to the current Entity
/// being iterated is available via the entity() method, useful for accessing non-owned component data. The get() method can
/// also be used to fetch non-owned component data for the currently iterated Entity.
/// TODO: support const types in the Components struct in addition to the current ptrs
fn Iterator(comptime Components: anytype) type {
return struct {
group: OwningGroup,
index: usize,
storage: *Storage(u1),
component_ptrs: [@typeInfo(Components).Struct.fields.len][*]u8,
pub fn init(group: OwningGroup) @This() {
const component_info = @typeInfo(Components).Struct;
var component_ptrs: [component_info.fields.len][*]u8 = undefined;
inline for (component_info.fields, 0..) |field, i| {
const storage = group.registry.assure(@typeInfo(field.type).Pointer.child);
component_ptrs[i] = @as([*]u8, @ptrCast(storage.instances.items.ptr));
}
return .{
.group = group,
.index = group.group_data.current,
.storage = group.firstOwnedStorage(),
.component_ptrs = component_ptrs,
};
}
pub fn next(it: *@This()) ?Components {
if (it.index == 0) return null;
it.index -= 1;
// fill and return the struct
var comps: Components = undefined;
inline for (@typeInfo(Components).Struct.fields, 0..) |field, i| {
const typed_ptr = @as([*]@typeInfo(field.type).Pointer.child, @ptrCast(@alignCast(it.component_ptrs[i])));
@field(comps, field.name) = &typed_ptr[it.index];
}
return comps;
}
pub fn entity(it: @This()) Entity {
std.debug.assert(it.index >= 0 and it.index < it.group.group_data.current);
return it.storage.set.dense.items[it.index];
}
pub fn get(it: @This(), comptime T: type) *T {
return it.group.registry.get(T, it.entity());
}
// Reset the iterator to the initial index
pub fn reset(it: *@This()) void {
it.index = it.group.group_data.current;
}
};
}
pub fn init(registry: *Registry, group_data: *Registry.GroupData, super: *usize) OwningGroup {
return .{
.registry = registry,
.group_data = group_data,
.super = super,
};
}
/// grabs an untyped (u1) reference to the first Storage(T) in the owned array
fn firstOwnedStorage(self: OwningGroup) *Storage(u1) {
const ptr = self.registry.components.get(self.group_data.owned[0]).?;
return @as(*Storage(u1), @ptrFromInt(ptr));
}
/// total number of entities in the group
pub fn len(self: OwningGroup) usize {
return self.group_data.current;
}
/// direct access to the array of entities of the first owning group
pub fn data(self: OwningGroup) []const Entity {
return self.firstOwnedStorage().data();
}
pub fn contains(self: OwningGroup, entity: Entity) bool {
var storage = self.firstOwnedStorage();
return storage.contains(entity) and storage.set.index(entity) < self.len();
}
fn validate(self: OwningGroup, comptime Components: anytype) void {
if (builtin.mode == .Debug and self.group_data.owned.len > 0) {
std.debug.assert(@typeInfo(Components) == .Struct);
inline for (@typeInfo(Components).Struct.fields) |field| {
std.debug.assert(@typeInfo(field.type) == .Pointer);
const found = std.mem.indexOfScalar(u32, self.group_data.owned, utils.typeId(std.meta.Child(field.type)));
std.debug.assert(found != null);
}
}
}
pub fn getOwned(self: OwningGroup, entity: Entity, comptime Components: anytype) Components {
self.validate(Components);
const component_info = @typeInfo(Components).Struct;
var component_ptrs: [component_info.fields.len][*]u8 = undefined;
inline for (component_info.fields, 0..) |field, i| {
const storage = self.registry.assure(std.meta.Child(field.type));
component_ptrs[i] = @as([*]u8, @ptrCast(storage.instances.items.ptr));
}
// fill the struct
const index = self.firstOwnedStorage().set.index(entity);
var comps: Components = undefined;
inline for (component_info.fields, 0..) |field, i| {
const typed_ptr = @as([*]std.meta.Child(field.type), @ptrCast(@alignCast(component_ptrs[i])));
@field(comps, field.name) = &typed_ptr[index];
}
return comps;
}
pub fn each(self: OwningGroup, comptime func: anytype) void {
const Components = switch (@typeInfo(@TypeOf(func))) {
.Fn => |func_info| func_info.params[0].type.?,
else => @compileError("invalid func"),
};
self.validate(Components);
// optionally we could just use an Iterator here and pay for some slight indirection for code sharing
var iter = self.iterator(Components);
while (iter.next()) |comps| {
@call(.always_inline, func, .{comps});
}
}
/// returns the component storage for the given type for direct access
pub fn getStorage(self: OwningGroup, comptime T: type) *Storage(T) {
return self.registry.assure(T);
}
pub fn get(self: OwningGroup, comptime T: type, entity: Entity) *T {
return self.registry.assure(T).get(entity);
}
pub fn getConst(self: OwningGroup, comptime T: type, entity: Entity) T {
return self.registry.assure(T).getConst(entity);
}
pub fn sortable(self: OwningGroup) bool {
return self.group_data.super == self.group_data.size;
}
/// returns an iterator with optimized access to the owned Components. Note that Components should be a struct with
/// fields that are pointers to the component types that you want to fetch. Only types that are owned are valid! Non-owned
/// types should be fetched via Iterator.get.
pub fn iterator(self: OwningGroup, comptime Components: anytype) Iterator(Components) {
self.validate(Components);
return Iterator(Components).init(self);
}
pub fn entityIterator(self: OwningGroup) utils.ReverseSliceIterator(Entity) {
return utils.ReverseSliceIterator(Entity).init(self.firstOwnedStorage().set.dense.items[0..self.group_data.current]);
}
pub fn sort(self: OwningGroup, comptime T: type, context: anytype, comptime lessThan: *const fn (@TypeOf(context), T, T) bool) void {
var first_storage = self.firstOwnedStorage();
if (T == Entity) {
// only sort up to self.group_data.current
first_storage.sort(Entity, self.group_data.current, context, lessThan);
} else {
// TODO: in debug mode, validate that T is present in the group
const SortContext = struct {
group: OwningGroup,
wrapped_context: @TypeOf(context),
lessThan: *const fn (@TypeOf(context), T, T) bool,
fn sort(this: @This(), a: Entity, b: Entity) bool {
const real_a = this.group.getConst(T, a);
const real_b = this.group.getConst(T, b);
return this.lessThan(this.wrapped_context, real_a, real_b);
}
};
const wrapper = SortContext{ .group = self, .wrapped_context = context, .lessThan = lessThan };
first_storage.sort(Entity, self.group_data.current, wrapper, SortContext.sort);
}
// sync up the rest of the owned components
var next: usize = self.group_data.current;
while (true) : (next -= 1) {
if (next == 0) break;
const pos = next - 1;
const entity = first_storage.data()[pos];
// skip the first one since its what we are using to sort with
for (self.group_data.owned[1..]) |type_id| {
var other_ptr = self.registry.components.get(type_id).?;
var storage = @as(*Storage(u1), @ptrFromInt(other_ptr));
storage.swap(storage.data()[pos], entity);
}
}
}
};
test "BasicGroup creation/iteration" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{}, .{ i32, u32 }, .{});
try std.testing.expectEqual(group.len(), 0);
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
std.debug.assert(group.len() == 1);
var iterated_entities: usize = 0;
var iter = group.iterator();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 1);
iterated_entities = 0;
for (group.data()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 1);
reg.remove(i32, e0);
std.debug.assert(group.len() == 0);
}
test "BasicGroup excludes" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{}, .{i32}, .{u32});
try std.testing.expectEqual(group.len(), 0);
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
std.debug.assert(group.len() == 1);
var iterated_entities: usize = 0;
var iter = group.iterator();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 1);
reg.add(e0, @as(u32, 55));
std.debug.assert(group.len() == 0);
}
test "BasicGroup create late" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
var group = reg.group(.{}, .{ i32, u32 }, .{});
try std.testing.expectEqual(group.len(), 1);
}
test "OwningGroup" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{ i32, u32 }, .{}, .{});
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
try std.testing.expectEqual(group.len(), 1);
try std.testing.expect(group.contains(e0));
try std.testing.expectEqual(group.get(i32, e0).*, 44);
try std.testing.expectEqual(group.getConst(u32, e0), 55);
var vals = group.getOwned(e0, struct { int: *i32, uint: *u32 });
try std.testing.expectEqual(vals.int.*, 44);
try std.testing.expectEqual(vals.uint.*, 55);
vals.int.* = 666;
var vals2 = group.getOwned(e0, struct { int: *i32, uint: *u32 });
try std.testing.expectEqual(vals2.int.*, 666);
}
test "OwningGroup add/remove" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{ i32, u32 }, .{}, .{});
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
try std.testing.expectEqual(group.len(), 1);
reg.remove(u32, e0);
try std.testing.expectEqual(group.len(), 0);
}
test "OwningGroup iterate" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
reg.add(e0, @as(u8, 11));
var e1 = reg.create();
reg.add(e1, @as(i32, 666));
reg.add(e1, @as(u32, 999));
reg.add(e1, @as(f32, 55.5));
var group = reg.group(.{ i32, u32 }, .{}, .{});
var iter = group.iterator(struct { int: *i32, uint: *u32 });
while (iter.next()) |item| {
if (iter.entity() == e0) {
try std.testing.expectEqual(item.int.*, 44);
try std.testing.expectEqual(item.uint.*, 55);
try std.testing.expectEqual(iter.get(u8).*, 11);
} else {
try std.testing.expectEqual(item.int.*, 666);
try std.testing.expectEqual(item.uint.*, 999);
try std.testing.expectEqual(iter.get(f32).*, 55.5);
}
}
}
fn each(components: struct {
int: *i32,
uint: *u32,
}) void {
std.testing.expectEqual(components.int.*, 44) catch unreachable;
std.testing.expectEqual(components.uint.*, 55) catch unreachable;
}
test "OwningGroup each" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e0 = reg.create();
reg.add(e0, @as(i32, 44));
reg.add(e0, @as(u32, 55));
const Thing = struct {
fn each(_: @This(), components: struct {
int: *i32,
uint: *u32,
}) void {
std.testing.expectEqual(components.int.*, 44) catch unreachable;
std.testing.expectEqual(components.uint.*, 55) catch unreachable;
}
};
var thing = Thing{};
var group = reg.group(.{ i32, u32 }, .{}, .{});
// group.each(thing.each); // zig v0.10.0: error: no field named 'each' in struct 'ecs.groups.test.OwningGroup each.Thing'
_ = thing;
// group.each(each); // zig v0.10.0: error: expected type 'ecs.groups.each__struct_6297', found 'ecs.groups.each__struct_3365'
_ = group;
}
test "multiple OwningGroups" {
const Sprite = struct { x: f32 };
const Transform = struct { x: f32 };
const Renderable = struct { x: f32 };
const Rotation = struct { x: f32 };
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
// var group1 = reg.group(.{u64, u32}, .{}, .{});
// var group2 = reg.group(.{u64, u32, u8}, .{}, .{});
_ = reg.group(.{ Sprite, Transform }, .{ Renderable, Rotation }, .{});
_ = reg.group(.{Sprite}, .{Renderable}, .{});
_ = reg.group(.{ Sprite, Transform }, .{Renderable}, .{});
// ensure groups are ordered correctly internally
var last_size: u8 = 0;
for (reg.groups.items) |grp| {
try std.testing.expect(last_size <= grp.size);
last_size = grp.size;
}
try std.testing.expect(!reg.sortable(Sprite));
// this will break the group
// var group6 = reg.group(.{Sprite, Rotation}, .{}, .{});
}

@@ -1,161 +0,0 @@
const std = @import("std");
const registry = @import("registry.zig");
/// generates versioned "handles" (https://floooh.github.io/2018/06/17/handles-vs-pointers.html)
/// you choose the type of the handle (aka its size) and how much of that goes to the index and the version.
/// the bit sizes of the version and index must sum to the handle's bit size.
pub fn Handles(comptime HandleType: type, comptime IndexType: type, comptime VersionType: type) type {
std.debug.assert(@typeInfo(HandleType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(HandleType)) == HandleType);
std.debug.assert(@typeInfo(IndexType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(IndexType)) == IndexType);
std.debug.assert(@typeInfo(VersionType) == .Int and std.meta.Int(.unsigned, @bitSizeOf(VersionType)) == VersionType);
if (@bitSizeOf(IndexType) + @bitSizeOf(VersionType) != @bitSizeOf(HandleType))
@compileError("IndexType and VersionType must sum to HandleType's bit count");
return struct {
const Self = @This();
handles: []HandleType,
append_cursor: IndexType = 0,
last_destroyed: ?IndexType = null,
allocator: std.mem.Allocator,
const invalid_id = std.math.maxInt(IndexType);
pub const Iterator = struct {
hm: Self,
index: usize = 0,
pub fn init(hm: Self) @This() {
return .{ .hm = hm };
}
pub fn next(self: *@This()) ?HandleType {
if (self.index == self.hm.append_cursor) return null;
for (self.hm.handles[self.index..self.hm.append_cursor]) |h| {
self.index += 1;
if (self.hm.alive(h)) {
return h;
}
}
return null;
}
};
pub fn init(allocator: std.mem.Allocator) Self {
return initWithCapacity(allocator, 32);
}
pub fn initWithCapacity(allocator: std.mem.Allocator, capacity: usize) Self {
return Self{
.handles = allocator.alloc(HandleType, capacity) catch unreachable,
.allocator = allocator,
};
}
pub fn deinit(self: Self) void {
self.allocator.free(self.handles);
}
pub fn extractId(_: Self, handle: HandleType) IndexType {
return @as(IndexType, @truncate(handle & registry.entity_traits.entity_mask));
}
pub fn extractVersion(_: Self, handle: HandleType) VersionType {
return @as(VersionType, @truncate(handle >> registry.entity_traits.entity_shift));
}
fn forge(id: IndexType, version: VersionType) HandleType {
return id | @as(HandleType, version) << registry.entity_traits.entity_shift;
}
pub fn create(self: *Self) HandleType {
if (self.last_destroyed == null) {
// ensure capacity and grow if needed
if (self.handles.len - 1 == self.append_cursor) {
self.handles = self.allocator.realloc(self.handles, self.handles.len * 2) catch unreachable;
}
const id = self.append_cursor;
const handle = forge(self.append_cursor, 0);
self.handles[id] = handle;
self.append_cursor += 1;
return handle;
}
const version = self.extractVersion(self.handles[self.last_destroyed.?]);
const destroyed_id = self.extractId(self.handles[self.last_destroyed.?]);
const handle = forge(self.last_destroyed.?, version);
self.handles[self.last_destroyed.?] = handle;
self.last_destroyed = if (destroyed_id == invalid_id) null else destroyed_id;
return handle;
}
pub fn remove(self: *Self, handle: HandleType) !void {
const id = self.extractId(handle);
if (id > self.append_cursor or self.handles[id] != handle)
return error.RemovedInvalidHandle;
const next_id = self.last_destroyed orelse invalid_id;
if (next_id == id) return error.ExhaustedEntityRemoval;
const version = self.extractVersion(handle);
self.handles[id] = forge(next_id, version +% 1);
self.last_destroyed = id;
}
pub fn alive(self: Self, handle: HandleType) bool {
const id = self.extractId(handle);
return id < self.append_cursor and self.handles[id] == handle;
}
pub fn iterator(self: Self) Iterator {
return Iterator.init(self);
}
};
}
test "handles" {
var hm = Handles(u32, u20, u12).init(std.testing.allocator);
defer hm.deinit();
const e0 = hm.create();
const e1 = hm.create();
const e2 = hm.create();
std.debug.assert(hm.alive(e0));
std.debug.assert(hm.alive(e1));
std.debug.assert(hm.alive(e2));
hm.remove(e1) catch unreachable;
std.debug.assert(!hm.alive(e1));
try std.testing.expectError(error.RemovedInvalidHandle, hm.remove(e1));
var e_tmp = hm.create();
std.debug.assert(hm.alive(e_tmp));
hm.remove(e_tmp) catch unreachable;
std.debug.assert(!hm.alive(e_tmp));
hm.remove(e0) catch unreachable;
std.debug.assert(!hm.alive(e0));
hm.remove(e2) catch unreachable;
std.debug.assert(!hm.alive(e2));
e_tmp = hm.create();
std.debug.assert(hm.alive(e_tmp));
e_tmp = hm.create();
std.debug.assert(hm.alive(e_tmp));
e_tmp = hm.create();
std.debug.assert(hm.alive(e_tmp));
}
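// A hedged sketch (not in the original file) of the version-recycling behavior described above:
// destroying a handle and creating a new one reuses the index slot but bumps the version, so the
// stale handle is detectably dead. Assumes the default medium entity traits (u20 index + u12 version).
test "handle version recycling (sketch)" {
var hm = Handles(u32, u20, u12).init(std.testing.allocator);
defer hm.deinit();
const first = hm.create();
try std.testing.expectEqual(@as(u12, 0), hm.extractVersion(first));
try hm.remove(first);
const second = hm.create();
// same index slot, new version
try std.testing.expectEqual(hm.extractId(first), hm.extractId(second));
try std.testing.expectEqual(@as(u12, 1), hm.extractVersion(second));
try std.testing.expect(!hm.alive(first));
try std.testing.expect(hm.alive(second));
}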

@@ -1,644 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const utils = @import("utils.zig");
const Handles = @import("handles.zig").Handles;
const SparseSet = @import("sparse_set.zig").SparseSet;
const ComponentStorage = @import("component_storage.zig").ComponentStorage;
const Sink = @import("../signals/sink.zig").Sink;
const TypeStore = @import("type_store.zig").TypeStore;
// allow overriding EntityTraits by setting in root via: EntityTraits = EntityTraitsType(.medium);
const root = @import("root");
pub const entity_traits = if (@hasDecl(root, "EntityTraits")) root.EntityTraits.init() else @import("entity.zig").EntityTraits.init();
// setup the Handles type based on the type set in EntityTraits
pub const EntityHandles = Handles(entity_traits.entity_type, entity_traits.index_type, entity_traits.version_type);
pub const Entity = entity_traits.entity_type;
const BasicView = @import("views.zig").BasicView;
const MultiView = @import("views.zig").MultiView;
const BasicGroup = @import("groups.zig").BasicGroup;
const OwningGroup = @import("groups.zig").OwningGroup;
/// Stores an ArrayList of components. The max amount that can be stored is based on the type below
pub fn Storage(comptime CompT: type) type {
return ComponentStorage(CompT, Entity);
}
/// the registry is the main gateway to all ecs functionality. It assumes all internal allocations will succeed and returns
/// no errors to keep the API clean, and because if a component array can't be allocated you've got bigger problems.
pub const Registry = struct {
handles: EntityHandles,
components: std.AutoHashMap(u32, usize),
contexts: std.AutoHashMap(u32, usize),
groups: std.ArrayList(*GroupData),
type_store: TypeStore,
allocator: std.mem.Allocator,
/// internal, persistent data structure to manage the entities in a group
pub const GroupData = struct {
hash: u64,
size: u8,
/// optional. there will be an entity_set for non-owning groups and current for owning
entity_set: SparseSet(Entity) = undefined,
owned: []u32,
include: []u32,
exclude: []u32,
registry: *Registry,
current: usize,
pub fn initPtr(allocator: std.mem.Allocator, registry: *Registry, hash: u64, owned: []u32, include: []u32, exclude: []u32) *GroupData {
// std.debug.assert(std.mem.indexOfAny(u32, owned, include) == null);
// std.debug.assert(std.mem.indexOfAny(u32, owned, exclude) == null);
// std.debug.assert(std.mem.indexOfAny(u32, include, exclude) == null);
var group_data = allocator.create(GroupData) catch unreachable;
group_data.hash = hash;
group_data.size = @as(u8, @intCast(owned.len + include.len + exclude.len));
if (owned.len == 0) {
group_data.entity_set = SparseSet(Entity).init(allocator);
}
group_data.owned = allocator.dupe(u32, owned) catch unreachable;
group_data.include = allocator.dupe(u32, include) catch unreachable;
group_data.exclude = allocator.dupe(u32, exclude) catch unreachable;
group_data.registry = registry;
group_data.current = 0;
return group_data;
}
pub fn deinit(self: *GroupData, allocator: std.mem.Allocator) void {
// only deinit the SparseSet for non-owning groups
if (self.owned.len == 0) {
self.entity_set.deinit();
}
allocator.free(self.owned);
allocator.free(self.include);
allocator.free(self.exclude);
allocator.destroy(self);
}
pub fn maybeValidIf(self: *GroupData, entity: Entity) void {
const isValid: bool = blk: {
for (self.owned) |tid| {
const ptr = self.registry.components.get(tid).?;
if (!@as(*Storage(u1), @ptrFromInt(ptr)).contains(entity))
break :blk false;
}
for (self.include) |tid| {
const ptr = self.registry.components.get(tid).?;
if (!@as(*Storage(u1), @ptrFromInt(ptr)).contains(entity))
break :blk false;
}
for (self.exclude) |tid| {
const ptr = self.registry.components.get(tid).?;
if (@as(*Storage(u1), @ptrFromInt(ptr)).contains(entity))
break :blk false;
}
break :blk true;
};
if (self.owned.len == 0) {
if (isValid and !self.entity_set.contains(entity)) {
self.entity_set.add(entity);
}
} else {
if (isValid) {
const ptr = self.registry.components.get(self.owned[0]).?;
if (!(@as(*Storage(u1), @ptrFromInt(ptr)).set.index(entity) < self.current)) {
for (self.owned) |tid| {
// store.swap hides a safe version that types it correctly
const store_ptr = self.registry.components.get(tid).?;
var store = @as(*Storage(u1), @ptrFromInt(store_ptr));
store.swap(store.data()[self.current], entity);
}
self.current += 1;
}
}
std.debug.assert(self.owned.len >= 0);
}
}
pub fn discardIf(self: *GroupData, entity: Entity) void {
if (self.owned.len == 0) {
if (self.entity_set.contains(entity)) {
self.entity_set.remove(entity);
}
} else {
const ptr = self.registry.components.get(self.owned[0]).?;
var store = @as(*Storage(u1), @ptrFromInt(ptr));
if (store.contains(entity) and store.set.index(entity) < self.current) {
self.current -= 1;
for (self.owned) |tid| {
const store_ptr = self.registry.components.get(tid).?;
store = @as(*Storage(u1), @ptrFromInt(store_ptr));
store.swap(store.data()[self.current], entity);
}
}
}
}
/// finds the insertion point for this group by finding anything in the group family (overlapping owned)
/// and finds the least specialized (based on size). This allows the least specialized to update first
/// which ensures more specialized (ie less matches) will always be swapping inside the bounds of
/// the less specialized groups.
fn findInsertionIndex(self: GroupData, groups: []*GroupData) ?usize {
for (groups, 0..) |grp, i| {
var overlapping: u8 = 0;
for (grp.owned) |grp_owned| {
if (std.mem.indexOfScalar(u32, self.owned, grp_owned)) |_| overlapping += 1;
}
if (overlapping > 0 and self.size <= grp.size) return i;
}
return null;
}
// TODO: is this the right logic? Should this return just the previous item in the family or be more specific about
// the group size for the index it returns?
/// for discards, the most specialized group in the family needs to do its discard and swap first. This ensures
/// that, as each more specialized group performs its discard, the entity always remains outside of the "current" index
/// for all groups in the family.
fn findPreviousIndex(self: GroupData, groups: []*GroupData, index: ?usize) ?usize {
if (groups.len == 0) return null;
// we iterate backwards and either index or groups.len is one tick past where we want to start
var i = if (index) |ind| ind else groups.len;
if (i > 0) i -= 1;
while (i >= 0) : (i -= 1) {
var overlapping: u8 = 0;
for (groups[i].owned) |grp_owned| {
if (std.mem.indexOfScalar(u32, self.owned, grp_owned)) |_| overlapping += 1;
}
if (overlapping > 0) return i;
if (i == 0) return null;
}
return null;
}
};
pub fn init(allocator: std.mem.Allocator) Registry {
return Registry{
.handles = EntityHandles.init(allocator),
.components = std.AutoHashMap(u32, usize).init(allocator),
.contexts = std.AutoHashMap(u32, usize).init(allocator),
.groups = std.ArrayList(*GroupData).init(allocator),
.type_store = TypeStore.init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *Registry) void {
var iter = self.components.valueIterator();
while (iter.next()) |ptr| {
// HACK: we don't know the Type here but we need to call deinit
var storage = @as(*Storage(u1), @ptrFromInt(ptr.*));
storage.deinit();
}
for (self.groups.items) |grp| {
grp.deinit(self.allocator);
}
self.components.deinit();
self.contexts.deinit();
self.groups.deinit();
self.type_store.deinit();
self.handles.deinit();
}
pub fn assure(self: *Registry, comptime T: type) *Storage(T) {
var type_id = utils.typeId(T);
if (self.components.getEntry(type_id)) |kv| {
return @as(*Storage(T), @ptrFromInt(kv.value_ptr.*));
}
var comp_set = Storage(T).initPtr(self.allocator);
var comp_set_ptr = @intFromPtr(comp_set);
_ = self.components.put(type_id, comp_set_ptr) catch unreachable;
return comp_set;
}
/// Prepares a pool for the given type if required
pub fn prepare(self: *Registry, comptime T: type) void {
_ = self.assure(T);
}
/// Returns the number of existing components of the given type
pub fn len(self: *Registry, comptime T: type) usize {
return self.assure(T).len();
}
/// Increases the capacity of the registry or of the pools for the given component
pub fn reserve(self: *Registry, comptime T: type, cap: usize) void {
self.assure(T).reserve(cap);
}
/// Direct access to the list of components of a given pool
pub fn raw(self: Registry, comptime T: type) []T {
return self.assure(T).raw();
}
/// Direct access to the list of entities of a given pool
pub fn data(self: Registry, comptime T: type) []Entity {
return self.assure(T).data().*;
}
pub fn valid(self: *Registry, entity: Entity) bool {
return self.handles.alive(entity);
}
/// Returns the entity identifier without the version
pub fn entityId(_: Registry, entity: Entity) Entity {
return entity & entity_traits.entity_mask;
}
/// Returns the version stored along with an entity identifier
pub fn version(_: *Registry, entity: Entity) entity_traits.version_type {
return @as(entity_traits.version_type, @truncate(entity >> entity_traits.entity_shift));
}
/// Creates a new entity and returns it
pub fn create(self: *Registry) Entity {
return self.handles.create();
}
/// Destroys an entity
pub fn destroy(self: *Registry, entity: Entity) void {
assert(self.valid(entity));
self.removeAll(entity);
self.handles.remove(entity) catch unreachable;
}
/// returns an iterator that iterates all live entities
pub fn entities(self: Registry) EntityHandles.Iterator {
return self.handles.iterator();
}
pub fn add(self: *Registry, entity: Entity, value: anytype) void {
assert(self.valid(entity));
self.assure(@TypeOf(value)).add(entity, value);
}
/// shortcut for adding raw comptime_int/float without having to @as cast
pub fn addTyped(self: *Registry, comptime T: type, entity: Entity, value: T) void {
self.add(entity, value);
}
/// adds all the component types passed in as zero-initialized values
pub fn addTypes(self: *Registry, entity: Entity, comptime types: anytype) void {
inline for (types) |t| {
self.assure(t).add(entity, std.mem.zeroes(t));
}
}
/// Replaces the given component for an entity
pub fn replace(self: *Registry, entity: Entity, value: anytype) void {
assert(self.valid(entity));
self.assure(@TypeOf(value)).replace(entity, value);
}
/// shortcut for replacing raw comptime_int/float without having to @as cast
pub fn replaceTyped(self: *Registry, comptime T: type, entity: Entity, value: T) void {
self.replace(entity, value);
}
pub fn addOrReplace(self: *Registry, entity: Entity, value: anytype) void {
assert(self.valid(entity));
const store = self.assure(@TypeOf(value));
if (store.tryGet(entity)) |found| {
found.* = value;
store.update.publish(entity);
} else {
store.add(entity, value);
}
}
/// shortcut for add-or-replace raw comptime_int/float without having to @as cast
pub fn addOrReplaceTyped(self: *Registry, comptime T: type, entity: Entity, value: T) void {
self.addOrReplace(entity, value);
}
/// Removes the given component from an entity
pub fn remove(self: *Registry, comptime T: type, entity: Entity) void {
assert(self.valid(entity));
self.assure(T).remove(entity);
}
pub fn removeIfExists(self: *Registry, comptime T: type, entity: Entity) void {
assert(self.valid(entity));
var store = self.assure(T);
if (store.contains(entity)) {
store.remove(entity);
}
}
/// Removes all the components from an entity and makes it orphaned
pub fn removeAll(self: *Registry, entity: Entity) void {
assert(self.valid(entity));
var iter = self.components.valueIterator();
while (iter.next()) |value| {
// HACK: we don't know the Type here but we need to be able to call methods on the Storage(T)
var store = @as(*Storage(u1), @ptrFromInt(value.*));
store.removeIfContains(entity);
}
}
pub fn has(self: *Registry, comptime T: type, entity: Entity) bool {
assert(self.valid(entity));
return self.assure(T).set.contains(entity);
}
pub fn get(self: *Registry, comptime T: type, entity: Entity) *T {
assert(self.valid(entity));
return self.assure(T).get(entity);
}
pub fn getConst(self: *Registry, comptime T: type, entity: Entity) T {
assert(self.valid(entity));
return self.assure(T).getConst(entity);
}
/// Returns a reference to the given component for an entity creating it if necessary
pub fn getOrAdd(self: *Registry, comptime T: type, entity: Entity) *T {
if (!self.has(T, entity)) {
self.addTyped(T, entity, .{});
}
return self.get(T, entity);
}
pub fn tryGet(self: *Registry, comptime T: type, entity: Entity) ?*T {
return self.assure(T).tryGet(entity);
}
/// Returns a Sink object for the given component to add/remove listeners with
pub fn onConstruct(self: *Registry, comptime T: type) Sink(Entity) {
return self.assure(T).onConstruct();
}
/// Returns a Sink object for the given component to add/remove listeners with
pub fn onUpdate(self: *Registry, comptime T: type) Sink(Entity) {
return self.assure(T).onUpdate();
}
/// Returns a Sink object for the given component to add/remove listeners with
pub fn onDestruct(self: *Registry, comptime T: type) Sink(Entity) {
return self.assure(T).onDestruct();
}
/// Binds an object to the context of the registry
pub fn setContext(self: *Registry, context: anytype) void {
std.debug.assert(@typeInfo(@TypeOf(context)) == .Pointer);
var type_id = utils.typeId(@typeInfo(@TypeOf(context)).Pointer.child);
_ = self.contexts.put(type_id, @intFromPtr(context)) catch unreachable;
}
/// Unsets a context variable if it exists
pub fn unsetContext(self: *Registry, comptime T: type) void {
std.debug.assert(@typeInfo(T) != .Pointer);
_ = self.contexts.put(utils.typeId(T), 0) catch unreachable;
}
/// Returns a pointer to an object in the context of the registry
pub fn getContext(self: *Registry, comptime T: type) ?*T {
std.debug.assert(@typeInfo(T) != .Pointer);
return if (self.contexts.get(utils.typeId(T))) |ptr|
return if (ptr > 0) @as(*T, @ptrFromInt(ptr)) else null
else
null;
}
/// provides access to a TypeStore letting you add singleton components to the registry
pub fn singletons(self: *Registry) *TypeStore {
return &self.type_store;
}
pub fn sort(self: *Registry, comptime T: type, comptime lessThan: *const fn (void, T, T) bool) void {
const comp = self.assure(T);
std.debug.assert(comp.super == 0);
comp.sort(T, comp.len(), {}, lessThan);
}
/// Checks whether the given component belongs to any group. If so, it is not sortable directly.
pub fn sortable(self: *Registry, comptime T: type) bool {
return self.assure(T).super == 0;
}
pub fn view(self: *Registry, comptime includes: anytype, comptime excludes: anytype) ViewType(includes, excludes) {
std.debug.assert(@typeInfo(@TypeOf(includes)) == .Struct);
std.debug.assert(@typeInfo(@TypeOf(excludes)) == .Struct);
std.debug.assert(includes.len > 0);
// just one include so use the optimized BasicView
if (includes.len == 1 and excludes.len == 0)
return BasicView(includes[0]).init(self.assure(includes[0]));
var includes_arr: [includes.len]u32 = undefined;
inline for (includes, 0..) |t, i| {
_ = self.assure(t);
includes_arr[i] = utils.typeId(t);
}
var excludes_arr: [excludes.len]u32 = undefined;
inline for (excludes, 0..) |t, i| {
_ = self.assure(t);
excludes_arr[i] = utils.typeId(t);
}
return MultiView(includes.len, excludes.len).init(self, includes_arr, excludes_arr);
}
/// returns the Type that a view will be based on the includes and excludes
fn ViewType(comptime includes: anytype, comptime excludes: anytype) type {
if (includes.len == 1 and excludes.len == 0) return BasicView(includes[0]);
return MultiView(includes.len, excludes.len);
}
/// creates an optimized group for iterating components
pub fn group(self: *Registry, comptime owned: anytype, comptime includes: anytype, comptime excludes: anytype) (if (owned.len == 0) BasicGroup else OwningGroup) {
std.debug.assert(@typeInfo(@TypeOf(owned)) == .Struct);
std.debug.assert(@typeInfo(@TypeOf(includes)) == .Struct);
std.debug.assert(@typeInfo(@TypeOf(excludes)) == .Struct);
std.debug.assert(owned.len + includes.len > 0);
std.debug.assert(owned.len + includes.len + excludes.len > 1);
// create a unique hash to identify the group so that we can look it up
const hash = comptime hashGroupTypes(owned, includes, excludes);
for (self.groups.items) |grp| {
if (grp.hash == hash) {
if (owned.len == 0) {
return BasicGroup.init(self, grp);
}
var first_owned = self.assure(owned[0]);
return OwningGroup.init(self, grp, &first_owned.super);
}
}
// gather up all our Types as typeIds
var includes_arr: [includes.len]u32 = undefined;
inline for (includes, 0..) |t, i| {
_ = self.assure(t);
includes_arr[i] = utils.typeId(t);
}
var excludes_arr: [excludes.len]u32 = undefined;
inline for (excludes, 0..) |t, i| {
_ = self.assure(t);
excludes_arr[i] = utils.typeId(t);
}
var owned_arr: [owned.len]u32 = undefined;
inline for (owned, 0..) |t, i| {
_ = self.assure(t);
owned_arr[i] = utils.typeId(t);
}
// we need to create a new GroupData
var new_group_data = GroupData.initPtr(self.allocator, self, hash, owned_arr[0..], includes_arr[0..], excludes_arr[0..]);
// before adding the group we need to do some checks to make sure there aren't other owning groups with the same types
if (builtin.mode == .Debug and owned.len > 0) {
for (self.groups.items) |grp| {
if (grp.owned.len == 0) continue;
var overlapping: u8 = 0;
for (grp.owned) |grp_owned| {
if (std.mem.indexOfScalar(u32, &owned_arr, grp_owned)) |_| overlapping += 1;
}
var sz: u8 = overlapping;
for (grp.include) |grp_include| {
if (std.mem.indexOfScalar(u32, &includes_arr, grp_include)) |_| sz += 1;
}
for (grp.exclude) |grp_exclude| {
if (std.mem.indexOfScalar(u32, &excludes_arr, grp_exclude)) |_| sz += 1;
}
const check = overlapping == 0 or ((sz == new_group_data.size) or (sz == grp.size));
std.debug.assert(check);
}
}
var maybe_valid_if: ?*GroupData = null;
var discard_if: ?*GroupData = null;
if (owned.len == 0) {
self.groups.append(new_group_data) catch unreachable;
} else {
// if this is a group in a family, we may need to do an insert so get the insertion index first
const maybe_index = new_group_data.findInsertionIndex(self.groups.items);
// if there is a previous group in this family, we use it for inserting our discardIf calls
if (new_group_data.findPreviousIndex(self.groups.items, maybe_index)) |prev| {
discard_if = self.groups.items[prev];
}
if (maybe_index) |index| {
maybe_valid_if = self.groups.items[index];
self.groups.insert(index, new_group_data) catch unreachable;
} else {
self.groups.append(new_group_data) catch unreachable;
}
// update super on all owned Storages to be the max of size and their current super value
inline for (owned) |t| {
var storage = self.assure(t);
storage.super = @max(storage.super, new_group_data.size);
}
}
// wire up our listeners
inline for (owned) |t| self.onConstruct(t).beforeBound(maybe_valid_if).connectBound(new_group_data, "maybeValidIf");
inline for (includes) |t| self.onConstruct(t).beforeBound(maybe_valid_if).connectBound(new_group_data, "maybeValidIf");
inline for (excludes) |t| self.onDestruct(t).beforeBound(maybe_valid_if).connectBound(new_group_data, "maybeValidIf");
inline for (owned) |t| self.onDestruct(t).beforeBound(discard_if).connectBound(new_group_data, "discardIf");
inline for (includes) |t| self.onDestruct(t).beforeBound(discard_if).connectBound(new_group_data, "discardIf");
inline for (excludes) |t| self.onConstruct(t).beforeBound(discard_if).connectBound(new_group_data, "discardIf");
// pre-fill the GroupData with any existing entities that match
if (owned.len == 0) {
var view_instance = self.view(owned ++ includes, excludes);
var view_iter = view_instance.entityIterator();
while (view_iter.next()) |entity| {
new_group_data.entity_set.add(entity);
}
} else {
// we cannot iterate backwards because we want to leave behind valid entities in case of owned types
// ??? why not?
var first_owned_storage = self.assure(owned[0]);
for (first_owned_storage.data()) |entity| {
new_group_data.maybeValidIf(entity);
}
// for(auto *first = std::get<0>(cpools).data(), *last = first + std::get<0>(cpools).size(); first != last; ++first) {
// handler->template maybe_valid_if<std::tuple_element_t<0, std::tuple<std::decay_t<Owned>...>>>(*this, *first);
// }
}
if (owned.len == 0) {
return BasicGroup.init(self, new_group_data);
} else {
var first_owned_storage = self.assure(owned[0]);
return OwningGroup.init(self, new_group_data, &first_owned_storage.super);
}
}
/// given the 3 group Types arrays, generates a (mostly) unique u64 hash. Simultaneously ensures there are no duplicate types between
/// the 3 groups.
inline fn hashGroupTypes(comptime owned: anytype, comptime includes: anytype, comptime excludes: anytype) u64 {
comptime {
for (owned) |t1| {
for (includes) |t2| {
std.debug.assert(t1 != t2);
for (excludes) |t3| {
std.debug.assert(t1 != t3);
std.debug.assert(t2 != t3);
}
}
}
const owned_str = concatTypes(owned);
const includes_str = concatTypes(includes);
const excludes_str = concatTypes(excludes);
return utils.hashStringFnv(u64, owned_str ++ includes_str ++ excludes_str);
}
}
/// expects a tuple of types. Converts them to type names, sorts them, then concatenates and returns the string.
inline fn concatTypes(comptime types: anytype) []const u8 {
comptime {
if (types.len == 0) return "_";
const impl = struct {
fn asc(_: void, lhs: []const u8, rhs: []const u8) bool {
return std.mem.lessThan(u8, lhs, rhs);
}
};
var names: [types.len][]const u8 = undefined;
for (&names, 0..) |*name, i| {
name.* = @typeName(types[i]);
}
std.sort.pdq([]const u8, &names, {}, impl.asc);
comptime var res: []const u8 = "";
inline for (names) |name| res = res ++ name;
return res;
}
}
};
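// Added usage sketch (not part of the original file): exercises the create/add/view
// path documented above. The `Position` and `Velocity` component types are made up
// purely for illustration.
test "registry create/add/view sketch" {
const Position = struct { x: f32 = 0, y: f32 = 0 };
const Velocity = struct { x: f32 = 1, y: f32 = 1 };
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var entity = reg.create();
reg.add(entity, Position{});
reg.add(entity, Velocity{});
// iterate every entity that has both components and integrate velocity once
var view = reg.view(.{ Position, Velocity }, .{});
var iter = view.entityIterator();
while (iter.next()) |e| {
view.get(Position, e).x += view.getConst(Velocity, e).x;
}
try std.testing.expectEqual(@as(f32, 1), reg.getConst(Position, entity).x);
}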

@ -1,367 +0,0 @@
const std = @import("std");
const utils = @import("utils.zig");
const registry = @import("registry.zig");
const ReverseSliceIterator = @import("utils.zig").ReverseSliceIterator;
/// NOTE: This is a copy of `std.sort.insertionSort` with fixed function pointer
/// syntax to avoid compilation errors.
///
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case.
/// O(1) memory (no allocator required).
/// This can be expressed in terms of `insertionSortContext` but the glue
/// code is slightly longer than the direct implementation.
fn std_sort_insertionSort_clone(
comptime T: type,
items: []T,
context: anytype,
comptime lessThan: *const fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
var i: usize = 1;
while (i < items.len) : (i += 1) {
const x = items[i];
var j: usize = i;
while (j > 0 and lessThan(context, x, items[j - 1])) : (j -= 1) {
items[j] = items[j - 1];
}
items[j] = x;
}
}
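// Added illustration (not part of the original file): a minimal check that the
// insertion sort clone above orders a slice ascending with a void context.
test "std_sort_insertionSort_clone sorts ascending" {
const asc = struct {
fn lessThan(_: void, lhs: u32, rhs: u32) bool {
return lhs < rhs;
}
};
var items = [_]u32{ 3, 1, 2 };
std_sort_insertionSort_clone(u32, items[0..], {}, asc.lessThan);
try std.testing.expectEqual(@as(u32, 1), items[0]);
try std.testing.expectEqual(@as(u32, 2), items[1]);
try std.testing.expectEqual(@as(u32, 3), items[2]);
}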
// TODO: fix entity_mask. it should come from EntityTraitsDefinition.
pub fn SparseSet(comptime SparseT: type) type {
return struct {
const Self = @This();
const page_size: usize = 4096;
sparse: std.ArrayList(?[]SparseT),
dense: std.ArrayList(SparseT),
entity_mask: SparseT,
allocator: ?std.mem.Allocator,
pub fn initPtr(allocator: std.mem.Allocator) *Self {
var set = allocator.create(Self) catch unreachable;
set.sparse = std.ArrayList(?[]SparseT).initCapacity(allocator, 16) catch unreachable;
set.dense = std.ArrayList(SparseT).initCapacity(allocator, 16) catch unreachable;
set.entity_mask = registry.entity_traits.entity_mask;
set.allocator = allocator;
return set;
}
pub fn init(allocator: std.mem.Allocator) Self {
return Self{
.sparse = std.ArrayList(?[]SparseT).init(allocator),
.dense = std.ArrayList(SparseT).init(allocator),
.entity_mask = registry.entity_traits.entity_mask,
.allocator = null,
};
}
pub fn deinit(self: *Self) void {
for (self.sparse.items) |array| {
if (array) |arr| {
self.sparse.allocator.free(arr);
}
}
self.dense.deinit();
self.sparse.deinit();
if (self.allocator) |allocator| {
allocator.destroy(self);
}
}
pub fn page(self: Self, sparse: SparseT) usize {
return (sparse & self.entity_mask) / page_size;
}
fn offset(_: Self, sparse: SparseT) usize {
return sparse & (page_size - 1);
}
fn assure(self: *Self, pos: usize) []SparseT {
if (pos >= self.sparse.items.len) {
const start_pos = self.sparse.items.len;
self.sparse.resize(pos + 1) catch unreachable;
self.sparse.expandToCapacity();
@memset(self.sparse.items[start_pos..], null);
}
if (self.sparse.items[pos] == null) {
var new_page = self.sparse.allocator.alloc(SparseT, page_size) catch unreachable;
@memset(new_page, std.math.maxInt(SparseT));
self.sparse.items[pos] = new_page;
}
return self.sparse.items[pos].?;
}
/// Increases the capacity of a sparse sets index array
pub fn reserve(self: *Self, cap: usize) void {
self.sparse.resize(cap) catch unreachable;
}
/// Returns the number of dense elements that a sparse set has currently allocated space for
pub fn capacity(self: *Self) usize {
return self.dense.capacity;
}
/// Returns the number of dense elements in a sparse set
pub fn len(self: Self) usize {
return self.dense.items.len;
}
pub fn empty(self: *Self) bool {
return self.dense.items.len == 0;
}
pub fn data(self: Self) []const SparseT {
return self.dense.items;
}
pub fn dataPtr(self: Self) *const []SparseT {
return &self.dense.items;
}
pub fn contains(self: Self, sparse: SparseT) bool {
const curr = self.page(sparse);
return curr < self.sparse.items.len and
self.sparse.items[curr] != null and
self.sparse.items[curr].?[self.offset(sparse)] != std.math.maxInt(SparseT);
}
/// Returns the position of an entity in a sparse set
pub fn index(self: Self, sparse: SparseT) SparseT {
std.debug.assert(self.contains(sparse));
return self.sparse.items[self.page(sparse)].?[self.offset(sparse)];
}
/// Assigns an entity to a sparse set
pub fn add(self: *Self, sparse: SparseT) void {
std.debug.assert(!self.contains(sparse));
// assure(page(entt))[offset(entt)] = packed.size()
self.assure(self.page(sparse))[self.offset(sparse)] = @as(SparseT, @intCast(self.dense.items.len));
_ = self.dense.append(sparse) catch unreachable;
}
/// Removes an entity from a sparse set
pub fn remove(self: *Self, sparse: SparseT) void {
std.debug.assert(self.contains(sparse));
const curr = self.page(sparse);
const pos = self.offset(sparse);
const last_dense = self.dense.items[self.dense.items.len - 1];
self.dense.items[self.sparse.items[curr].?[pos]] = last_dense;
self.sparse.items[self.page(last_dense)].?[self.offset(last_dense)] = self.sparse.items[curr].?[pos];
self.sparse.items[curr].?[pos] = std.math.maxInt(SparseT);
_ = self.dense.pop();
}
/// Swaps two entities in the internal packed and sparse arrays
pub fn swap(self: *Self, lhs: SparseT, rhs: SparseT) void {
var from = &self.sparse.items[self.page(lhs)].?[self.offset(lhs)];
var to = &self.sparse.items[self.page(rhs)].?[self.offset(rhs)];
std.mem.swap(SparseT, &self.dense.items[from.*], &self.dense.items[to.*]);
std.mem.swap(SparseT, from, to);
}
/// Sort elements according to the given comparison function
pub fn sort(self: *Self, context: anytype, comptime lessThan: *const fn (@TypeOf(context), SparseT, SparseT) bool) void {
std_sort_insertionSort_clone(SparseT, self.dense.items, context, lessThan);
for (self.dense.items, 0..) |_, i| {
const item = @as(SparseT, @intCast(i));
self.sparse.items[self.page(self.dense.items[i])].?[self.offset(self.dense.items[i])] = item;
}
}
/// Sort elements according to the given comparison function. Use this when a data array needs to stay in sync with the SparseSet
/// by passing in a "swap_context" that contains a "swap" method with a signature of fn(ctx, SparseT, SparseT) void
pub fn arrange(self: *Self, length: usize, context: anytype, comptime lessThan: *const fn (@TypeOf(context), SparseT, SparseT) bool, swap_context: anytype) void {
std_sort_insertionSort_clone(SparseT, self.dense.items[0..length], context, lessThan);
for (self.dense.items[0..length], 0..) |_, pos| {
var curr = @as(SparseT, @intCast(pos));
var next = self.index(self.dense.items[curr]);
while (curr != next) {
swap_context.swap(self.dense.items[curr], self.dense.items[next]);
self.sparse.items[self.page(self.dense.items[curr])].?[self.offset(self.dense.items[curr])] = curr;
curr = next;
next = self.index(self.dense.items[curr]);
}
}
}
/// Sort entities according to their order in another sparse set. Other is the master in this case.
pub fn respect(self: *Self, other: *Self) void {
var pos = @as(SparseT, 0);
var i = @as(SparseT, 0);
while (i < other.dense.items.len) : (i += 1) {
if (self.contains(other.dense.items[i])) {
if (other.dense.items[i] != self.dense.items[pos]) {
self.swap(self.dense.items[pos], other.dense.items[i]);
}
pos += 1;
}
}
}
pub fn clear(self: *Self) void {
for (self.sparse.items, 0..) |array, i| {
if (array) |arr| {
self.sparse.allocator.free(arr);
self.sparse.items[i] = null;
}
}
self.sparse.items.len = 0;
self.dense.items.len = 0;
}
pub fn reverseIterator(self: *Self) ReverseSliceIterator(SparseT) {
return ReverseSliceIterator(SparseT).init(self.dense.items);
}
};
}
fn printSet(set: *SparseSet(u32)) void {
std.debug.print("\nsparse -----\n", .{});
for (set.sparse.items) |sparse| {
std.debug.print("{any}\t", .{sparse});
}
std.debug.print("\ndense -----\n", .{});
for (set.dense.items) |dense| {
std.debug.print("{}\t", .{dense});
}
std.debug.print("\n\n", .{});
}
test "add/remove/clear" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
set.add(4);
set.add(3);
try std.testing.expectEqual(set.len(), 2);
try std.testing.expectEqual(set.index(4), 0);
try std.testing.expectEqual(set.index(3), 1);
set.remove(4);
try std.testing.expectEqual(set.len(), 1);
set.clear();
try std.testing.expectEqual(set.len(), 0);
}
test "grow" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
var i = @as(usize, std.math.maxInt(u8));
while (i > 0) : (i -= 1) {
set.add(@as(u32, @intCast(i)));
}
try std.testing.expectEqual(set.len(), std.math.maxInt(u8));
}
test "swap" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
set.add(4);
set.add(3);
try std.testing.expectEqual(set.index(4), 0);
try std.testing.expectEqual(set.index(3), 1);
set.swap(4, 3);
try std.testing.expectEqual(set.index(3), 0);
try std.testing.expectEqual(set.index(4), 1);
}
test "data() synced" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
set.add(0);
set.add(1);
set.add(2);
set.add(3);
var data = set.data();
try std.testing.expectEqual(data[1], 1);
try std.testing.expectEqual(set.len(), data.len);
set.remove(0);
set.remove(1);
try std.testing.expectEqual(set.len(), set.data().len);
}
test "iterate" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
set.add(0);
set.add(1);
set.add(2);
set.add(3);
var i: u32 = @as(u32, @intCast(set.len())) - 1;
var iter = set.reverseIterator();
while (iter.next()) |entity| {
try std.testing.expectEqual(i, entity);
if (i > 0) i -= 1;
}
}
test "respect 1" {
var set1 = SparseSet(u32).initPtr(std.testing.allocator);
defer set1.deinit();
var set2 = SparseSet(u32).initPtr(std.testing.allocator);
defer set2.deinit();
set1.add(3);
set1.add(4);
set1.add(5);
set1.add(6);
set1.add(7);
set2.add(8);
set2.add(6);
set2.add(4);
set1.respect(set2);
try std.testing.expectEqual(set1.dense.items[0], set2.dense.items[1]);
try std.testing.expectEqual(set1.dense.items[1], set2.dense.items[2]);
}
const desc_u32 = std.sort.desc(u32);
test "respect 2" {
var set = SparseSet(u32).initPtr(std.testing.allocator);
defer set.deinit();
set.add(5);
set.add(2);
set.add(4);
set.add(1);
set.add(3);
set.sort({}, desc_u32);
for (set.dense.items, 0..) |item, i| {
if (i < set.dense.items.len - 1) {
std.debug.assert(item > set.dense.items[i + 1]);
}
}
}

@ -1,89 +0,0 @@
const std = @import("std");
const utils = @import("utils.zig");
/// stores a single object of type T for each T added
pub const TypeStore = struct {
map: std.AutoHashMap(u32, []u8),
allocator: std.mem.Allocator,
pub fn init(allocator: std.mem.Allocator) TypeStore {
return TypeStore{
.map = std.AutoHashMap(u32, []u8).init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *TypeStore) void {
var iter = self.map.valueIterator();
while (iter.next()) |val_ptr| {
self.allocator.free(val_ptr.*);
}
self.map.deinit();
}
/// adds the instance by copying its bytes into the store
pub fn add(self: *TypeStore, instance: anytype) void {
var bytes = self.allocator.alloc(u8, @sizeOf(@TypeOf(instance))) catch unreachable;
std.mem.copy(u8, bytes, std.mem.asBytes(&instance));
_ = self.map.put(utils.typeId(@TypeOf(instance)), bytes) catch unreachable;
}
pub fn get(self: *TypeStore, comptime T: type) *T {
if (self.map.get(utils.typeId(T))) |bytes| {
return @as(*T, @ptrCast(@alignCast(bytes)));
}
unreachable;
}
pub fn getConst(self: *TypeStore, comptime T: type) T {
return self.get(T).*;
}
pub fn getOrAdd(self: *TypeStore, comptime T: type) *T {
if (!self.has(T)) {
var instance = std.mem.zeroes(T);
self.add(instance);
}
return self.get(T);
}
pub fn remove(self: *TypeStore, comptime T: type) void {
if (self.map.get(utils.typeId(T))) |bytes| {
self.allocator.free(bytes);
_ = self.map.remove(utils.typeId(T));
}
}
pub fn has(self: *TypeStore, comptime T: type) bool {
return self.map.contains(utils.typeId(T));
}
};
test "TypeStore" {
const Vector = struct { x: f32 = 0, y: f32 = 0, z: f32 = 0 };
var store = TypeStore.init(std.testing.allocator);
defer store.deinit();
var orig = Vector{ .x = 5, .y = 6, .z = 8 };
store.add(orig);
try std.testing.expect(store.has(Vector));
try std.testing.expectEqual(store.get(Vector).*, orig);
var v = store.get(Vector);
try std.testing.expectEqual(v.*, Vector{ .x = 5, .y = 6, .z = 8 });
v.*.x = 666;
var v2 = store.get(Vector);
try std.testing.expectEqual(v2.*, Vector{ .x = 666, .y = 6, .z = 8 });
store.remove(Vector);
try std.testing.expect(!store.has(Vector));
var v3 = store.getOrAdd(u32);
try std.testing.expectEqual(v3.*, 0);
v3.* = 777;
_ = store.get(u32);
try std.testing.expectEqual(v3.*, 777);
}

@ -1,139 +0,0 @@
const std = @import("std");
pub const ErasedPtr = struct {
ptr: usize,
pub fn init(ptr: anytype) ErasedPtr {
if (@sizeOf(@TypeOf(ptr)) == 0) {
return .{ .ptr = undefined };
}
return .{ .ptr = @intFromPtr(ptr) };
}
pub fn as(self: ErasedPtr, comptime T: type) *T {
if (@sizeOf(T) == 0)
return @as(*T, undefined);
return self.asPtr(*T);
}
pub fn asPtr(self: ErasedPtr, comptime PtrT: type) PtrT {
if (@sizeOf(PtrT) == 0)
return @as(PtrT, undefined);
return @as(PtrT, @ptrFromInt(self.ptr));
}
};
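// Added illustration (not part of the original file): round-trips a typed pointer
// through ErasedPtr, the mechanism used elsewhere to store heterogeneous pools.
test "ErasedPtr round trip" {
var value: u32 = 42;
const erased = ErasedPtr.init(&value);
try std.testing.expectEqual(@as(u32, 42), erased.as(u32).*);
}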
pub fn ReverseSliceIterator(comptime T: type) type {
return struct {
slice: []T,
index: usize,
pub fn init(slice: []T) @This() {
return .{
.slice = slice,
.index = slice.len,
};
}
pub fn next(self: *@This()) ?T {
if (self.index == 0) return null;
self.index -= 1;
return self.slice[self.index];
}
pub fn reset(self: *@This()) void {
self.index = self.slice.len;
}
};
}
/// sorts items using lessThan, applying the same swaps to sub_items so the two arrays stay in sync
pub fn sortSub(comptime T1: type, comptime T2: type, items: []T1, sub_items: []T2, comptime lessThan: *const fn (void, lhs: T1, rhs: T1) bool) void {
var i: usize = 1;
while (i < items.len) : (i += 1) {
const x = items[i];
const y = sub_items[i];
var j: usize = i;
while (j > 0 and lessThan({}, x, items[j - 1])) : (j -= 1) {
items[j] = items[j - 1];
sub_items[j] = sub_items[j - 1];
}
items[j] = x;
sub_items[j] = y;
}
}
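// Added illustration (not part of the original file): shows sortSub keeping a
// secondary array aligned with the primary array while it is sorted.
test "sortSub keeps arrays in sync" {
const asc_u32 = struct {
fn sort(_: void, lhs: u32, rhs: u32) bool {
return lhs < rhs;
}
};
var items = [_]u32{ 3, 1, 2 };
var subs = [_]u8{ 30, 10, 20 };
sortSub(u32, u8, items[0..], subs[0..], asc_u32.sort);
try std.testing.expectEqual(@as(u32, 1), items[0]);
try std.testing.expectEqual(@as(u8, 10), subs[0]);
try std.testing.expectEqual(@as(u32, 3), items[2]);
try std.testing.expectEqual(@as(u8, 30), subs[2]);
}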
pub fn sortSubSub(comptime T1: type, comptime T2: type, items: []T1, sub_items: []T2, context: anytype, comptime lessThan: *const fn (@TypeOf(context), lhs: T1, rhs: T1) bool) void {
var i: usize = 1;
while (i < items.len) : (i += 1) {
const x = items[i];
const y = sub_items[i];
var j: usize = i;
while (j > 0 and lessThan(context, x, items[j - 1])) : (j -= 1) {
items[j] = items[j - 1];
sub_items[j] = sub_items[j - 1];
}
items[j] = x;
sub_items[j] = y;
}
}
/// comptime string hashing for the type names
pub fn typeId(comptime T: type) u32 {
return hashStringFnv(u32, @typeName(T));
}
/// comptime string hashing for the type names
pub fn typeId64(comptime T: type) u64 {
return hashStringFnv(u64, @typeName(T));
}
/// u32 Fowler-Noll-Vo string hash
pub fn hashString(comptime str: []const u8) u32 {
return hashStringFnv(u32, str);
}
/// FowlerNollVo string hash. ReturnType should be u32/u64
pub fn hashStringFnv(comptime ReturnType: type, comptime str: []const u8) ReturnType {
std.debug.assert(ReturnType == u32 or ReturnType == u64);
const prime = if (ReturnType == u32) @as(u32, 16777619) else @as(u64, 1099511628211);
var value = if (ReturnType == u32) @as(u32, 2166136261) else @as(u64, 14695981039346656037);
for (str) |c| {
value = (value ^ @as(u32, @intCast(c))) *% prime;
}
return value;
}
/// comptime string hashing, djb2 by Dan Bernstein. Fails on large strings.
pub fn hashStringDjb2(comptime str: []const u8) comptime_int {
var hash: comptime_int = 5381;
for (str) |c| {
hash = ((hash << 5) + hash) + @as(comptime_int, @intCast(c));
}
return hash;
}
pub fn isComptime(comptime T: type) bool {
return switch (@typeInfo(T)) {
.ComptimeInt, .ComptimeFloat => true,
else => false,
};
}
test "ReverseSliceIterator" {
var slice = std.testing.allocator.alloc(usize, 10) catch unreachable;
defer std.testing.allocator.free(slice);
for (slice, 0..) |*item, i| {
item.* = i;
}
var iter = ReverseSliceIterator(usize).init(slice);
var i: usize = 9;
while (iter.next()) |val| {
try std.testing.expectEqual(i, val);
if (i > 0) i -= 1;
}
}

@ -1,295 +0,0 @@
const std = @import("std");
const utils = @import("utils.zig");
const Registry = @import("registry.zig").Registry;
const Storage = @import("registry.zig").Storage;
const Entity = @import("registry.zig").Entity;
const ReverseSliceIterator = @import("utils.zig").ReverseSliceIterator;
/// single item view. Iterating raw() directly is the fastest way to get at the data. An iterator is also available to iterate
/// either the Entities or the Components. If T is sorted, note that raw() will be in reverse order, so it should be looped
/// backwards. The iterators will return data in the sorted order though.
pub fn BasicView(comptime T: type) type {
return struct {
const Self = @This();
storage: *Storage(T),
pub fn init(storage: *Storage(T)) Self {
return Self{
.storage = storage,
};
}
pub fn len(self: Self) usize {
return self.storage.len();
}
/// Direct access to the array of components
pub fn raw(self: Self) []T {
return self.storage.raw();
}
/// Direct access to the array of entities
pub fn data(self: Self) []const Entity {
return self.storage.data();
}
/// Returns the object associated with an entity
pub fn get(self: Self, entity: Entity) *T {
return self.storage.get(entity);
}
pub fn getConst(self: *Self, entity: Entity) T {
return self.storage.getConst(entity);
}
pub fn iterator(self: Self) utils.ReverseSliceIterator(T) {
return utils.ReverseSliceIterator(T).init(self.storage.instances.items);
}
pub fn entityIterator(self: Self) utils.ReverseSliceIterator(Entity) {
return self.storage.set.reverseIterator();
}
};
}
pub fn MultiView(comptime n_includes: usize, comptime n_excludes: usize) type {
return struct {
const Self = @This();
registry: *Registry,
type_ids: [n_includes]u32,
exclude_type_ids: [n_excludes]u32,
pub const Iterator = struct {
view: *Self,
internal_it: ReverseSliceIterator(Entity),
pub fn init(view: *Self) Iterator {
const ptr = view.registry.components.get(view.type_ids[0]).?;
const internal_it = @as(*Storage(u8), @ptrFromInt(ptr)).set.reverseIterator();
return .{ .view = view, .internal_it = internal_it };
}
pub fn next(it: *Iterator) ?Entity {
while (it.internal_it.next()) |entity| blk: {
// entity must be in all other Storages
for (it.view.type_ids) |tid| {
const ptr = it.view.registry.components.get(tid).?;
if (!@as(*Storage(u1), @ptrFromInt(ptr)).contains(entity)) {
break :blk;
}
}
// entity must not be in all other excluded Storages
for (it.view.exclude_type_ids) |tid| {
const ptr = it.view.registry.components.get(tid).?;
if (@as(*Storage(u1), @ptrFromInt(ptr)).contains(entity)) {
break :blk;
}
}
return entity;
}
return null;
}
// Reset the iterator to the initial index
pub fn reset(it: *Iterator) void {
// Assign new iterator instance in case entities have been
// removed or added.
it.internal_it = it.getInternalIteratorInstance();
}
fn getInternalIteratorInstance(it: *Iterator) ReverseSliceIterator(Entity) {
const ptr = it.view.registry.components.get(it.view.type_ids[0]).?;
return @as(*Storage(u8), @ptrFromInt(ptr)).set.reverseIterator();
}
};
pub fn init(registry: *Registry, type_ids: [n_includes]u32, exclude_type_ids: [n_excludes]u32) Self {
return Self{
.registry = registry,
.type_ids = type_ids,
.exclude_type_ids = exclude_type_ids,
};
}
pub fn get(self: *Self, comptime T: type, entity: Entity) *T {
return self.registry.assure(T).get(entity);
}
pub fn getConst(self: *Self, comptime T: type, entity: Entity) T {
return self.registry.assure(T).getConst(entity);
}
fn sort(self: *Self) void {
// get our component counts in an array so we can sort the type_ids based on how many entities are in each
var sub_items: [n_includes]usize = undefined;
for (self.type_ids, 0..) |tid, i| {
const ptr = self.registry.components.get(tid).?;
const store = @as(*Storage(u8), @ptrFromInt(ptr));
sub_items[i] = store.len();
}
const asc_usize = struct {
fn sort(_: void, a: usize, b: usize) bool {
return a < b;
}
};
utils.sortSub(usize, u32, sub_items[0..], self.type_ids[0..], asc_usize.sort);
}
pub fn entityIterator(self: *Self) Iterator {
self.sort();
return Iterator.init(self);
}
};
}
test "single basic view" {
var store = Storage(f32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 30);
store.add(5, 50);
store.add(7, 70);
var view = BasicView(f32).init(&store);
try std.testing.expectEqual(view.len(), 3);
store.remove(7);
try std.testing.expectEqual(view.len(), 2);
var i: usize = 0;
var iter = view.iterator();
while (iter.next()) |comp| {
if (i == 0) try std.testing.expectEqual(comp, 50);
if (i == 1) try std.testing.expectEqual(comp, 30);
i += 1;
}
i = 0;
var entIter = view.entityIterator();
while (entIter.next()) |ent| {
if (i == 0) {
try std.testing.expectEqual(ent, 5);
try std.testing.expectEqual(view.getConst(ent), 50);
}
if (i == 1) {
try std.testing.expectEqual(ent, 3);
try std.testing.expectEqual(view.getConst(ent), 30);
}
i += 1;
}
}
test "single basic view data" {
var store = Storage(f32).init(std.testing.allocator);
defer store.deinit();
store.add(3, 30);
store.add(5, 50);
store.add(7, 70);
var view = BasicView(f32).init(&store);
try std.testing.expectEqual(view.get(3).*, 30);
for (view.data(), 0..) |entity, i| {
if (i == 0)
try std.testing.expectEqual(entity, 3);
if (i == 1)
try std.testing.expectEqual(entity, 5);
if (i == 2)
try std.testing.expectEqual(entity, 7);
}
for (view.raw(), 0..) |data, i| {
if (i == 0)
try std.testing.expectEqual(data, 30);
if (i == 1)
try std.testing.expectEqual(data, 50);
if (i == 2)
try std.testing.expectEqual(data, 70);
}
try std.testing.expectEqual(view.len(), 3);
}
test "basic multi view" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e0 = reg.create();
var e1 = reg.create();
var e2 = reg.create();
reg.add(e0, @as(i32, -0));
reg.add(e1, @as(i32, -1));
reg.add(e2, @as(i32, -2));
reg.add(e0, @as(u32, 0));
reg.add(e2, @as(u32, 2));
_ = reg.view(.{u32}, .{});
var view = reg.view(.{ i32, u32 }, .{});
var iterated_entities: usize = 0;
var iter = view.entityIterator();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 2);
iterated_entities = 0;
reg.remove(u32, e0);
iter.reset();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 1);
}
test "basic multi view with excludes" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e0 = reg.create();
var e1 = reg.create();
var e2 = reg.create();
reg.add(e0, @as(i32, -0));
reg.add(e1, @as(i32, -1));
reg.add(e2, @as(i32, -2));
reg.add(e0, @as(u32, 0));
reg.add(e2, @as(u32, 2));
reg.add(e2, @as(u8, 255));
var view = reg.view(.{ i32, u32 }, .{u8});
var iterated_entities: usize = 0;
var iter = view.entityIterator();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 1);
iterated_entities = 0;
reg.remove(u8, e2);
iter.reset();
while (iter.next()) |_| {
iterated_entities += 1;
}
try std.testing.expectEqual(iterated_entities, 2);
}

@ -1,100 +0,0 @@
const std = @import("std");
/// Processes are run by the Scheduler. They use a similar pattern to Allocators in that they are created and
/// added as a field in a parent struct, which is the actual process that will be run.
pub const Process = struct {
const State = enum(u8) { uninitialized, running, paused, succeeded, failed, aborted, finished };
updateFn: *const fn (self: *Process) void,
startFn: ?*const fn (self: *Process) void = null,
abortedFn: ?*const fn (self: *Process) void = null,
failedFn: ?*const fn (self: *Process) void = null,
succeededFn: ?*const fn (self: *Process) void = null,
deinit: *const fn (self: *Process, allocator: std.mem.Allocator) void = undefined,
state: State = .uninitialized,
stopped: bool = false,
next: ?*Process = null,
pub fn getParent(self: *Process, comptime T: type) *T {
return @fieldParentPtr(T, "process", self);
}
/// Terminates a process with success if it's still alive
pub fn succeed(self: *Process) void {
if (self.alive()) self.state = .succeeded;
}
/// Terminates a process with errors if it's still alive
pub fn fail(self: *Process) void {
if (self.alive()) self.state = .failed;
}
/// Stops a process if it's in a running state
pub fn pause(self: *Process) void {
if (self.state == .running) self.state = .paused;
}
/// Restarts a process if it's paused
pub fn unpause(self: *Process) void {
if (self.state == .paused) self.state = .running;
}
/// Aborts a process if it's still alive
pub fn abort(self: *Process, immediately: bool) void {
if (self.alive()) {
self.state = .aborted;
if (immediately) {
self.tick();
}
}
}
/// Returns true if a process is either running or paused
pub fn alive(self: Process) bool {
return self.state == .running or self.state == .paused;
}
/// Returns true if a process is already terminated
pub fn dead(self: Process) bool {
return self.state == .finished;
}
pub fn rejected(self: Process) bool {
return self.stopped;
}
/// Updates a process and its internal state
pub fn tick(self: *Process) void {
switch (self.state) {
.uninitialized => {
if (self.startFn) |func| func(self);
self.state = .running;
},
.running => {
self.updateFn(self);
},
else => {},
}
// if it's dead, it must be notified and removed immediately
switch (self.state) {
.succeeded => {
if (self.succeededFn) |func| func(self);
self.state = .finished;
},
.failed => {
if (self.failedFn) |func| func(self);
self.state = .finished;
self.stopped = true;
},
.aborted => {
if (self.abortedFn) |func| func(self);
self.state = .finished;
self.stopped = true;
},
else => {},
}
}
};
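// Added usage sketch (not part of the original file): a minimal process driven by
// calling tick directly (normally the Scheduler does this). The first tick moves it
// from uninitialized to running; the second runs updateFn, which succeeds and
// finishes it. The OneShot wrapper is made up for the example.
test "Process ticks to completion" {
const OneShot = struct {
process: Process,
fn update(process: *Process) void {
process.succeed();
}
};
var one_shot = OneShot{ .process = .{ .updateFn = OneShot.update } };
one_shot.process.tick();
try std.testing.expect(one_shot.process.alive());
one_shot.process.tick();
try std.testing.expect(one_shot.process.dead());
}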

@ -1,238 +0,0 @@
const std = @import("std");
const Process = @import("process.zig").Process;
/// Cooperative scheduler for processes. Each process is invoked once per tick. If a process terminates, it's
/// removed automatically from the scheduler and it's never invoked again. A process can also have a child. In
/// this case, the process is replaced with its child when it terminates if it returns with success. In case of errors,
/// both the process and its child are discarded. In order to invoke all scheduled processes, call the `update` member function.
/// Processes add themselves by calling `attach` and must satisfy the following conditions:
/// - have a field `process: Process`
/// - have a method `initialize(self: *@This(), data: anytype) void` that initializes all fields and takes in the data passed to `attach`
/// - when initializing the `process` field it must be given an `updateFn`. All other callbacks are optional.
/// - in any callback you can get your original struct back via `process.getParent(@This())`
pub const Scheduler = struct {
processes: std.ArrayList(*Process),
allocator: std.mem.Allocator,
/// helper to create and prepare a process
fn createProcessHandler(comptime T: type, data: anytype, allocator: std.mem.Allocator) *Process {
var proc = allocator.create(T) catch unreachable;
proc.initialize(data);
// get a closure so that we can safely deinit this later
proc.process.deinit = struct {
fn deinit(process: *Process, alloc: std.mem.Allocator) void {
if (process.next) |next_process| {
next_process.deinit(next_process, alloc);
}
alloc.destroy(@fieldParentPtr(T, "process", process));
}
}.deinit;
return &proc.process;
}
/// returned when appending a process so that sub-processes can be added to the process
const Continuation = struct {
process: *Process,
allocator: std.mem.Allocator,
pub fn init(process: *Process, allocator: std.mem.Allocator) Continuation {
return .{ .process = process, .allocator = allocator };
}
pub fn next(self: *@This(), comptime T: type, data: anytype) *@This() {
self.process.next = createProcessHandler(T, data, self.allocator);
self.process = self.process.next.?;
return self;
}
};
pub fn init(allocator: std.mem.Allocator) Scheduler {
return .{
.processes = std.ArrayList(*Process).init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *Scheduler) void {
self.clear();
self.processes.deinit();
}
/// Schedules a process for the next tick
pub fn attach(self: *Scheduler, comptime T: type, data: anytype) Continuation {
std.debug.assert(@hasDecl(T, "initialize"));
std.debug.assert(@hasField(T, "process"));
var process = createProcessHandler(T, data, self.allocator);
process.tick();
self.processes.append(process) catch unreachable;
return Continuation.init(process, self.allocator);
}
fn updateProcess(process: **Process, allocator: std.mem.Allocator) bool {
const current_process = process.*;
current_process.tick();
if (current_process.dead()) {
if (!current_process.rejected() and current_process.next != null) {
// grab the next process and null it out so we don't double-free it later
const next_process = current_process.next.?;
current_process.next = null;
process.* = next_process;
// kill the old Process parent
current_process.deinit(current_process, allocator);
return updateProcess(process, allocator);
} else {
return true;
}
}
return false;
}
/// Updates all scheduled processes
pub fn update(self: *Scheduler) void {
if (self.processes.items.len == 0) return;
var i: usize = self.processes.items.len - 1;
while (true) : (i -= 1) {
if (updateProcess(&self.processes.items[i], self.allocator)) {
var dead_process = self.processes.swapRemove(i);
dead_process.deinit(dead_process, self.allocator);
}
if (i == 0) break;
}
}
/// gets the number of processes still running
pub fn len(self: Scheduler) usize {
return self.processes.items.len;
}
/// resets the scheduler to its initial state and discards all the processes
pub fn clear(self: *Scheduler) void {
for (self.processes.items) |process| {
process.deinit(process, self.allocator);
}
self.processes.items.len = 0;
}
/// Aborts all scheduled processes. Unless an immediate operation is requested, the abort is scheduled for the next tick
pub fn abort(self: *Scheduler, immediately: bool) void {
for (self.processes.items) |handler| {
handler.abort(immediately);
}
}
};
test "scheduler.update" {
std.debug.print("\n", .{});
const Tester = struct {
process: Process,
fart: usize,
pub fn initialize(self: *@This(), data: anytype) void {
self.process = .{
.startFn = start,
.updateFn = update,
.abortedFn = aborted,
.failedFn = failed,
.succeededFn = succeeded,
};
self.fart = data;
}
fn start(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("start {}\n", .{self.fart});
}
fn aborted(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("aborted {}\n", .{self.fart});
}
fn failed(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("failed {}\n", .{self.fart});
}
fn succeeded(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("succeeded {}\n", .{self.fart});
}
fn update(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("update {}\n", .{self.fart});
process.succeed();
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
var continuation = scheduler.attach(Tester, 33);
_ = continuation.next(Tester, 66).next(Tester, 88).next(Tester, 99);
scheduler.update();
scheduler.update();
scheduler.update();
scheduler.update();
scheduler.update();
}
test "scheduler.clear" {
const Tester = struct {
process: Process,
pub fn initialize(self: *@This(), _: anytype) void {
self.process = .{ .updateFn = update };
}
fn update(_: *Process) void {
std.debug.assert(false);
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
var continuation = scheduler.attach(Tester, {});
_ = continuation.next(Tester, {});
scheduler.clear();
scheduler.update();
}
test "scheduler.attach.next" {
const Tester = struct {
process: Process,
counter: *usize,
pub fn initialize(self: *@This(), data: anytype) void {
self.process = .{ .updateFn = update };
self.counter = data;
}
fn update(process: *Process) void {
const self = process.getParent(@This());
self.counter.* += 1;
process.succeed();
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
var counter: usize = 0;
var continuation = scheduler.attach(Tester, &counter);
_ = continuation.next(Tester, &counter);
scheduler.update();
scheduler.update();
try std.testing.expectEqual(counter, 2);
}

@ -1,99 +0,0 @@
const std = @import("std");
const utils = @import("../ecs/utils.zig");
const Cache = @import("cache.zig").Cache;
pub const Assets = struct {
caches: std.AutoHashMap(u32, usize),
allocator: std.mem.Allocator,
pub fn init(allocator: std.mem.Allocator) Assets {
return Assets{
.caches = std.AutoHashMap(u32, usize).init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *Assets) void {
var iter = self.caches.iterator();
while (iter.next()) |ptr| {
// HACK: we don't know the Type here but we need to call deinit
@as(*Cache(u1), @ptrFromInt(ptr.value_ptr.*)).deinit();
}
self.caches.deinit();
}
pub fn get(self: *Assets, comptime AssetT: type) *Cache(AssetT) {
if (self.caches.get(utils.typeId(AssetT))) |tid| {
return @as(*Cache(AssetT), @ptrFromInt(tid));
}
var cache = Cache(AssetT).initPtr(self.allocator);
_ = self.caches.put(utils.typeId(AssetT), @intFromPtr(cache)) catch unreachable;
return cache;
}
pub fn load(self: *Assets, id: u16, comptime loader: anytype) ReturnType(loader, false) {
return self.get(ReturnType(loader, true)).load(id, loader);
}
fn ReturnType(comptime loader: anytype, comptime strip_ptr: bool) type {
var ret = @typeInfo(@typeInfo(@TypeOf(@field(loader, "load"))).Pointer.child).Fn.return_type.?;
if (strip_ptr) {
return std.meta.Child(ret);
}
return ret;
}
};
test "assets" {
const Thing = struct {
fart: i32,
pub fn deinit(self: *@This()) void {
std.testing.allocator.destroy(self);
}
};
const OtherThing = struct {
fart: i32,
pub fn deinit(self: *@This()) void {
std.testing.allocator.destroy(self);
}
};
const OtherThingLoadArgs = struct {
// Use actual field "load" as function pointer to avoid zig v0.10.0
// compiler error: "error: no field named 'load' in struct '...'"
load: *const fn (_: @This()) *OtherThing,
pub fn loadFn(_: @This()) *OtherThing {
return std.testing.allocator.create(OtherThing) catch unreachable;
}
};
const ThingLoadArgs = struct {
// Use actual field "load" as function pointer to avoid zig v0.10.0
// compiler error: "error: no field named 'load' in struct '...'"
load: *const fn (_: @This()) *Thing,
pub fn loadFn(_: @This()) *Thing {
return std.testing.allocator.create(Thing) catch unreachable;
}
};
var assets = Assets.init(std.testing.allocator);
defer assets.deinit();
_ = assets.get(Thing).load(6, ThingLoadArgs{ .load = ThingLoadArgs.loadFn });
try std.testing.expectEqual(assets.get(Thing).size(), 1);
_ = assets.load(4, ThingLoadArgs{ .load = ThingLoadArgs.loadFn });
try std.testing.expectEqual(assets.get(Thing).size(), 2);
_ = assets.get(OtherThing).load(6, OtherThingLoadArgs{ .load = OtherThingLoadArgs.loadFn });
try std.testing.expectEqual(assets.get(OtherThing).size(), 1);
_ = assets.load(8, OtherThingLoadArgs{ .load = OtherThingLoadArgs.loadFn });
try std.testing.expectEqual(assets.get(OtherThing).size(), 2);
assets.get(OtherThing).clear();
try std.testing.expectEqual(assets.get(OtherThing).size(), 0);
}

@ -1,116 +0,0 @@
const std = @import("std");
const ErasedPtr = @import("../ecs/utils.zig").ErasedPtr;
/// Simple cache for resources of a given type. If the resource type has a deinit method it will be called when clear
/// or remove is called. A "loader", which is passed to "load", is a struct with one method:
/// - load(self: @This()) *T.
pub fn Cache(comptime T: type) type {
return struct {
const Self = @This();
safe_deinit: *const fn (*@This()) void,
resources: std.AutoHashMap(u32, *T),
allocator: ?std.mem.Allocator = null,
pub fn initPtr(allocator: std.mem.Allocator) *@This() {
var cache = allocator.create(@This()) catch unreachable;
cache.safe_deinit = struct {
fn deinit(self: *Self) void {
self.clear();
self.resources.deinit();
self.allocator.?.destroy(self);
}
}.deinit;
cache.resources = std.AutoHashMap(u32, *T).init(allocator);
cache.allocator = allocator;
return cache;
}
pub fn init(allocator: std.mem.Allocator) @This() {
return .{
.safe_deinit = struct {
fn deinit(self: *Self) void {
self.clear();
self.resources.deinit();
}
}.deinit,
.resources = std.AutoHashMap(u32, *T).init(allocator),
};
}
pub fn deinit(self: *@This()) void {
self.safe_deinit(self);
}
pub fn load(self: *@This(), id: u32, comptime loader: anytype) @typeInfo(@typeInfo(@TypeOf(@field(loader, "load"))).Pointer.child).Fn.return_type.? {
if (self.resources.get(id)) |resource| {
return resource;
}
var resource = loader.load(loader);
_ = self.resources.put(id, resource) catch unreachable;
return resource;
}
pub fn contains(self: *@This(), id: u32) bool {
return self.resources.contains(id);
}
pub fn remove(self: *@This(), id: u32) void {
if (self.resources.fetchRemove(id)) |kv| {
if (@hasDecl(T, "deinit")) {
@call(.always_inline, T.deinit, .{kv.value});
}
}
}
pub fn clear(self: *@This()) void {
// optionally deinit any resources that have a deinit method
if (@hasDecl(T, "deinit")) {
var iter = self.resources.iterator();
while (iter.next()) |kv| {
@call(.always_inline, T.deinit, .{kv.value_ptr.*});
}
}
self.resources.clearAndFree();
}
pub fn size(self: @This()) usize {
return self.resources.count();
}
};
}
test "cache" {
const utils = @import("../ecs/utils.zig");
const Thing = struct {
fart: i32,
pub fn deinit(self: *@This()) void {
std.testing.allocator.destroy(self);
}
};
const ThingLoadArgs = struct {
// Use actual field "load" as function pointer to avoid zig v0.10.0
// compiler error: "error: no field named 'load' in struct '...'"
load: *const fn (self: @This()) *Thing,
pub fn loadFn(self: @This()) *Thing {
_ = self;
return std.testing.allocator.create(Thing) catch unreachable;
}
};
var cache = Cache(Thing).init(std.testing.allocator);
defer cache.deinit();
_ = cache.load(utils.hashString("my/id"), ThingLoadArgs{ .load = ThingLoadArgs.loadFn });
_ = cache.load(utils.hashString("another/id"), ThingLoadArgs{ .load = ThingLoadArgs.loadFn });
try std.testing.expectEqual(cache.size(), 2);
cache.remove(utils.hashString("my/id"));
try std.testing.expectEqual(cache.size(), 1);
cache.clear();
try std.testing.expectEqual(cache.size(), 0);
}

@ -1,88 +0,0 @@
const std = @import("std");
/// wraps either a free function or a bound function that takes an Event as a parameter
pub fn Delegate(comptime Event: type) type {
return struct {
const Self = @This();
ctx_ptr_address: usize = 0,
callback: union(enum) {
free: *const fn (Event) void,
bound: *const fn (usize, Event) void,
},
/// sets a bound function as the Delegate callback
pub fn initBound(ctx: anytype, comptime fn_name: []const u8) Self {
std.debug.assert(@typeInfo(@TypeOf(ctx)) == .Pointer);
std.debug.assert(@intFromPtr(ctx) != 0);
const T = @TypeOf(ctx);
const BaseT = @typeInfo(T).Pointer.child;
return Self{
.ctx_ptr_address = @intFromPtr(ctx),
.callback = .{
.bound = struct {
fn cb(self: usize, param: Event) void {
@call(.always_inline, @field(BaseT, fn_name), .{ @as(T, @ptrFromInt(self)), param });
}
}.cb,
},
};
}
/// sets a free function as the Delegate callback
pub fn initFree(func: *const fn (Event) void) Self {
return Self{
.callback = .{ .free = func },
};
}
pub fn trigger(self: Self, param: Event) void {
switch (self.callback) {
.free => |func| @call(.auto, func, .{param}),
.bound => |func| @call(.auto, func, .{ self.ctx_ptr_address, param }),
}
}
pub fn containsFree(self: Self, callback: *const fn (Event) void) bool {
return switch (self.callback) {
.free => |func| func == callback,
else => false,
};
}
pub fn containsBound(self: Self, ctx: anytype) bool {
std.debug.assert(@intFromPtr(ctx) != 0);
std.debug.assert(@typeInfo(@TypeOf(ctx)) == .Pointer);
return switch (self.callback) {
.bound => @intFromPtr(ctx) == self.ctx_ptr_address,
else => false,
};
}
};
}
fn tester(param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
const Thing = struct {
field: f32 = 0,
pub fn tester(_: *Thing, param: u32) void {
std.testing.expectEqual(@as(u32, 777), param) catch unreachable;
}
};
test "free Delegate" {
var d = Delegate(u32).initFree(tester);
d.trigger(666);
}
test "bound Delegate" {
var thing = Thing{};
var d = Delegate(u32).initBound(&thing, "tester");
d.trigger(777);
}

@ -1,47 +0,0 @@
const std = @import("std");
const Sink = @import("sink.zig").Sink;
const Signal = @import("signal.zig").Signal;
const utils = @import("../ecs/utils.zig");
pub const Dispatcher = struct {
signals: std.AutoHashMap(u32, usize),
allocator: std.mem.Allocator,
pub fn init(allocator: std.mem.Allocator) Dispatcher {
return Dispatcher{
.signals = std.AutoHashMap(u32, usize).init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *Dispatcher) void {
var iter = self.signals.iterator();
while (iter.next()) |ptr| {
// HACK: we don't know the Type here but we need to call deinit
var signal = @as(*Signal(void), @ptrFromInt(ptr.value_ptr.*));
signal.deinit();
}
self.signals.deinit();
}
fn assure(self: *Dispatcher, comptime T: type) *Signal(T) {
var type_id = utils.typeId(T);
if (self.signals.get(type_id)) |value| {
return @as(*Signal(T), @ptrFromInt(value));
}
var signal = Signal(T).create(self.allocator);
var signal_ptr = @intFromPtr(signal);
_ = self.signals.put(type_id, signal_ptr) catch unreachable;
return signal;
}
pub fn sink(self: *Dispatcher, comptime T: type) Sink(T) {
return self.assure(T).sink();
}
pub fn trigger(self: *Dispatcher, comptime T: type, value: T) void {
self.assure(T).publish(value);
}
};
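// Added usage sketch (not part of the original file): wires a free-function listener
// to the u32 event channel via sink and fires it with trigger. The onU32 listener is
// made up for the example.
fn onU32(value: u32) void {
std.testing.expectEqual(@as(u32, 123), value) catch unreachable;
}
test "Dispatcher sink/trigger" {
var dispatcher = Dispatcher.init(std.testing.allocator);
defer dispatcher.deinit();
var sink = dispatcher.sink(u32);
sink.connect(onU32);
dispatcher.trigger(u32, 123);
}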

@ -1,105 +0,0 @@
const std = @import("std");
const Sink = @import("sink.zig").Sink;
const Delegate = @import("delegate.zig").Delegate;
pub fn Signal(comptime Event: type) type {
return struct {
const Self = @This();
calls: std.ArrayList(Delegate(Event)),
allocator: ?std.mem.Allocator = null,
pub fn init(allocator: std.mem.Allocator) Self {
// we purposely do not store the allocator locally in this case so we know not to destroy ourselves in deinit!
return Self{
.calls = std.ArrayList(Delegate(Event)).init(allocator),
};
}
/// heap allocates a Signal
pub fn create(allocator: std.mem.Allocator) *Self {
var signal = allocator.create(Self) catch unreachable;
signal.calls = std.ArrayList(Delegate(Event)).init(allocator);
signal.allocator = allocator;
return signal;
}
pub fn deinit(self: *Self) void {
self.calls.deinit();
// optionally destroy ourself as well if we came from an allocator
if (self.allocator) |allocator| allocator.destroy(self);
}
pub fn size(self: Self) usize {
return self.calls.items.len;
}
pub fn empty(self: Self) bool {
return self.size() == 0;
}
/// Disconnects all the listeners from a signal
pub fn clear(self: *Self) void {
self.calls.items.len = 0;
}
pub fn publish(self: Self, arg: Event) void {
for (self.calls.items) |call| {
call.trigger(arg);
}
}
/// Constructs a sink that is allowed to modify a given signal
pub fn sink(self: *Self) Sink(Event) {
return Sink(Event).init(self);
}
};
}
fn tester(param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
const Thing = struct {
field: f32 = 0,
pub fn tester(_: *Thing, param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
};
test "Signal/Sink" {
var signal = Signal(u32).init(std.testing.allocator);
defer signal.deinit();
var sink = signal.sink();
sink.connect(tester);
try std.testing.expectEqual(@as(usize, 1), signal.size());
// bound listener
var thing = Thing{};
sink.connectBound(&thing, "tester");
signal.publish(666);
sink.disconnect(tester);
signal.publish(666);
try std.testing.expectEqual(@as(usize, 1), signal.size());
sink.disconnectBound(&thing);
try std.testing.expectEqual(@as(usize, 0), signal.size());
}
test "Sink Before null" {
var signal = Signal(u32).init(std.testing.allocator);
defer signal.deinit();
var sink = signal.sink();
sink.connect(tester);
try std.testing.expectEqual(@as(usize, 1), signal.size());
var thing = Thing{};
sink.before(null).connectBound(&thing, "tester");
try std.testing.expectEqual(@as(usize, 2), signal.size());
}

@ -1,115 +0,0 @@
const std = @import("std");
const Signal = @import("signal.zig").Signal;
const Delegate = @import("delegate.zig").Delegate;
/// helper used to connect and disconnect listeners on the fly from a Signal. Listeners are wrapped in Delegates
/// and can be either free functions or functions bound to a struct.
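///
/// Usage sketch (the listener names are illustrative): a Sink is obtained from a Signal and
/// typically used right away rather than stored:
///     var signal = Signal(i32).init(allocator);
///     defer signal.deinit();
///     signal.sink().connect(onEvent);                    // free-function listener
///     signal.sink().connectBound(&listener, "onEvent");  // bound-method listener
///     signal.publish(42);                                // both listeners are invoked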
pub fn Sink(comptime Event: type) type {
return struct {
const Self = @This();
insert_index: usize,
/// the Signal this Sink is temporarily wrapping
var owning_signal: *Signal(Event) = undefined;
pub fn init(signal: *Signal(Event)) Self {
owning_signal = signal;
return Self{ .insert_index = owning_signal.calls.items.len };
}
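/// returns a Sink whose insert point sits just before the given free-function listener, so a
/// following connect/connectBound inserts ahead of it; a null or unknown callback leaves the insert point unchanged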
pub fn before(self: Self, callback: ?*const fn (Event) void) Self {
if (callback) |cb| {
if (self.indexOf(cb)) |index| {
return Self{ .insert_index = index };
}
}
return self;
}
pub fn beforeBound(self: Self, ctx: anytype) Self {
if (@typeInfo(@TypeOf(ctx)) == .Pointer) {
if (self.indexOfBound(ctx)) |index| {
return Self{ .insert_index = index };
}
}
return self;
}
pub fn connect(self: Self, callback: *const fn (Event) void) void {
std.debug.assert(self.indexOf(callback) == null);
_ = owning_signal.calls.insert(self.insert_index, Delegate(Event).initFree(callback)) catch unreachable;
}
pub fn connectBound(self: Self, ctx: anytype, comptime fn_name: []const u8) void {
std.debug.assert(self.indexOfBound(ctx) == null);
_ = owning_signal.calls.insert(self.insert_index, Delegate(Event).initBound(ctx, fn_name)) catch unreachable;
}
pub fn disconnect(self: Self, callback: *const fn (Event) void) void {
if (self.indexOf(callback)) |index| {
_ = owning_signal.calls.swapRemove(index);
}
}
pub fn disconnectBound(self: Self, ctx: anytype) void {
if (self.indexOfBound(ctx)) |index| {
_ = owning_signal.calls.swapRemove(index);
}
}
fn indexOf(_: Self, callback: *const fn (Event) void) ?usize {
for (owning_signal.calls.items, 0..) |call, i| {
if (call.containsFree(callback)) {
return i;
}
}
return null;
}
fn indexOfBound(_: Self, ctx: anytype) ?usize {
for (owning_signal.calls.items, 0..) |call, i| {
if (call.containsBound(ctx)) {
return i;
}
}
return null;
}
};
}
fn tester(param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
const Thing = struct {
field: f32 = 0,
pub fn tester(_: *Thing, param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
};
test "Sink Before free" {
var signal = Signal(u32).init(std.testing.allocator);
defer signal.deinit();
signal.sink().connect(tester);
try std.testing.expectEqual(signal.sink().indexOf(tester).?, 0);
var thing = Thing{};
signal.sink().before(tester).connectBound(&thing, "tester");
try std.testing.expectEqual(signal.sink().indexOfBound(&thing).?, 0);
}
test "Sink Before bound" {
var signal = Signal(u32).init(std.testing.allocator);
defer signal.deinit();
var thing = Thing{};
signal.sink().connectBound(&thing, "tester");
try std.testing.expectEqual(signal.sink().indexOfBound(&thing).?, 0);
signal.sink().beforeBound(&thing).connect(tester);
try std.testing.expectEqual(signal.sink().indexOf(tester).?, 0);
}

@ -1,23 +0,0 @@
// include all files with tests
comptime {
// ecs
_ = @import("ecs/actor.zig");
_ = @import("ecs/component_storage.zig");
_ = @import("ecs/entity.zig");
_ = @import("ecs/handles.zig");
_ = @import("ecs/sparse_set.zig");
_ = @import("ecs/views.zig");
_ = @import("ecs/groups.zig");
_ = @import("ecs/type_store.zig");
// signals
_ = @import("signals/delegate.zig");
_ = @import("signals/signal.zig");
// resources
_ = @import("resources/cache.zig");
_ = @import("resources/assets.zig");
// process
_ = @import("process/scheduler.zig");
}

@ -1,39 +0,0 @@
const std = @import("std");
const Dispatcher = @import("ecs").Dispatcher;
fn tester(param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
fn tester2(param: i32) void {
std.testing.expectEqual(@as(i32, -543), param) catch unreachable;
}
const Thing = struct {
field: f32 = 0,
pub fn testU32(_: *Thing, param: u32) void {
std.testing.expectEqual(@as(u32, 666), param) catch unreachable;
}
pub fn testI32(_: *Thing, param: i32) void {
std.testing.expectEqual(@as(i32, -543), param) catch unreachable;
}
};
test "Dispatcher" {
var thing = Thing{};
var d = Dispatcher.init(std.testing.allocator);
defer d.deinit();
var sink = d.sink(u32);
sink.connect(tester);
sink.connectBound(&thing, "testU32");
d.trigger(u32, 666);
var sink2 = d.sink(i32);
sink2.connect(tester2);
sink2.connectBound(&thing, "testI32");
d.trigger(i32, -543);
}

@ -1,242 +0,0 @@
const std = @import("std");
const ecs = @import("ecs");
const Registry = @import("ecs").Registry;
const BasicGroup = @import("ecs").BasicGroup;
const OwningGroup = @import("ecs").OwningGroup;
const Velocity = struct { x: f32 = 0, y: f32 = 0 };
const Position = struct { x: f32 = 0, y: f32 = 0 };
const Empty = struct {};
const Sprite = struct { x: f32 = 0 };
const Transform = struct { x: f32 = 0 };
const Renderable = struct { x: f32 = 0 };
const Rotation = struct { x: f32 = 0 };
fn printStore(store: anytype, name: []const u8) void {
std.debug.print("--- {} ---\n", .{name});
for (store.set.dense.items, 0..) |e, i| {
std.debug.print("e[{}] s[{}]{}", .{ e, store.set.page(store.set.dense.items[i]), store.set.sparse.items[store.set.page(store.set.dense.items[i])] });
std.debug.print(" ({d:.2}) ", .{store.instances.items[i]});
}
std.debug.print("\n", .{});
}
test "sort BasicGroup by Entity" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{}, .{ Sprite, Renderable }, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
}
const SortContext = struct {
group: BasicGroup,
fn sort(this: *@This(), a: ecs.Entity, b: ecs.Entity) bool {
const real_a = this.group.getConst(Sprite, a);
const real_b = this.group.getConst(Sprite, b);
return real_a.x > real_b.x;
}
};
var context = SortContext{ .group = group };
group.sort(ecs.Entity, &context, SortContext.sort);
var val: f32 = 0;
var iter = group.iterator();
while (iter.next()) |entity| {
try std.testing.expectEqual(val, group.getConst(Sprite, entity).x);
val += 1;
}
}
test "sort BasicGroup by Component" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{}, .{ Sprite, Renderable }, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
}
const SortContext = struct {
fn sort(_: void, a: Sprite, b: Sprite) bool {
return a.x > b.x;
}
};
group.sort(Sprite, {}, SortContext.sort);
var val: f32 = 0;
var iter = group.iterator();
while (iter.next()) |entity| {
try std.testing.expectEqual(val, group.getConst(Sprite, entity).x);
val += 1;
}
}
test "sort OwningGroup by Entity" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{ Sprite, Renderable }, .{}, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
}
const SortContext = struct {
group: OwningGroup,
fn sort(this: @This(), a: ecs.Entity, b: ecs.Entity) bool {
const sprite_a = this.group.getConst(Sprite, a);
const sprite_b = this.group.getConst(Sprite, b);
return sprite_a.x > sprite_b.x;
}
};
const context = SortContext{ .group = group };
group.sort(ecs.Entity, context, SortContext.sort);
var val: f32 = 0;
var iter = group.iterator(struct { s: *Sprite, r: *Renderable });
while (iter.next()) |entity| {
try std.testing.expectEqual(val, entity.s.*.x);
val += 1;
}
}
test "sort OwningGroup by Component" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{ Sprite, Renderable }, .{}, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
}
const SortContext = struct {
fn sort(_: void, a: Sprite, b: Sprite) bool {
return a.x > b.x;
}
};
group.sort(Sprite, {}, SortContext.sort);
var val: f32 = 0;
var iter = group.iterator(struct { s: *Sprite, r: *Renderable });
while (iter.next()) |entity| {
try std.testing.expectEqual(val, entity.s.*.x);
val += 1;
}
}
test "sort OwningGroup by Component ensure unsorted non-matches" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group = reg.group(.{ Sprite, Renderable }, .{}, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
var e2 = reg.create();
reg.add(e2, Sprite{ .x = @as(f32, @floatFromInt(i + 1 * 50)) });
}
try std.testing.expectEqual(group.len(), 5);
try std.testing.expectEqual(reg.len(Sprite), 10);
const SortContext = struct {
fn sort(_: void, a: Sprite, b: Sprite) bool {
// Sprites with x >= 50 shouldn't match in the group
std.testing.expect(a.x < 50 and b.x < 50) catch unreachable;
return a.x > b.x;
}
};
group.sort(Sprite, {}, SortContext.sort);
// all the non-group Sprites should still be present, unsorted, at the end of the Sprite storage
var view = reg.view(.{Sprite}, .{});
var count: usize = 0;
var iter = view.iterator();
while (iter.next()) |sprite| {
count += 1;
// all Sprites with x >= 50 should be at the end, and the view iterates backwards
if (count < 6) {
try std.testing.expect(sprite.x >= 50);
}
}
}
test "nested OwningGroups add/remove components" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group1 = reg.group(.{Sprite}, .{Renderable}, .{});
var group2 = reg.group(.{ Sprite, Transform }, .{Renderable}, .{});
var group3 = reg.group(.{ Sprite, Transform }, .{ Renderable, Rotation }, .{});
try std.testing.expect(!reg.sortable(Sprite));
try std.testing.expect(!reg.sortable(Transform));
try std.testing.expect(reg.sortable(Renderable));
var e1 = reg.create();
reg.addTypes(e1, .{ Sprite, Renderable, Rotation });
try std.testing.expectEqual(group1.len(), 1);
try std.testing.expectEqual(group2.len(), 0);
try std.testing.expectEqual(group3.len(), 0);
reg.add(e1, Transform{});
try std.testing.expectEqual(group3.len(), 1);
reg.remove(Sprite, e1);
try std.testing.expectEqual(group1.len(), 0);
try std.testing.expectEqual(group2.len(), 0);
try std.testing.expectEqual(group3.len(), 0);
}
test "nested OwningGroups entity order" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var group1 = reg.group(.{Sprite}, .{Renderable}, .{});
var group2 = reg.group(.{ Sprite, Transform }, .{Renderable}, .{});
var i: usize = 0;
while (i < 5) : (i += 1) {
var e = reg.create();
reg.add(e, Sprite{ .x = @as(f32, @floatFromInt(i)) });
reg.add(e, Renderable{ .x = @as(f32, @floatFromInt(i)) });
}
try std.testing.expectEqual(group1.len(), 5);
try std.testing.expectEqual(group2.len(), 0);
_ = reg.assure(Sprite);
_ = reg.assure(Transform);
// printStore(sprite_store, "Sprite");
reg.add(1, Transform{ .x = 1 });
// printStore(sprite_store, "Sprite");
// printStore(transform_store, "Transform");
// std.debug.print("group2.current: {}\n", .{group2.group_data.current});
}

@ -1,128 +0,0 @@
const std = @import("std");
const ecs = @import("ecs");
const Registry = @import("ecs").Registry;
const Velocity = struct { x: f32, y: f32 };
const Position = struct { x: f32 = 0, y: f32 = 0 };
const Empty = struct {};
const BigOne = struct { pos: Position, vel: Velocity, accel: Velocity };
test "entity traits" {
_ = ecs.EntityTraitsType(.large).init();
}
test "Registry" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e1 = reg.create();
reg.addTypes(e1, .{ Empty, Position });
reg.add(e1, BigOne{ .pos = Position{ .x = 5, .y = 5 }, .vel = Velocity{ .x = 5, .y = 5 }, .accel = Velocity{ .x = 5, .y = 5 } });
try std.testing.expect(reg.has(Empty, e1));
try std.testing.expect(reg.has(Position, e1));
try std.testing.expect(reg.has(BigOne, e1));
var iter = reg.entities();
while (iter.next()) |e| try std.testing.expectEqual(e1, e);
reg.remove(Empty, e1);
try std.testing.expect(!reg.has(Empty, e1));
}
test "context get/set/unset" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var ctx = reg.getContext(Position);
try std.testing.expectEqual(ctx, null);
var pos = Position{ .x = 5, .y = 5 };
reg.setContext(&pos);
ctx = reg.getContext(Position);
try std.testing.expectEqual(ctx.?, &pos);
reg.unsetContext(Position);
ctx = reg.getContext(Position);
try std.testing.expectEqual(ctx, null);
}
// this test should fail
test "context not pointer" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var pos = Position{ .x = 5, .y = 5 };
_ = pos;
// reg.setContext(pos);
}
test "context get/set/unset typed" {
const SomeType = struct { dummy: u1 };
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var ctx = reg.getContext(SomeType);
try std.testing.expectEqual(ctx, null);
var pos = SomeType{ .dummy = 0 };
reg.setContext(&pos);
ctx = reg.getContext(SomeType);
try std.testing.expectEqual(ctx.?, &pos);
reg.unsetContext(SomeType);
ctx = reg.getContext(SomeType);
try std.testing.expectEqual(ctx, null);
}
test "singletons" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var pos = Position{ .x = 5, .y = 5 };
reg.singletons().add(pos);
try std.testing.expect(reg.singletons().has(Position));
try std.testing.expectEqual(reg.singletons().get(Position).*, pos);
reg.singletons().remove(Position);
try std.testing.expect(!reg.singletons().has(Position));
}
test "destroy" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var i = @as(u8, 0);
while (i < 255) : (i += 1) {
const e = reg.create();
reg.add(e, Position{ .x = @as(f32, @floatFromInt(i)), .y = @as(f32, @floatFromInt(i)) });
}
reg.destroy(3);
reg.destroy(4);
i = 0;
while (i < 6) : (i += 1) {
if (i != 3 and i != 4)
try std.testing.expectEqual(Position{ .x = @as(f32, @floatFromInt(i)), .y = @as(f32, @floatFromInt(i)) }, reg.getConst(Position, i));
}
}
test "remove all" {
var reg = Registry.init(std.testing.allocator);
defer reg.deinit();
var e = reg.create();
reg.add(e, Position{ .x = 1, .y = 1 });
reg.addTyped(u32, e, 666);
try std.testing.expect(reg.has(Position, e));
try std.testing.expect(reg.has(u32, e));
reg.removeAll(e);
try std.testing.expect(!reg.has(Position, e));
try std.testing.expect(!reg.has(u32, e));
}

@ -1,5 +0,0 @@
test "ecs test suite" {
_ = @import("dispatcher_test.zig");
_ = @import("registry_test.zig");
_ = @import("groups_test.zig");
}

@ -1,4 +0,0 @@
id: 8cw92n7j19xzpjm8kwq0scvx3vx6k7b730vn3i6rl3t25z08
name: ecs
main: src/ecs.zig
dependencies: