massive group simplification
commit 7dd8a28dbe (parent 21a51a9298)
@@ -7,71 +7,65 @@ const SparseSet = @import("sparse_set.zig").SparseSet;
 const Entity = @import("registry.zig").Entity;
 
 /// BasicGroups do not own any components
-pub fn BasicGroup(comptime n_includes: usize, comptime n_excludes: usize) type {
-    return struct {
-        const Self = @This();
-
-        entity_set: *SparseSet(Entity),
-        registry: *Registry,
-        type_ids: [n_includes]u32,
-        exclude_type_ids: [n_excludes]u32,
-
-        pub const Iterator = struct {
-            group: *Self,
-            index: usize = 0,
-            entities: *const []Entity,
-
-            pub fn init(group: *Self) Iterator {
-                return .{
-                    .group = group,
-                    .entities = group.entity_set.data(),
-                };
-            }
-
-            pub fn next(it: *Iterator) ?Entity {
-                if (it.index >= it.entities.len) return null;
-
-                it.index += 1;
-                return it.entities.*[it.index - 1];
-            }
-
-            // Reset the iterator to the initial index
-            pub fn reset(it: *Iterator) void {
-                it.index = 0;
-            }
-        };
-
-        pub fn init(entity_set: *SparseSet(Entity), registry: *Registry, type_ids: [n_includes]u32, exclude_type_ids: [n_excludes]u32) Self {
-            return Self{
-                .entity_set = entity_set,
-                .registry = registry,
-                .type_ids = type_ids,
-                .exclude_type_ids = exclude_type_ids,
-            };
-        }
-
-        pub fn len(self: Self) usize {
-            return self.entity_set.len();
-        }
-
-        /// Direct access to the array of entities
-        pub fn data(self: Self) *const []Entity {
-            return self.entity_set.data();
-        }
-
-        pub fn get(self: *Self, comptime T: type, entity: Entity) *T {
-            return self.registry.assure(T).get(entity);
-        }
-
-        pub fn getConst(self: *Self, comptime T: type, entity: Entity) T {
-            return self.registry.assure(T).getConst(entity);
-        }
-
-        pub fn iterator(self: *Self) Iterator {
-            return Iterator.init(self);
-        }
-    };
-}
+pub const BasicGroup = struct {
+    const Self = @This();
+
+    registry: *Registry,
+    group_data: *Registry.GroupData,
+
+    pub const Iterator = struct {
+        group: *Self,
+        index: usize = 0,
+        entities: *const []Entity,
+
+        pub fn init(group: *Self) Iterator {
+            return .{
+                .group = group,
+                .entities = group.group_data.entity_set.data(),
+            };
+        }
+
+        pub fn next(it: *Iterator) ?Entity {
+            if (it.index >= it.entities.len) return null;
+
+            it.index += 1;
+            return it.entities.*[it.index - 1];
+        }
+
+        // Reset the iterator to the initial index
+        pub fn reset(it: *Iterator) void {
+            it.index = 0;
+        }
+    };
+
+    pub fn init(registry: *Registry, group_data: *Registry.GroupData) Self {
+        return Self{
+            .registry = registry,
+            .group_data = group_data,
+        };
+    }
+
+    pub fn len(self: Self) usize {
+        return self.group_data.entity_set.len();
+    }
+
+    /// Direct access to the array of entities
+    pub fn data(self: Self) *const []Entity {
+        return self.group_data.entity_set.data();
+    }
+
+    pub fn get(self: *Self, comptime T: type, entity: Entity) *T {
+        return self.registry.assure(T).get(entity);
+    }
+
+    pub fn getConst(self: *Self, comptime T: type, entity: Entity) T {
+        return self.registry.assure(T).getConst(entity);
+    }
+
+    pub fn iterator(self: *Self) Iterator {
+        return Iterator.init(self);
+    }
+};
 
 pub const OwningGroup = struct {
     registry: *Registry,
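Not part of the commit, but a minimal usage sketch of the simplified non-owning group may help; it assumes the library is imported as "ecs", the Registry create/add API used elsewhere in the repository, and two hypothetical components Position and Velocity:

    const std = @import("std");
    const ecs = @import("ecs");

    // hypothetical components, for illustration only
    const Position = struct { x: f32 = 0, y: f32 = 0 };
    const Velocity = struct { x: f32 = 0, y: f32 = 0 };

    test "non-owning group sketch" {
        var reg = ecs.Registry.init(std.testing.allocator);
        defer reg.deinit();

        var e = reg.create();
        reg.add(e, Position{});
        reg.add(e, Velocity{ .x = 1 });

        // an empty owned tuple yields a BasicGroup backed by a Registry.GroupData
        var group = reg.group(.{}, .{ Position, Velocity }, .{});
        var iter = group.iterator();
        while (iter.next()) |entity| {
            var pos = group.get(Position, entity);
            pos.x += group.getConst(Velocity, entity).x;
        }
    }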
@@ -453,21 +453,28 @@ pub const Registry = struct {
         return MultiView(includes.len, excludes.len);
     }
 
-    /// creates an optimized group for iterating components. Note that types are ORDER DEPENDENDANT for now, so always pass component
-    /// types in the same order.
-    pub fn group(self: *Registry, comptime owned: var, comptime includes: var, comptime excludes: var) GroupType(owned, includes, excludes) {
+    /// creates an optimized group for iterating components
+    pub fn group(self: *Registry, comptime owned: var, comptime includes: var, comptime excludes: var) (if (owned.len == 0) BasicGroup else OwningGroup) {
         std.debug.assert(@typeInfo(@TypeOf(owned)) == .Struct);
         std.debug.assert(@typeInfo(@TypeOf(includes)) == .Struct);
         std.debug.assert(@typeInfo(@TypeOf(excludes)) == .Struct);
         std.debug.assert(owned.len + includes.len > 0);
         std.debug.assert(owned.len + includes.len + excludes.len > 1);
 
-        var owned_arr: [owned.len]u32 = undefined;
-        inline for (owned) |t, i| {
-            _ = self.assure(t);
-            owned_arr[i] = utils.typeId(t);
+        // create a unique hash to identify the group so that we can look it up
+        comptime const hash = comptime hashGroupTypes(owned, includes, excludes);
+
+        for (self.groups.items) |grp| {
+            if (grp.hash == hash) {
+                if (owned.len == 0) {
+                    return BasicGroup.init(self, grp);
+                }
+                var first_owned = self.assure(owned[0]);
+                return OwningGroup.init(self, grp, &first_owned.super);
+            }
         }
 
+        // gather up all our Types as typeIds
         var includes_arr: [includes.len]u32 = undefined;
         inline for (includes) |t, i| {
             _ = self.assure(t);
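The lookup above is what makes repeated group() calls cheap: the comptime hash of the owned/include/exclude type sets is compared against every existing GroupData before a new one is allocated. A rough sketch of the expected behavior, reusing the hypothetical components from the earlier sketch (the pointer comparison is only for illustration):

    // both calls use the same type sets, so the second call should find the
    // cached GroupData via the hash comparison instead of allocating a new one
    var a = reg.group(.{}, .{ Position, Velocity }, .{});
    var b = reg.group(.{}, .{ Position, Velocity }, .{});
    std.debug.assert(a.group_data == b.group_data);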
@@ -480,33 +487,17 @@ pub const Registry = struct {
             excludes_arr[i] = utils.typeId(t);
         }
 
-        // create a unique hash to identify the group
-        var maybe_group_data: ?*GroupData = null;
-        comptime const hash = comptime hashGroupTypes(owned, includes, excludes);
-
-        for (self.groups.items) |grp| {
-            if (grp.hash == hash) {
-                maybe_group_data = grp;
-                break;
-            }
+        var owned_arr: [owned.len]u32 = undefined;
+        inline for (owned) |t, i| {
+            _ = self.assure(t);
+            owned_arr[i] = utils.typeId(t);
         }
 
-        // do we already have the GroupData?
-        if (maybe_group_data) |group_data| {
-            // non-owning groups
-            if (owned.len == 0) {
-                return BasicGroup(includes.len, excludes.len).init(&group_data.entity_set, self, includes_arr, excludes_arr);
-            } else {
-                var first_owned = self.assure(owned[0]);
-                return OwningGroup.init(self, group_data, &first_owned.super);
-            }
-        }
-
-        const size = owned.len + includes.len + excludes.len;
+        // we need to create a new GroupData
+        var new_group_data = GroupData.initPtr(self.allocator, self, hash, owned_arr[0..], includes_arr[0..], excludes_arr[0..]);
 
         // before adding the group we need to do some checks to make sure there arent other owning groups with the same types
         if (std.builtin.mode == .Debug and owned.len > 0) {
-            std.debug.warn("\n", .{});
             for (self.groups.items) |grp| {
                 if (grp.owned.len == 0) continue;
 
@@ -523,14 +514,11 @@ pub const Registry = struct {
                     if (std.mem.indexOfScalar(u32, &excludes_arr, grp_exclude)) |_| sz += 1;
                 }
 
-                const check = overlapping == 0 or ((sz == size) or (sz == grp.size));
+                const check = overlapping == 0 or ((sz == new_group_data.size) or (sz == grp.size));
                 std.debug.assert(check);
             }
         }
 
-        // we need to create a new GroupData
-        var new_group_data = GroupData.initPtr(self.allocator, self, hash, owned_arr[0..], includes_arr[0..], excludes_arr[0..]);
-
         var maybe_valid_if: ?*GroupData = null;
         var discard_if: ?*GroupData = null;
 
@@ -555,7 +543,7 @@ pub const Registry = struct {
             // update super on all owned Storages to be the max of size and their current super value
             inline for (owned) |t| {
                 var storage = self.assure(t);
-                storage.super = std.math.max(storage.super, size);
+                storage.super = std.math.max(storage.super, new_group_data.size);
             }
         }
 
@@ -586,19 +574,13 @@ pub const Registry = struct {
         }
 
         if (owned.len == 0) {
-            return BasicGroup(includes.len, excludes.len).init(&new_group_data.entity_set, self, includes_arr, excludes_arr);
+            return BasicGroup.init(self, new_group_data);
         } else {
             var first_owned_storage = self.assure(owned[0]);
             return OwningGroup.init(self, new_group_data, &first_owned_storage.super);
         }
     }
 
-    /// returns the Type that a view will be, based on the includes and excludes
-    fn GroupType(comptime owned: var, comptime includes: var, comptime excludes: var) type {
-        if (owned.len == 0) return BasicGroup(includes.len, excludes.len);
-        return OwningGroup;
-    }
-
     /// given the 3 group Types arrays, generates a (mostly) unique u64 hash. Simultaneously ensures there are no duped types between
     /// the 3 groups.
     inline fn hashGroupTypes(comptime owned: var, comptime includes: var, comptime excludes: var) u64 {
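For completeness, the same entry point hands back an OwningGroup when the owned tuple is non-empty, since the inline return-type expression in group() picks the group type from owned.len. A hedged one-liner, again using the hypothetical components from the earlier sketch:

    // non-empty owned tuple: the return type resolves to OwningGroup and the
    // first owned storage's super field is passed to the group
    var owning = reg.group(.{Velocity}, .{Position}, .{});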