Mirror of https://github.com/Jozufozu/Flywheel.git (synced 2024-11-14 06:24:12 +01:00)

Compare commits: 74fc096130 ... 2ae177966c (7 commits)

2ae177966c
540fe7a7fe
a9f2018c0a
fc3e475ec9
fac63168c1
20b3f78b9c
a7e7090866
AbstractInstancer.java

@@ -1,27 +1,14 @@
 package dev.engine_room.flywheel.backend.engine;
 
-import java.util.ArrayList;
-
-import org.jetbrains.annotations.Nullable;
-
 import dev.engine_room.flywheel.api.instance.Instance;
 import dev.engine_room.flywheel.api.instance.InstanceType;
 import dev.engine_room.flywheel.api.instance.Instancer;
 import dev.engine_room.flywheel.backend.engine.embed.Environment;
-import dev.engine_room.flywheel.backend.util.AtomicBitSet;
 
-public abstract class AbstractInstancer<I extends Instance> implements Instancer<I>, InstanceHandleImpl.State<I> {
+public abstract class AbstractInstancer<I extends Instance> implements Instancer<I> {
 	public final InstanceType<I> type;
 	public final Environment environment;
-	private final Recreate<I> recreate;
+	public final Recreate<I> recreate;
 
-	// Lock for all instances, only needs to be used in methods that may run on the TaskExecutor.
-	protected final Object lock = new Object();
-	protected final ArrayList<I> instances = new ArrayList<>();
-	protected final ArrayList<InstanceHandleImpl<I>> handles = new ArrayList<>();
-
-	protected final AtomicBitSet changed = new AtomicBitSet();
-	protected final AtomicBitSet deleted = new AtomicBitSet();
-
 	protected AbstractInstancer(InstancerKey<I> key, Recreate<I> recreate) {
 		this.type = key.type();

@@ -29,218 +16,16 @@ public abstract class AbstractInstancer<I extends Instance> implements Instancer
 		this.recreate = recreate;
 	}
 
-	[~200 lines removed: the list-backed implementation (lock, instances/handles lists, changed/deleted bitsets, setChanged/setDeleted/setVisible, createInstance, revealInstance, stealInstance, addLocked, instanceCount, notifyDirty, setIndexChanged, notifyRemoval, removeDeletedInstances, setRangeChanged, clear); it moves essentially unchanged into the new BaseInstancer below and into InstancedInstancer#parallelUpdate]
+	public abstract InstanceHandleImpl.State<I> revealInstance(InstanceHandleImpl<I> handle, I instance);
+
+	public abstract int instanceCount();
+
+	public abstract void parallelUpdate();
+
 	public abstract void delete();
 
+	public abstract void clear();
+
 	@Override
 	public String toString() {
 		return "AbstractInstancer[" + instanceCount() + ']';
BaseInstancer.java (new file, +175 lines)

package dev.engine_room.flywheel.backend.engine;

import java.util.ArrayList;

import org.jetbrains.annotations.Nullable;

import dev.engine_room.flywheel.api.instance.Instance;
import dev.engine_room.flywheel.backend.util.AtomicBitSet;

public abstract class BaseInstancer<I extends Instance> extends AbstractInstancer<I> implements InstanceHandleImpl.State<I> {
	// Lock for all instances, only needs to be used in methods that may run on the TaskExecutor.
	protected final Object lock = new Object();
	protected final ArrayList<I> instances = new ArrayList<>();
	protected final ArrayList<InstanceHandleImpl<I>> handles = new ArrayList<>();

	protected final AtomicBitSet changed = new AtomicBitSet();
	protected final AtomicBitSet deleted = new AtomicBitSet();

	protected BaseInstancer(InstancerKey<I> key, Recreate<I> recreate) {
		super(key, recreate);
	}

	@Override
	public InstanceHandleImpl.State<I> setChanged(int index) {
		notifyDirty(index);
		return this;
	}

	@Override
	public InstanceHandleImpl.State<I> setDeleted(int index) {
		notifyRemoval(index);
		return InstanceHandleImpl.Deleted.instance();
	}

	@Override
	public InstanceHandleImpl.State<I> setVisible(InstanceHandleImpl<I> handle, int index, boolean visible) {
		if (visible) {
			return this;
		}

		notifyRemoval(index);

		I instance;
		synchronized (lock) {
			// I think we need to lock to prevent wacky stuff from happening if the array gets resized.
			instance = instances.get(index);
		}

		return new InstanceHandleImpl.Hidden<>(recreate, instance);
	}

	@Override
	public I createInstance() {
		var handle = new InstanceHandleImpl<>(this);
		I instance = type.create(handle);

		synchronized (lock) {
			handle.index = instances.size();
			addLocked(instance, handle);
			return instance;
		}
	}

	public InstanceHandleImpl.State<I> revealInstance(InstanceHandleImpl<I> handle, I instance) {
		synchronized (lock) {
			handle.index = instances.size();
			addLocked(instance, handle);
		}
		return this;
	}

	@Override
	public void stealInstance(@Nullable I instance) {
		if (instance == null) {
			return;
		}

		var instanceHandle = instance.handle();

		if (!(instanceHandle instanceof InstanceHandleImpl<?>)) {
			// UB: do nothing
			return;
		}

		// Should InstanceType have an isInstance method?
		@SuppressWarnings("unchecked") var handle = (InstanceHandleImpl<I>) instanceHandle;

		// No need to steal if this instance is already owned by this instancer.
		if (handle.state == this) {
			return;
		}
		// Not allowed to steal deleted instances.
		if (handle.state instanceof InstanceHandleImpl.Deleted) {
			return;
		}
		// No need to steal if the instance will recreate to us.
		if (handle.state instanceof InstanceHandleImpl.Hidden<I> hidden && recreate.equals(hidden.recreate())) {
			return;
		}

		// FIXME: in theory there could be a race condition here if the instance
		// is somehow being stolen by 2 different instancers between threads.
		// That seems kinda impossible so I'm fine leaving it as is for now.

		// Add the instance to this instancer.
		if (handle.state instanceof BaseInstancer<I> other) {
			// Remove the instance from its old instancer.
			// This won't have any unwanted effect when the old instancer
			// is filtering deleted instances later, so is safe.
			other.notifyRemoval(handle.index);

			handle.state = this;
			// Only lock now that we'll be mutating our state.
			synchronized (lock) {
				handle.index = instances.size();
				addLocked(instance, handle);
			}
		} else if (handle.state instanceof InstanceHandleImpl.Hidden<I>) {
			handle.state = new InstanceHandleImpl.Hidden<>(recreate, instance);
		}
	}

	/**
	 * Calls must be synchronized on {@link #lock}.
	 */
	private void addLocked(I instance, InstanceHandleImpl<I> handle) {
		instances.add(instance);
		handles.add(handle);
		setIndexChanged(handle.index);
	}

	public int instanceCount() {
		return instances.size();
	}

	public void notifyDirty(int index) {
		if (index < 0 || index >= instanceCount()) {
			return;
		}
		setIndexChanged(index);
	}

	protected void setIndexChanged(int index) {
		changed.set(index);
	}

	public void notifyRemoval(int index) {
		if (index < 0 || index >= instanceCount()) {
			return;
		}
		deleted.set(index);
	}

	/**
	 * Clear all instances without freeing resources.
	 */
	public void clear() {
		for (InstanceHandleImpl<I> handle : handles) {
			// Only clear instances that belong to this instancer.
			// If one of these handles was stolen by another instancer,
			// clearing it here would cause significant visual artifacts and instance leaks.
			// At the same time, we need to clear handles we own to prevent
			// instances from changing/deleting positions in this instancer that no longer exist.
			if (handle.state == this) {
				handle.clear();
				handle.state = InstanceHandleImpl.Deleted.instance();
			}
		}
		instances.clear();
		handles.clear();
		changed.clear();
		deleted.clear();
	}
}
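The split above turns InstanceHandleImpl.State into a small state machine: a handle delegates setChanged/setDeleted/setVisible to whatever state currently owns it, and each call returns the next state to store. The following standalone sketch shows only the shape of that delegation; all names here are illustrative and none of them are Flywheel's API.

// Illustrative only: a tiny state machine in the same shape as InstanceHandleImpl.State.
final class HandleSketch {
	interface State {
		State setVisible(HandleSketch handle, int index, boolean visible);
	}

	static final class Visible implements State {
		@Override
		public State setVisible(HandleSketch handle, int index, boolean visible) {
			// Staying visible keeps the same state; hiding swaps in a new one.
			return visible ? this : new Hidden();
		}
	}

	static final class Hidden implements State {
		@Override
		public State setVisible(HandleSketch handle, int index, boolean visible) {
			// Re-showing would "recreate" into a live state in the real code.
			return visible ? new Visible() : this;
		}
	}

	State state = new Visible();
	int index;

	void setVisible(boolean visible) {
		// The handle never branches itself; the current state returns the next state.
		state = state.setVisible(this, index, visible);
	}

	public static void main(String[] args) {
		var handle = new HandleSketch();
		handle.setVisible(false);
		handle.setVisible(true);
		System.out.println(handle.state.getClass().getSimpleName()); // Visible
	}
}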
DrawManager.java

@@ -50,7 +50,7 @@ public abstract class DrawManager<N extends AbstractInstancer<?>> {
 	public Plan<RenderContext> createFramePlan() {
 		// Go wide on instancers to process deletions in parallel.
-		return ForEachPlan.of(() -> new ArrayList<>(instancers.values()), AbstractInstancer::removeDeletedInstances);
+		return ForEachPlan.of(() -> new ArrayList<>(instancers.values()), AbstractInstancer::parallelUpdate);
 	}
 
 	public void flush(LightStorage lightStorage, EnvironmentStorage environmentStorage) {
InstanceHandleImpl.java

@@ -1,13 +1,16 @@
 package dev.engine_room.flywheel.backend.engine;
 
+import org.jetbrains.annotations.UnknownNullability;
+
 import dev.engine_room.flywheel.api.instance.Instance;
 import dev.engine_room.flywheel.api.instance.InstanceHandle;
 
 public class InstanceHandleImpl<I extends Instance> implements InstanceHandle {
+	@UnknownNullability
 	public State<I> state;
 	public int index;
 
-	public InstanceHandleImpl(State<I> state) {
+	public InstanceHandleImpl(@UnknownNullability State<I> state) {
 		this.state = state;
 	}
 
@@ -62,8 +65,7 @@ public class InstanceHandleImpl<I extends Instance> implements InstanceHandle {
 			return this;
 		}
 		var instancer = recreate.recreate();
-		instancer.revealInstance(handle, instance);
-		return instancer;
+		return instancer.revealInstance(handle, instance);
 	}
 }
 
IndirectInstancer.java

@@ -2,7 +2,10 @@ package dev.engine_room.flywheel.backend.engine.indirect;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
+import org.jetbrains.annotations.Nullable;
 import org.jetbrains.annotations.UnknownNullability;
 import org.joml.Vector4fc;
 import org.lwjgl.system.MemoryUtil;

@@ -10,6 +13,7 @@ import org.lwjgl.system.MemoryUtil;
 import dev.engine_room.flywheel.api.instance.Instance;
 import dev.engine_room.flywheel.api.instance.InstanceWriter;
 import dev.engine_room.flywheel.backend.engine.AbstractInstancer;
+import dev.engine_room.flywheel.backend.engine.InstanceHandleImpl;
 import dev.engine_room.flywheel.backend.engine.InstancerKey;
 import dev.engine_room.flywheel.backend.util.AtomicBitSet;
 import dev.engine_room.flywheel.lib.math.MoreMath;
@@ -20,7 +24,27 @@ public class IndirectInstancer<I extends Instance> extends AbstractInstancer<I>
 	private final List<IndirectDraw> associatedDraws = new ArrayList<>();
 	private final Vector4fc boundingSphere;
 
-	private final AtomicBitSet changedPages = new AtomicBitSet();
+	private final AtomicReference<InstancePage<I>[]> pages = new AtomicReference<>(pageArray(0));
+	/**
+	 * The set of pages whose count changed and thus need their descriptor re-uploaded.
+	 */
+	private final AtomicBitSet validityChanged = new AtomicBitSet();
+	/**
+	 * The set of pages whose content changed and thus need their instances re-uploaded.
+	 * Note that we don't re-upload for deletions, as the memory becomes invalid and masked out by the validity bits.
+	 */
+	private final AtomicBitSet contentsChanged = new AtomicBitSet();
+	/**
+	 * The set of pages that are entirely full.
+	 * We scan the clear bits of this set when trying to add an instance.
+	 */
+	private final AtomicBitSet fullPages = new AtomicBitSet();
+	/**
+	 * The set of mergable pages. A page is mergeable if it is not empty and has 16 or fewer instances.
+	 * These constraints are set so that we can guarantee that merging two pages leaves one entirely empty,
+	 * but we also don't want to waste work merging into pages that are already empty.
+	 */
+	private final AtomicBitSet mergeablePages = new AtomicBitSet();
 
 	public ObjectStorage.@UnknownNullability Mapping mapping;
 
@@ -35,16 +59,198 @@
 		boundingSphere = key.model().boundingSphere();
 	}
 
-	@Override
-	public void setIndexChanged(int index) {
-		changedPages.set(ObjectStorage.objectIndex2PageIndex(index));
-	}
-
-	@Override
-	protected void setRangeChanged(int start, int end) {
-		super.setRangeChanged(start, end);
-
-		changedPages.set(ObjectStorage.objectIndex2PageIndex(start), ObjectStorage.objectIndex2PageIndex(end) + 1);
+	@SuppressWarnings("unchecked")
+	private static <I extends Instance> InstancePage<I>[] pageArray(int length) {
+		return new InstancePage[length];
+	}
+
+	@SuppressWarnings("unchecked")
+	private static <I extends Instance> I[] instanceArray() {
+		return (I[]) new Instance[ObjectStorage.PAGE_SIZE];
+	}
+
+	@SuppressWarnings("unchecked")
+	private static <I extends Instance> InstanceHandleImpl<I>[] handleArray() {
+		return new InstanceHandleImpl[ObjectStorage.PAGE_SIZE];
+	}
+
+	private static final class InstancePage<I extends Instance> implements InstanceHandleImpl.State<I> {
+		private final IndirectInstancer<I> parent;
+		private final int pageNo;
+		private final I[] instances;
+		// Handles are only read in #takeFrom. It would be nice to avoid tracking these at all.
+		private final InstanceHandleImpl<I>[] handles;
+		/**
+		 * A bitset describing which indices in the instances/handles arrays contain live instances.
+		 */
+		private final AtomicInteger valid;
+
+		private InstancePage(IndirectInstancer<I> parent, int pageNo) {
+			this.parent = parent;
+			this.pageNo = pageNo;
+			this.instances = instanceArray();
+			this.handles = handleArray();
+			this.valid = new AtomicInteger(0);
+		}
+
+		/**
+		 * Attempt to add the given instance/handle to this page.
+		 *
+		 * @param instance The instance to add
+		 * @param handle The instance's handle
+		 * @return true if the instance was added, false if the page is full
+		 */
+		public boolean add(I instance, InstanceHandleImpl<I> handle) {
+			// Thread safety: we loop until we either win the race and add the given instance, or we
+			// run out of space because other threads trying to add at the same time.
+			while (true) {
+				int currentValue = valid.get();
+				if (isFull(currentValue)) {
+					// The page is full, must search elsewhere
+					return false;
+				}
+
+				// determine what the new long value will be after we set the appropriate bit.
+				int index = Integer.numberOfTrailingZeros(~currentValue);
+
+				int newValue = currentValue | (1 << index);
+
+				// if no other thread has modified the value since we read it, we won the race and we are done.
+				if (valid.compareAndSet(currentValue, newValue)) {
+					instances[index] = instance;
+					handles[index] = handle;
+					handle.state = this;
+					// Handle index is unique amongst all pages of this instancer.
+					handle.index = local2HandleIndex(index);
+
+					parent.contentsChanged.set(pageNo);
+					parent.validityChanged.set(pageNo);
+					if (isFull(newValue)) {
+						// The page is now full, mark it so in the bitset.
+						// This is safe because only one bit position changes at a time.
+						parent.fullPages.set(pageNo);
+					}
+					if (isEmpty(currentValue)) {
+						// Value we just saw was zero, so since we added something we are now mergeable!
+						parent.mergeablePages.set(pageNo);
+					} else if (Integer.bitCount(currentValue) == 16) {
+						// We just filled the 17th instance, so we are no longer mergeable.
+						parent.mergeablePages.clear(pageNo);
+					}
+					return true;
+				}
+			}
+		}
+
+		private int local2HandleIndex(int index) {
+			return (pageNo << ObjectStorage.LOG_2_PAGE_SIZE) + index;
+		}
+
+		@Override
+		public InstanceHandleImpl.State<I> setChanged(int index) {
+			parent.contentsChanged.set(pageNo);
+			return this;
+		}
+
+		@Override
+		public InstanceHandleImpl.State<I> setDeleted(int index) {
+			int localIndex = index % ObjectStorage.PAGE_SIZE;
+
+			instances[localIndex] = null;
+			handles[localIndex] = null;
+
+			while (true) {
+				int currentValue = valid.get();
+				int newValue = currentValue & ~(1 << localIndex);
+
+				if (valid.compareAndSet(currentValue, newValue)) {
+					parent.validityChanged.set(pageNo);
+					if (isEmpty(newValue)) {
+						// If we decremented to zero then we're no longer mergeable.
+						parent.mergeablePages.clear(pageNo);
+					} else if (Integer.bitCount(newValue) == 16) {
+						// If we decremented to 16 then we're now mergeable.
+						parent.mergeablePages.set(pageNo);
+					}
+					// Set full page last so that other threads don't race to set the other bitsets.
+					parent.fullPages.clear(pageNo);
+					return InstanceHandleImpl.Deleted.instance();
+				}
+			}
+		}
+
+		@Override
+		public InstanceHandleImpl.State<I> setVisible(InstanceHandleImpl<I> handle, int index, boolean visible) {
+			if (visible) {
+				return this;
+			}
+
+			int localIndex = index % ObjectStorage.PAGE_SIZE;
+
+			return new InstanceHandleImpl.Hidden<>(parent.recreate, instances[localIndex]);
+		}
+
+		/**
+		 * Only call this on 2 pages that are mergeable.
+		 *
+		 * @param other The page to take instances from.
+		 */
+		private void takeFrom(InstancePage<I> other) {
+			// Fill the holes in this page with instances from the other page.
+
+			int valid = this.valid.get();
+			int otherValid = other.valid.get();
+
+			for (int i = 0; i < ObjectStorage.PAGE_SIZE; i++) {
+				int mask = 1 << i;
+
+				// Find set bits in the other page.
+				if ((otherValid & mask) == 0) {
+					continue;
+				}
+
+				int writePos = Integer.numberOfTrailingZeros(~valid);
+
+				instances[writePos] = other.instances[i];
+				handles[writePos] = other.handles[i];
+
+				handles[writePos].state = this;
+				handles[writePos].index = local2HandleIndex(writePos);
+
+				// Clear out the other page.
+				otherValid &= ~mask;
+				other.handles[i] = null;
+				other.instances[i] = null;
+
+				// Set the bit in this page so we can find the next write position.
+				valid |= 1 << writePos;
+
+				// If we're full, we're done.
+				if (isFull(valid)) {
+					break;
+				}
+			}
+
+			this.valid.set(valid);
+			other.valid.set(otherValid);
+
+			// If the other page was quite empty we may still be mergeable.
+			parent.mergeablePages.set(pageNo, isMergeable(valid));
+
+			// We definitely changed the contents and validity of this page.
+			parent.contentsChanged.set(pageNo);
+			parent.validityChanged.set(pageNo);
+
+			// The other page will end up empty, so the validity changes and it's no longer mergeable.
+			// Also clear the changed bit so we don't re-upload the instances.
+			parent.contentsChanged.clear(other.pageNo);
+			parent.validityChanged.set(other.pageNo);
+			parent.mergeablePages.clear(other.pageNo);
+
+			if (isFull(valid)) {
+				parent.fullPages.set(pageNo);
+			}
+		}
 	}
 
 	public void addDraw(IndirectDraw draw) {
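InstancePage#add above claims a slot by finding the lowest clear bit of the 32-bit occupancy word and publishing it with compareAndSet, retrying if another thread got there first. A self-contained sketch of that claim loop under the same assumption of a 32-slot page (names here are illustrative, not Flywheel's):

import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the compare-and-set slot claim used by InstancePage#add.
final class SlotClaim {
	final AtomicInteger occupancy = new AtomicInteger();

	/** @return the claimed slot index, or -1 if the page is full. */
	int claim() {
		while (true) {
			int current = occupancy.get();
			if (current == -1) {
				// All 32 bits set (0xFFFFFFFF): no free slot here.
				return -1;
			}
			// Lowest clear bit of the occupancy word is the first free slot.
			int slot = Integer.numberOfTrailingZeros(~current);
			int next = current | (1 << slot);
			// Publish the claim; if another thread raced us, loop and re-read.
			if (occupancy.compareAndSet(current, next)) {
				return slot;
			}
		}
	}

	public static void main(String[] args) throws InterruptedException {
		var page = new SlotClaim();
		var threads = new Thread[4];
		for (int t = 0; t < threads.length; t++) {
			threads[t] = new Thread(() -> {
				for (int i = 0; i < 8; i++) {
					page.claim();
				}
			});
			threads[t].start();
		}
		for (var thread : threads) {
			thread.join();
		}
		// Exactly 32 distinct slots were handed out across the threads.
		System.out.println(Integer.bitCount(page.occupancy.get())); // 32
	}
}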
@@ -56,9 +262,32 @@
 	}
 
 	public void update(int modelIndex, int baseInstance) {
-		this.modelIndex = modelIndex;
 		this.baseInstance = baseInstance;
-		mapping.update(modelIndex, instanceCount());
+
+		var sameModelIndex = this.modelIndex == modelIndex;
+		if (sameModelIndex && validityChanged.isEmpty()) {
+			// Nothing to do!
+			return;
+		}
+
+		this.modelIndex = modelIndex;
+
+		var pages = this.pages.get();
+		mapping.updateCount(pages.length);
+
+		if (sameModelIndex) {
+			// Only need to update the changed pages.
+			for (int page = validityChanged.nextSetBit(0); page >= 0 && page < pages.length; page = validityChanged.nextSetBit(page + 1)) {
+				mapping.updatePage(page, modelIndex, pages[page].valid.get());
+			}
+		} else {
+			// Need to update all pages since the model index changed.
+			for (int i = 0; i < pages.length; i++) {
+				mapping.updatePage(i, modelIndex, pages[i].valid.get());
+			}
+		}
+
+		validityChanged.clear();
 	}
 
 	public void writeModel(long ptr) {

@@ -72,25 +301,22 @@
 	}
 
 	public void uploadInstances(StagingBuffer stagingBuffer, int instanceVbo) {
-		if (changedPages.isEmpty()) {
+		if (contentsChanged.isEmpty()) {
 			return;
 		}
 
-		int numPages = mapping.pageCount();
-
-		var instanceCount = instances.size();
-
-		for (int page = changedPages.nextSetBit(0); page >= 0 && page < numPages; page = changedPages.nextSetBit(page + 1)) {
-			int startObject = ObjectStorage.pageIndex2ObjectIndex(page);
-
-			if (startObject >= instanceCount) {
-				break;
-			}
-
-			int endObject = Math.min(instanceCount, ObjectStorage.pageIndex2ObjectIndex(page + 1));
-
+		var pages = this.pages.get();
+		for (int page = contentsChanged.nextSetBit(0); page >= 0 && page < pages.length; page = contentsChanged.nextSetBit(page + 1)) {
+			var instances = pages[page].instances;
+
 			long baseByte = mapping.page2ByteOffset(page);
-			long size = (endObject - startObject) * instanceStride;
+
+			if (baseByte < 0) {
+				// This page is not mapped to the VBO.
+				continue;
+			}
+
+			long size = ObjectStorage.PAGE_SIZE * instanceStride;
 
 			// Because writes are broken into pages, we end up with significantly more calls into
 			// StagingBuffer#enqueueCopy and the allocations for the writer got out of hand. Here

@@ -101,9 +327,10 @@
 			long direct = stagingBuffer.reserveForCopy(size, instanceVbo, baseByte);
 
 			if (direct != MemoryUtil.NULL) {
-				for (int i = startObject; i < endObject; i++) {
-					var instance = instances.get(i);
-					writer.write(direct, instance);
+				for (I instance : instances) {
+					if (instance != null) {
+						writer.write(direct, instance);
+					}
 					direct += instanceStride;
 				}
 				continue;
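Both update() and uploadInstances() above walk only the dirty pages with the nextSetBit idiom and clear the set once it has been consumed. With the standard java.util.BitSet the same traversal looks like this (AtomicBitSet is Flywheel's thread-safe variant; this sketch only shows the iteration pattern, and the page count is made up):

import java.util.BitSet;

// Sketch of the dirty-page traversal used by update()/uploadInstances().
final class DirtyPages {
	public static void main(String[] args) {
		int pageCount = 8;
		BitSet dirty = new BitSet(pageCount);

		// Pretend pages 1 and 5 changed this frame.
		dirty.set(1);
		dirty.set(5);

		// Visit only the set bits, bounded by the current page array length.
		for (int page = dirty.nextSetBit(0); page >= 0 && page < pageCount; page = dirty.nextSetBit(page + 1)) {
			System.out.println("re-upload page " + page);
		}

		// Once consumed, the set is cleared so the next frame starts clean.
		dirty.clear();
	}
}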
@@ -112,15 +339,49 @@
 			// Otherwise, write to a scratch buffer and enqueue a copy.
 			var block = stagingBuffer.getScratch(size);
 			var ptr = block.ptr();
-			for (int i = startObject; i < endObject; i++) {
-				var instance = instances.get(i);
-				writer.write(ptr, instance);
+			for (I instance : instances) {
+				if (instance != null) {
+					writer.write(ptr, instance);
+				}
 				ptr += instanceStride;
 			}
 			stagingBuffer.enqueueCopy(block.ptr(), size, instanceVbo, baseByte);
 		}
 
-		changedPages.clear();
+		contentsChanged.clear();
+	}
+
+	public void parallelUpdate() {
+		var pages = this.pages.get();
+
+		int page = 0;
+		while (mergeablePages.cardinality() > 1) {
+			page = mergeablePages.nextSetBit(page);
+			if (page < 0) {
+				break;
+			}
+
+			// Find the next mergeable page.
+			int next = mergeablePages.nextSetBit(page + 1);
+			if (next < 0) {
+				break;
+			}
+
+			// Try to merge the pages.
+			pages[page].takeFrom(pages[next]);
+		}
+	}
+
+	private static boolean isFull(int valid) {
+		return valid == 0xFFFFFFFF;
+	}
+
+	private static boolean isEmpty(int valid) {
+		return valid == 0;
+	}
+
+	private static boolean isMergeable(int valid) {
+		return !isEmpty(valid) && Integer.bitCount(valid) <= 16;
 	}
 
 	@Override
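The 16-instance threshold behind mergeablePages is what lets takeFrom always drain the donor page completely: two pages with at most 16 live instances each hold at most 32 between them, which fits in a single 32-slot page. A small check of that invariant using plain ints (no Flywheel types, example masks are arbitrary):

// Sketch: why "non-empty and <= 16 live instances" makes two pages safely mergeable.
final class MergeInvariant {
	static boolean isMergeable(int valid) {
		return valid != 0 && Integer.bitCount(valid) <= 16;
	}

	public static void main(String[] args) {
		// Two occupancy words, each with 16 or fewer bits set.
		int a = 0x0000FFFF;  // 16 live instances
		int b = 0b1010_1010; // 4 live instances

		int total = Integer.bitCount(a) + Integer.bitCount(b);

		// Their combined population always fits in a single 32-bit page,
		// so after merging, the donor page is guaranteed to end up empty.
		System.out.println(isMergeable(a) && isMergeable(b) && total <= 32); // true
	}
}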
@@ -143,4 +404,135 @@
 	public int local2GlobalInstanceIndex(int instanceIndex) {
 		return mapping.objectIndex2GlobalIndex(instanceIndex);
 	}
+
+	@Override
+	public I createInstance() {
+		var handle = new InstanceHandleImpl<I>(null);
+		I instance = type.create(handle);
+
+		addInner(instance, handle);
+
+		return instance;
+	}
+
+	public InstanceHandleImpl.State<I> revealInstance(InstanceHandleImpl<I> handle, I instance) {
+		addInner(instance, handle);
+		return handle.state;
+	}
+
+	@Override
+	public void stealInstance(@Nullable I instance) {
+		if (instance == null) {
+			return;
+		}
+
+		var instanceHandle = instance.handle();
+
+		if (!(instanceHandle instanceof InstanceHandleImpl<?>)) {
+			// UB: do nothing
+			return;
+		}
+
+		// Should InstanceType have an isInstance method?
+		@SuppressWarnings("unchecked") var handle = (InstanceHandleImpl<I>) instanceHandle;
+
+		// Not allowed to steal deleted instances.
+		if (handle.state instanceof InstanceHandleImpl.Deleted) {
+			return;
+		}
+		// No need to steal if the instance will recreate to us.
+		if (handle.state instanceof InstanceHandleImpl.Hidden<I> hidden && recreate.equals(hidden.recreate())) {
+			return;
+		}
+
+		// FIXME: in theory there could be a race condition here if the instance
+		// is somehow being stolen by 2 different instancers between threads.
+		// That seems kinda impossible so I'm fine leaving it as is for now.
+
+		// Add the instance to this instancer.
+		if (handle.state instanceof InstancePage<?> other) {
+			if (other.parent == this) {
+				return;
+			}
+
+			// Remove the instance from its old instancer.
+			// This won't have any unwanted effect when the old instancer
+			// is filtering deleted instances later, so is safe.
+			other.setDeleted(handle.index);
+
+			// Only lock now that we'll be mutating our state.
+			addInner(instance, handle);
+		} else if (handle.state instanceof InstanceHandleImpl.Hidden<I>) {
+			handle.state = new InstanceHandleImpl.Hidden<>(recreate, instance);
+		}
+	}
+
+	private void addInner(I instance, InstanceHandleImpl<I> handle) {
+		// Outer loop:
+		// - try to find an empty space
+		// - or grow the page array if we can't
+		// - add the instance to the new page, or try again
+		while (true) {
+			var pages = this.pages.get();
+
+			// First, try to find a page with space.
+			for (int i = fullPages.nextClearBit(0); i < pages.length; i = fullPages.nextClearBit(i + 1)) {
+				// It may have been filled in while we were searching, but hopefully not.
+				if (pages[i].add(instance, handle)) {
+					return;
+				}
+			}
+
+			// If we're here, all other pages are full
+			// If we hit this on the second iteration of the outer loop then `pages` is once again full.
+			var desiredLength = pages.length + 1;
+
+			// Inner loop: grow the page array. This is very similar to the logic in AtomicBitSet.
+			while (pages.length < desiredLength) {
+				// Thread safety: segments contains all pages from the currently visible pages, plus extra.
+				// all pages in the currently visible pages are canonical and will not change.
+				// Can't just `new InstancePage[]` because it has a generic parameter.
+				InstancePage<I>[] newPages = pageArray(desiredLength);
+
+				System.arraycopy(pages, 0, newPages, 0, pages.length);
+				newPages[pages.length] = new InstancePage<>(this, pages.length);
+
+				// because we are using a compareAndSet, if this thread "wins the race" and successfully sets this variable, then the new page becomes canonical.
+				if (this.pages.compareAndSet(pages, newPages)) {
+					pages = newPages;
+				} else {
+					// If we "lose the race" and are growing the AtomicBitset segments larger,
+					// then we will gather the new canonical pages from the update which we missed on the next iteration of this loop.
+					// The new page will be discarded and never seen again.
+					pages = this.pages.get();
+				}
+			}
+
+			// Shortcut: try to add the instance to the last page.
+			// Technically we could just let the outer loop go again, but that
+			// involves a good bit of work just to likely get back here.
+			if (pages[pages.length - 1].add(instance, handle)) {
+				return;
+			}
+			// It may be the case that many other instances were added in the same instant.
+			// We can still lose this race, though it is very unlikely.
+		}
+	}
+
+	public int instanceCount() {
+		// Not exactly accurate but it's an upper bound.
+		// TODO: maybe this could be tracked with an AtomicInteger?
+		return pages.get().length << ObjectStorage.LOG_2_PAGE_SIZE;
+	}
+
+	/**
+	 * Clear all instances without freeing resources.
+	 */
+	public void clear() {
+		this.pages.set(pageArray(0));
+		contentsChanged.clear();
+		validityChanged.clear();
+		fullPages.clear();
+		mergeablePages.clear();
+	}
 }
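addInner grows the page array with a copy-on-write compareAndSet on an AtomicReference, the same approach the comment above attributes to AtomicBitSet's segment growth. A standalone sketch of that growth loop, using a generic Object[] instead of InstancePage[] (illustrative only, not Flywheel's code):

import java.util.concurrent.atomic.AtomicReference;

// Sketch of the copy-on-write array growth used by addInner.
final class CowArray {
	final AtomicReference<Object[]> slots = new AtomicReference<>(new Object[0]);

	/** Grow the canonical array to at least desiredLength, racing politely with other threads. */
	Object[] growTo(int desiredLength) {
		Object[] current = slots.get();
		while (current.length < desiredLength) {
			Object[] grown = new Object[current.length + 1];
			System.arraycopy(current, 0, grown, 0, current.length);
			grown[current.length] = new Object(); // the freshly allocated "page"

			if (slots.compareAndSet(current, grown)) {
				// We won the race: the new array (and its new last element) is now canonical.
				current = grown;
			} else {
				// Someone else grew it first: discard our copy and re-read the canonical array.
				current = slots.get();
			}
		}
		return current;
	}

	public static void main(String[] args) {
		var array = new CowArray();
		System.out.println(array.growTo(3).length); // 3
	}
}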
ObjectStorage.java

@@ -1,6 +1,7 @@
 package dev.engine_room.flywheel.backend.engine.indirect;
 
 import java.util.Arrays;
+import java.util.BitSet;
 
 import org.lwjgl.system.MemoryUtil;
 

@@ -13,8 +14,12 @@ public class ObjectStorage extends AbstractArena {
 	public static final int PAGE_SIZE = 1 << LOG_2_PAGE_SIZE;
 	public static final int PAGE_MASK = PAGE_SIZE - 1;
 
-	public static final int INITIAL_PAGES_ALLOCATED = 4;
+	public static final int INVALID_PAGE = -1;
+
+	public static final int INITIAL_PAGES_ALLOCATED = 4;
+	public static final int DESCRIPTOR_SIZE_BYTES = Integer.BYTES * 2;
 
+	private final BitSet changedFrames = new BitSet();
 	/**
 	 * The GPU side buffer containing all the objects, logically divided into page frames.
 	 */

@@ -28,8 +33,6 @@ public class ObjectStorage extends AbstractArena {
 	 */
 	private MemoryBlock frameDescriptors;
 
-	private boolean needsUpload = false;
-
 	public ObjectStorage(long objectSizeBytes) {
 		super(PAGE_SIZE * objectSizeBytes);
 

@@ -37,8 +40,8 @@ public class ObjectStorage extends AbstractArena {
 		this.frameDescriptorBuffer = new ResizableStorageBuffer();
 
 		objectBuffer.ensureCapacity(INITIAL_PAGES_ALLOCATED * elementSizeBytes);
-		frameDescriptorBuffer.ensureCapacity(INITIAL_PAGES_ALLOCATED * Integer.BYTES);
-		frameDescriptors = MemoryBlock.malloc(INITIAL_PAGES_ALLOCATED * Integer.BYTES);
+		frameDescriptorBuffer.ensureCapacity(INITIAL_PAGES_ALLOCATED * DESCRIPTOR_SIZE_BYTES);
+		frameDescriptors = MemoryBlock.malloc(INITIAL_PAGES_ALLOCATED * DESCRIPTOR_SIZE_BYTES);
 	}
 
 	public Mapping createMapping() {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void free(int i) {
|
public void free(int i) {
|
||||||
|
if (i == INVALID_PAGE) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
super.free(i);
|
super.free(i);
|
||||||
MemoryUtil.memPutInt(ptrForPage(i), 0);
|
var ptr = ptrForPage(i);
|
||||||
|
MemoryUtil.memPutInt(ptr, 0);
|
||||||
|
MemoryUtil.memPutInt(ptr + 4, 0);
|
||||||
|
|
||||||
|
changedFrames.set(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void set(int i, int modelIndex, int validBits) {
|
||||||
|
var ptr = ptrForPage(i);
|
||||||
|
MemoryUtil.memPutInt(ptr, modelIndex);
|
||||||
|
MemoryUtil.memPutInt(ptr + 4, validBits);
|
||||||
|
|
||||||
|
changedFrames.set(i);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -64,12 +82,17 @@ public class ObjectStorage extends AbstractArena {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public void uploadDescriptors(StagingBuffer stagingBuffer) {
|
public void uploadDescriptors(StagingBuffer stagingBuffer) {
|
||||||
if (!needsUpload) {
|
if (changedFrames.isEmpty()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// We could be smarter about which spans are uploaded but this thing is so small it's probably not worth it.
|
|
||||||
stagingBuffer.enqueueCopy(frameDescriptors.ptr(), frameDescriptors.size(), frameDescriptorBuffer.handle(), 0);
|
var ptr = frameDescriptors.ptr();
|
||||||
needsUpload = false;
|
for (int i = changedFrames.nextSetBit(0); i >= 0 && i < capacity(); i = changedFrames.nextSetBit(i + 1)) {
|
||||||
|
var offset = (long) i * DESCRIPTOR_SIZE_BYTES;
|
||||||
|
stagingBuffer.enqueueCopy(ptr + offset, DESCRIPTOR_SIZE_BYTES, frameDescriptorBuffer.handle(), offset);
|
||||||
|
}
|
||||||
|
|
||||||
|
changedFrames.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
public void delete() {
|
public void delete() {
|
||||||
@ -79,7 +102,7 @@ public class ObjectStorage extends AbstractArena {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private long ptrForPage(int page) {
|
private long ptrForPage(int page) {
|
||||||
return frameDescriptors.ptr() + (long) page * Integer.BYTES;
|
return frameDescriptors.ptr() + (long) page * DESCRIPTOR_SIZE_BYTES;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static int objectIndex2PageIndex(int objectIndex) {
|
public static int objectIndex2PageIndex(int objectIndex) {
|
||||||
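Each page frame descriptor is now two 32-bit words written 8 bytes apart: the model index followed by the validity mask (DESCRIPTOR_SIZE_BYTES = Integer.BYTES * 2), which the compute shader reads back as two consecutive uints. A plain-ByteBuffer sketch of the same layout; the real code writes to native memory through LWJGL's MemoryUtil, and the page/model values below are made up:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the two-word page descriptor layout (modelIndex, validBits) at an 8-byte stride.
final class DescriptorLayout {
	static final int DESCRIPTOR_SIZE_BYTES = Integer.BYTES * 2;

	public static void main(String[] args) {
		int pageCount = 4;
		ByteBuffer descriptors = ByteBuffer.allocate(pageCount * DESCRIPTOR_SIZE_BYTES)
				.order(ByteOrder.LITTLE_ENDIAN);

		// Write page 2: model 7, with the low 16 slots live.
		int page = 2;
		int base = page * DESCRIPTOR_SIZE_BYTES;
		descriptors.putInt(base, 7);          // modelIndex
		descriptors.putInt(base + 4, 0xFFFF); // validBits

		// Reading it back mirrors what the shader does with pageIndex and pageIndex + 1.
		System.out.println(descriptors.getInt(base));     // 7
		System.out.println(descriptors.getInt(base + 4)); // 65535
	}
}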
@@ -97,61 +120,52 @@ public class ObjectStorage extends AbstractArena {
 		private static final int[] EMPTY_ALLOCATION = new int[0];
 		private int[] pages = EMPTY_ALLOCATION;
 
-		private int modelIndex = -1;
-		private int objectCount = 0;
-
-		/**
-		 * Adjust this allocation to the given model index and object count.
-		 *
-		 * <p>This method triggers eager resizing of the allocation to fit the new object count.
-		 * If the model index is different from the current one, all frame descriptors will be updated.
-		 *
-		 * @param modelIndex The model index the objects in this allocation are associated with.
-		 * @param objectCount The number of objects in this allocation.
-		 */
-		public void update(int modelIndex, int objectCount) {
-			boolean incremental = this.modelIndex == modelIndex;
-
-			if (incremental && objectCount == this.objectCount) {
-				// Nothing will change.
+		public void updatePage(int index, int modelIndex, int validBits) {
+			if (validBits == 0) {
+				holePunch(index);
 				return;
 			}
+			var frame = pages[index];
 
-			ObjectStorage.this.needsUpload = true;
+			if (frame == INVALID_PAGE) {
+				// Un-holed punch.
+				frame = unHolePunch(index);
+			}
 
-			this.modelIndex = modelIndex;
-			this.objectCount = objectCount;
+			ObjectStorage.this.set(frame, modelIndex, validBits);
+		}
 
+		/**
+		 * Free a page on the inside of the mapping, maintaining the same virtual mapping size.
+		 *
+		 * @param index The index of the page to free.
+		 */
+		public void holePunch(int index) {
+			ObjectStorage.this.free(pages[index]);
+			pages[index] = INVALID_PAGE;
+		}
+
+		/**
+		 * Allocate a new page on the inside of the mapping, maintaining the same virtual mapping size.
+		 *
+		 * @param index The index of the page to allocate.
+		 * @return The allocated page.
+		 */
+		private int unHolePunch(int index) {
+			int page = ObjectStorage.this.alloc();
+			pages[index] = page;
+			return page;
+		}
+
+		public void updateCount(int newLength) {
 			var oldLength = pages.length;
-			var newLength = objectIndex2PageIndex((objectCount + PAGE_MASK));
 
 			if (oldLength > newLength) {
 				// Eagerly free the now unnecessary pages.
 				// shrink will zero out the pageTable entries for the freed pages.
 				shrink(oldLength, newLength);
-
-				if (incremental) {
-					// Only update the last page, everything else is unchanged.
-					updateRange(newLength - 1, newLength);
-				}
 			} else if (oldLength < newLength) {
 				// Allocate new pages to fit the new object count.
 				grow(newLength, oldLength);
-
-				if (incremental) {
-					// Update the old last page + all new pages
-					updateRange(oldLength - 1, newLength);
-				}
-			} else {
-				if (incremental) {
-					// Only update the last page.
-					updateRange(oldLength - 1, oldLength);
-				}
 			}
-
-			if (!incremental) {
-				// Update all pages.
-				updateRange(0, newLength);
-			}
 		}

@@ -159,8 +173,8 @@ public class ObjectStorage extends AbstractArena {
 			return pages.length;
 		}
 
-		public long page2ByteOffset(int page) {
-			return ObjectStorage.this.byteOffsetOf(pages[page]);
+		public long page2ByteOffset(int index) {
+			return ObjectStorage.this.byteOffsetOf(pages[index]);
 		}
 
 		public void delete() {

@@ -168,35 +182,6 @@ public class ObjectStorage extends AbstractArena {
 				ObjectStorage.this.free(page);
 			}
 			pages = EMPTY_ALLOCATION;
-			modelIndex = -1;
-			objectCount = 0;
-
-			ObjectStorage.this.needsUpload = true;
-		}
-
-		/**
-		 * Calculates the page descriptor for the given page index.
-		 * Runs under the assumption than all pages are full except maybe the last one.
-		 */
-		private int calculatePageDescriptor(int pageIndex) {
-			int countInPage;
-			if (objectCount % PAGE_SIZE != 0 && pageIndex == pages.length - 1) {
-				// Last page && it isn't full -> use the remainder.
-				countInPage = objectCount & PAGE_MASK;
-			} else if (objectCount > 0) {
-				// Full page.
-				countInPage = PAGE_SIZE;
-			} else {
-				// Empty page, this shouldn't be reachable because we eagerly free empty pages.
-				countInPage = 0;
-			}
-			return (modelIndex & 0x3FFFFF) | (countInPage << 26);
-		}
-
-		private void updateRange(int start, int oldLength) {
-			for (int i = start; i < oldLength; i++) {
-				MemoryUtil.memPutInt(ptrForPage(pages[i]), calculatePageDescriptor(i));
-			}
 		}
 
 		private void grow(int neededPages, int oldLength) {
InstancedInstancer.java

@@ -7,7 +7,7 @@ import org.jetbrains.annotations.Nullable;
 
 import dev.engine_room.flywheel.api.instance.Instance;
 import dev.engine_room.flywheel.api.instance.InstanceWriter;
-import dev.engine_room.flywheel.backend.engine.AbstractInstancer;
+import dev.engine_room.flywheel.backend.engine.BaseInstancer;
 import dev.engine_room.flywheel.backend.engine.InstancerKey;
 import dev.engine_room.flywheel.backend.gl.TextureBuffer;
 import dev.engine_room.flywheel.backend.gl.buffer.GlBuffer;

@@ -15,7 +15,7 @@ import dev.engine_room.flywheel.backend.gl.buffer.GlBufferUsage;
 import dev.engine_room.flywheel.lib.math.MoreMath;
 import dev.engine_room.flywheel.lib.memory.MemoryBlock;
 
-public class InstancedInstancer<I extends Instance> extends AbstractInstancer<I> {
+public class InstancedInstancer<I extends Instance> extends BaseInstancer<I> {
 	private final int instanceStride;
 
 	private final InstanceWriter<I> writer;
@@ -109,6 +109,60 @@ public class InstancedInstancer<I extends Instance> extends AbstractInstancer<I>
 		return capacity > vbo.size();
 	}
 
+	public void parallelUpdate() {
+		if (deleted.isEmpty()) {
+			return;
+		}
+
+		// Figure out which elements are to be removed.
+		final int oldSize = this.instances.size();
+		int removeCount = deleted.cardinality();
+
+		if (oldSize == removeCount) {
+			clear();
+			return;
+		}
+
+		final int newSize = oldSize - removeCount;
+
+		// Start from the first deleted index.
+		int writePos = deleted.nextSetBit(0);
+
+		if (writePos < newSize) {
+			// Since we'll be shifting everything into this space we can consider it all changed.
+			changed.set(writePos, newSize);
+		}
+
+		// We definitely shouldn't consider the deleted instances as changed though,
+		// else we might try some out of bounds accesses later.
+		changed.clear(newSize, oldSize);
+
+		// Punch out the deleted instances, shifting over surviving instances to fill their place.
+		for (int scanPos = writePos; (scanPos < oldSize) && (writePos < newSize); scanPos++, writePos++) {
+			// Find next non-deleted element.
+			scanPos = deleted.nextClearBit(scanPos);
+
+			if (scanPos != writePos) {
+				// Grab the old instance/handle from scanPos...
+				var handle = handles.get(scanPos);
+				I instance = instances.get(scanPos);
+
+				// ... and move it to writePos.
+				handles.set(writePos, handle);
+				instances.set(writePos, instance);
+
+				// Make sure the handle knows it's been moved
+				handle.index = writePos;
+			}
+		}
+
+		deleted.clear();
+		instances.subList(newSize, oldSize)
+				.clear();
+		handles.subList(newSize, oldSize)
+				.clear();
+	}
+
 	public void delete() {
 		if (vbo == null) {
 			return;
AtomicBitSet.java

@@ -43,6 +43,14 @@ public class AtomicBitSet {
 		segments = new AtomicReference<>(new AtomicBitSetSegments(numSegmentsToPreallocate, numLongsPerSegment));
 	}
 
+	public void set(int position, boolean value) {
+		if (value) {
+			set(position);
+		} else {
+			clear(position);
+		}
+	}
+
 	public void set(int position) {
 		int longPosition = longIndexInSegmentForPosition(position);
 
Indirect cull compute shader (GLSL)

@@ -126,24 +126,22 @@ bool _flw_isVisible(uint instanceIndex, uint modelIndex) {
 }
 
 void main() {
-    uint pageIndex = gl_WorkGroupID.x;
+    uint pageIndex = gl_WorkGroupID.x << 1u;
 
     if (pageIndex >= _flw_pageFrameDescriptors.length()) {
         return;
     }
 
-    uint packedModelIndexAndCount = _flw_pageFrameDescriptors[pageIndex];
+    uint modelIndex = _flw_pageFrameDescriptors[pageIndex];
 
-    uint pageInstanceCount = packedModelIndexAndCount >> _FLW_PAGE_COUNT_OFFSET;
+    uint pageValidity = _flw_pageFrameDescriptors[pageIndex + 1];
 
-    if (gl_LocalInvocationID.x >= pageInstanceCount) {
+    if (((1u << gl_LocalInvocationID.x) & pageValidity) == 0) {
         return;
     }
 
     uint instanceIndex = gl_GlobalInvocationID.x;
 
-    uint modelIndex = packedModelIndexAndCount & _FLW_MODEL_INDEX_MASK;
-
     if (_flw_isVisible(instanceIndex, modelIndex)) {
         uint localIndex = atomicAdd(_flw_models[modelIndex].instanceCount, 1);
         uint targetIndex = _flw_models[modelIndex].baseInstance + localIndex;
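With the packed (modelIndex, count) descriptor replaced by an explicit validity mask, each shader invocation now tests its own bit instead of comparing against a per-page count, so dead slots in the middle of a page are simply skipped. The same test mirrored in Java for reference, with the lane index standing in for gl_LocalInvocationID.x (a sketch, not part of the shader or the mod):

// Sketch: the per-lane liveness test the culling shader now performs, mirrored on the CPU.
final class ValidityCheck {
	static boolean lives(int lane, int pageValidity) {
		// Inverse of the shader's early-out: ((1u << gl_LocalInvocationID.x) & pageValidity) == 0.
		return ((1 << lane) & pageValidity) != 0;
	}

	public static void main(String[] args) {
		int pageValidity = 0b1011; // slots 0, 1 and 3 hold live instances
		for (int lane = 0; lane < 4; lane++) {
			System.out.println("lane " + lane + " -> " + lives(lane, pageValidity));
		}
		// lane 0 -> true, lane 1 -> true, lane 2 -> false, lane 3 -> true
	}
}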