error CS1593: Delegate 'Invalid_ForEach_Signature_See_ForEach_Documentation_For_Rules_And_Restrictions' does not take N arguments
This error message reports the number of arguments as the problem, even when the actual problem is the argument order.
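As a rough sketch of what triggers it (using component types like the Data1 and Data2 that appear in the example further below), a lambda that lists an in parameter before a ref parameter produces this error even though the argument count is valid:
// Invalid ordering: ref parameters must come before in parameters,
// yet the compiler reports the argument count.
Entities
    .ForEach((in Data1 d1, ref Data2 d2) => { /* ... */ })
    .Schedule();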
Custom delegates
If you want to use more than eight parameters in a ForEach lambda expression, you must declare your own delegate type and ForEach overload. This lets you use an unlimited number of parameters and place the ref, in, and value parameters in whatever order you want.
You can also declare the three named parameters entity, entityInQueryIndex, and nativeThreadIndex anywhere in the parameter list. Don't use the ref or in modifiers for these parameters.
The following example shows 12 parameters and uses the entity parameter in the lambda expression:
static class BringYourOwnDelegate
{
// Declare the delegate that takes 12 parameters. T0 is used for the Entity argument
public delegate void CustomForEachDelegate<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
(T0 t0, in T1 t1, in T2 t2, in T3 t3, in T4 t4, in T5 t5,
in T6 t6, in T7 t7, in T8 t8, in T9 t9, in T10 t10, in T11 t11);
// Declare the function overload
public static TDescription ForEach<TDescription, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
(this TDescription description, CustomForEachDelegate<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> codeToRun)
where TDescription : struct, Unity.Entities.CodeGeneratedJobForEach.ISupportForEachWithUniversalDelegate =>
LambdaForEachDescriptionConstructionMethods.ThrowCodeGenException<TDescription>();
}
// A system that uses the custom delegate and overload
[RequireMatchingQueriesForUpdate]
public partial class MayParamsSystem : SystemBase
{
protected override void OnUpdate()
{
Entities.ForEach(
(Entity entity0,
in Data1 d1,
in Data2 d2,
in Data3 d3,
in Data4 d4,
in Data5 d5,
in Data6 d6,
in Data7 d7,
in Data8 d8,
in Data9 d9,
in Data10 d10,
in Data11 d11
) => {/* .. */})
.Run();
}
}
You can’t pass chunk components to the Entities.ForEach lambda expression.
For dynamic buffers, use DynamicBuffer<T> rather than the component type stored in the buffer:
[RequireMatchingQueriesForUpdate]
public partial class BufferSum : SystemBase
{
private EntityQuery query;
//Schedules the two jobs with a dependency between them
protected override void OnUpdate()
{
//The query variable can be accessed here because we are
//using WithStoreEntityQueryInField(query) in the entities.ForEach below
int entitiesInQuery = query.CalculateEntityCount();
//Create a native array to hold the intermediate sums
//(one element per entity)
NativeArray<int> intermediateSums
= new NativeArray<int>(entitiesInQuery, Allocator.TempJob);
//Schedule the first job to add all the buffer elements
Entities
.ForEach((int entityInQueryIndex, in DynamicBuffer<IntBufferData> buffer) =>
{
for (int i = 0; i < buffer.Length; i++)
{
intermediateSums[entityInQueryIndex] += buffer[i].Value;
}
})
.WithStoreEntityQueryInField(ref query)
.WithName("IntermediateSums")
.ScheduleParallel(); // Execute in parallel for each chunk of entities
//Schedule the second job, which depends on the first
Job.WithCode(() =>
{
int result = 0;
for (int i = 0; i < intermediateSums.Length; i++)
{
result += intermediateSums[i];
}
//Not burst compatible:
Debug.Log("Final sum is " + result);
})
.WithDisposeOnCompletion(intermediateSums)
.WithoutBurst()
.WithName("FinalSum")
.Schedule(); // Execute on a single, background thread
}
}
Entities.WithAll<LocalToWorld>()
.WithAny<Rotation, ObjectPosition, ObjectUniformScale>()
.WithNone<ObjectNonUniformScale>()
.ForEach((ref Destination outputData, in Source inputData) =>
{
/* do some work */
})
.Schedule();
[RequireMatchingQueriesForUpdate]
public partial class BoidForEachSystem : SystemBase
{
EntityQuery m_BoidQuery;
EntityQuery m_ObstacleQuery;
EntityQuery m_TargetQuery;
protected override void OnUpdate()
{
// Calculate amount of entities in respective queries.
var boidCount = m_BoidQuery.CalculateEntityCount();
var obstacleCount = m_ObstacleQuery.CalculateEntityCount();
var targetCount = m_TargetQuery.CalculateEntityCount();
// Allocate arrays to store data equal to the amount of entities matching respective queries.
var cellSeparation = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(boidCount, ref World.UpdateAllocator);
var copyTargetPositions = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(targetCount, ref World.UpdateAllocator);
var copyObstaclePositions = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(obstacleCount, ref World.UpdateAllocator);
// Schedule job for respective arrays to be stored with respective queries.
Entities
.WithSharedComponentFilter(new BoidSetting{num=1})
.WithStoreEntityQueryInField(ref m_BoidQuery)
.ForEach((int entityInQueryIndex, in LocalToWorld localToWorld) =>
{
cellSeparation[entityInQueryIndex] = localToWorld.Position;
})
.ScheduleParallel();
Entities
.WithAll<BoidTarget>()
.WithStoreEntityQueryInField(ref m_TargetQuery)
.ForEach((int entityInQueryIndex, in LocalToWorld localToWorld) =>
{
copyTargetPositions[entityInQueryIndex] = localToWorld.Position;
})
.ScheduleParallel();
Entities
.WithAll<BoidObstacle>()
.WithStoreEntityQueryInField(ref m_ObstacleQuery)
.ForEach((int entityInQueryIndex, in LocalToWorld localToWorld) =>
{
copyObstaclePositions[entityInQueryIndex] = localToWorld.Position;
})
.ScheduleParallel();
}
}
This can be rewritten as follows:
[RequireMatchingQueriesForUpdate]
public partial class BoidJobEntitySystem : SystemBase
{
EntityQuery m_BoidQuery;
EntityQuery m_ObstacleQuery;
EntityQuery m_TargetQuery;
protected override void OnUpdate()
{
// Calculate amount of entities in respective queries.
var boidCount = m_BoidQuery.CalculateEntityCount();
var obstacleCount = m_ObstacleQuery.CalculateEntityCount();
var targetCount = m_TargetQuery.CalculateEntityCount();
// Allocate arrays to store data equal to the amount of entities matching respective queries.
var cellSeparation = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(boidCount, ref World.UpdateAllocator);
var copyTargetPositions = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(targetCount, ref World.UpdateAllocator);
var copyObstaclePositions = CollectionHelper.CreateNativeArray<float3, RewindableAllocator>(obstacleCount, ref World.UpdateAllocator);
// Schedule job for respective arrays to be stored with respective queries.
new CopyPositionsJob { copyPositions = cellSeparation}.ScheduleParallel(m_BoidQuery);
new CopyPositionsJob { copyPositions = copyTargetPositions}.ScheduleParallel(m_TargetQuery);
new CopyPositionsJob { copyPositions = copyObstaclePositions}.ScheduleParallel(m_ObstacleQuery);
}
protected override void OnCreate()
{
// Get respective queries, that includes components required by `CopyPositionsJob` described earlier.
m_BoidQuery = GetEntityQuery(typeof(LocalToWorld));
m_BoidQuery.SetSharedComponentFilter(new BoidSetting{num=1});
m_ObstacleQuery = GetEntityQuery(typeof(LocalToWorld), typeof(BoidObstacle));
m_TargetQuery = GetEntityQuery(typeof(LocalToWorld), typeof(BoidTarget));
}
}
public struct SampleComponent : IComponentData { public float Value; }
public partial struct ASampleJob : IJobEntity
{
// Adds one to every SampleComponent value
void Execute(ref SampleComponent sample)
{
sample.Value += 1f;
}
}
public partial class ASample : SystemBase
{
protected override void OnUpdate()
{
// Schedules the job
new ASampleJob().ScheduleParallel();
}
}
partial struct QueryJob : IJobEntity
{
// Iterates over all SampleComponents and increments their value
public void Execute(ref SampleComponent sample)
{
sample.Value += 1;
}
}
[RequireMatchingQueriesForUpdate]
public partial class QuerySystem : SystemBase
{
// Query that matches QueryJob, specified for `BoidTarget`
EntityQuery query_boidtarget;
// Query that matches QueryJob, specified for `BoidObstacle`
EntityQuery query_boidobstacle;
protected override void OnCreate()
{
// Query that contains all of Execute params found in `QueryJob` - as well as additional user specified component `BoidTarget`.
query_boidtarget = GetEntityQuery(ComponentType.ReadWrite<SampleComponent>(),ComponentType.ReadOnly<BoidTarget>());
// Query that contains all of Execute params found in `QueryJob` - as well as additional user specified component `BoidObstacle`.
query_boidobstacle = GetEntityQuery(ComponentType.ReadWrite<SampleComponent>(),ComponentType.ReadOnly<BoidObstacle>());
}
protected override void OnUpdate()
{
// Uses the BoidTarget query
new QueryJob().ScheduleParallel(query_boidtarget);
// Uses the BoidObstacle query
new QueryJob().ScheduleParallel(query_boidobstacle);
// Uses query created automatically that matches parameters found in `QueryJob`.
new QueryJob().ScheduleParallel();
}
}
Declare an int parameter in Execute to get the index of the current entity within the query for the current iteration. This is the equivalent of entityInQueryIndex in Entities.ForEach.
The following is an example of EntityInQueryIndex:
[BurstCompile]
partial struct CopyPositionsJob : IJobEntity
{
public NativeArray<float3> copyPositions;
// Iterates over all `LocalToWorld` and stores their position inside `copyPositions`.
public void Execute([EntityInQueryIndex] int entityInQueryIndex, in LocalToWorld localToWorld)
{
copyPositions[entityInQueryIndex] = localToWorld.Position;
}
}
[RequireMatchingQueriesForUpdate]
public partial class EntityInQuerySystem : SystemBase
{
// This query should match `CopyPositionsJob` parameters
EntityQuery query;
protected override void OnCreate()
{
// Get query that matches `CopyPositionsJob` parameters
query = GetEntityQuery(ComponentType.ReadOnly<LocalToWorld>());
}
protected override void OnUpdate()
{
// Get a native array equal to the size of the amount of entities found by the query.
var positions = new NativeArray<float3>(query.CalculateEntityCount(), World.UpdateAllocator.ToAllocator);
// Schedule job on parallel threads for this array.
new CopyPositionsJob{copyPositions = positions}.ScheduleParallel();
// Dispose the array of positions found by the job.
positions.Dispose(Dependency);
}
}
If your entity query has Any filters, or relies on fully optional components that don't appear in the query at all, you can use the ArchetypeChunk.Has function to test whether the current chunk contains one of those components before you use it:
// If entity has Rotation and LocalToWorld components,
// slerp to align to the velocity vector
if (batchInChunk.Has<Rotation>(rotationTypeHandle) &&
batchInChunk.Has<LocalToWorld>(l2wTypeHandle))
{
NativeArray<Rotation> rotations
= batchInChunk.GetNativeArray(rotationTypeHandle);
NativeArray<LocalToWorld> transforms
= batchInChunk.GetNativeArray(l2wTypeHandle);
// By putting the loop inside the check for the
// optional components, we can check once per batch
// rather than once per entity.
for (int i = 0; i < batchInChunk.Count; i++)
{
float3 direction = math.normalize(velocityVectors[i].Value);
float3 up = transforms[i].Up;
quaternion rotation = rotations[i].Value;
quaternion look = quaternion.LookRotation(direction, up);
quaternion newRotation = math.slerp(rotation, look, DeltaTime);
rotations[i] = new Rotation() { Value = newRotation };
}
}
[RequireMatchingQueriesForUpdate]
public partial class UpdateTranslationFromVelocitySystem : SystemBase
{
EntityQuery query;
protected override void OnCreate()
{
// Set up the query
var description = new EntityQueryDesc()
{
All = new ComponentType[]
{ComponentType.ReadWrite<ObjectPosition>(),
ComponentType.ReadOnly<VelocityVector>()}
};
query = this.GetEntityQuery(description);
}
protected override void OnUpdate()
{
// Instantiate the job struct
var updateFromVelocityJob
= new UpdateTranslationFromVelocityJob();
// Set the job component type handles
// "this" is your SystemBase subclass
updateFromVelocityJob.translationTypeHandle
= this.GetComponentTypeHandle<ObjectPosition>(false);
updateFromVelocityJob.velocityTypeHandle
= this.GetComponentTypeHandle<VelocityVector>(true);
// Set other data need in job, such as time
updateFromVelocityJob.DeltaTime = World.Time.DeltaTime;
// Schedule the job
this.Dependency
= updateFromVelocityJob.ScheduleParallel(query, this.Dependency);
}
}
struct UpdateOnChangeJob : IJobEntityBatch
{
[ReadOnly] public ComponentTypeHandle<InputA> InputATypeHandle;
[ReadOnly] public ComponentTypeHandle<InputB> InputBTypeHandle;
public ComponentTypeHandle<Output> OutputTypeHandle;
public uint LastSystemVersion;
[BurstCompile]
public void Execute(ArchetypeChunk batchInChunk, int batchIndex)
{
var inputAChanged = batchInChunk.DidChange(InputATypeHandle, LastSystemVersion);
var inputBChanged = batchInChunk.DidChange(InputBTypeHandle, LastSystemVersion);
// If neither component changed, skip the current batch
if (!(inputAChanged || inputBChanged))
return;
var inputAs = batchInChunk.GetNativeArray(InputATypeHandle);
var inputBs = batchInChunk.GetNativeArray(InputBTypeHandle);
var outputs = batchInChunk.GetNativeArray(OutputTypeHandle);
for (var i = 0; i < outputs.Length; i++)
{
outputs[i] = new Output { Value = inputAs[i].Value + inputBs[i].Value };
}
}
}
using Unity.Burst;
using Unity.Collections;
using Unity.Entities;
using Unity.Jobs;
using Unity.Transforms;
public partial class MovementSpeedSystem : SystemBase
{
// OnUpdate runs on the main thread.
protected override void OnUpdate()
{
// Capture the frame's delta time on the main thread so the lambda can use it.
float dt = SystemAPI.Time.DeltaTime;
Entities
.ForEach((ref Translation position, in MovementSpeed speed) =>
{
float3 displacement = speed.Value * dt;
position = new Translation(){
Value = position.Value + displacement
};
})
.ScheduleParallel();
}
}
// This method can operate on any IBlendable struct (and can call the
// IBlendable methods) but requires no managed objects or virtual method calls.
void foo<T>(T a) where T : struct, IBlendable {...}
You must write jobs in HPC#, so a job that operates on a range of types must be generic:
[BurstCompile()]
public struct BlendJob<T> : IJob
where T : struct, IBlendable
{
public NativeReference<T> blendable;
public void Execute()
{
var val = blendable.Value;
val.Blend();
blendable.Value = val;
}
}
// This assembly attribute allows Burst-compiled
// code in the same assembly to schedule the
// concrete specialization <int, float> for MyJob.
[assembly: RegisterGenericJobType(typeof(MyJob<int, float>))]
If you try to schedule a job whose concrete specialization has not been registered in any assembly, Unity throws an exception.
It doesn't matter which assembly registers the type. For example, if a job type is registered only in assembly Foo, you can still schedule it in assembly Bar.
Registering the same concrete specialization more than once is not treated as an error.
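As a minimal sketch (reusing the MyJob<int, float> specialization from the attribute above), repeating the registration is simply redundant:
// Duplicate registration of the same specialization is allowed; it is not an error.
[assembly: RegisterGenericJobType(typeof(MyJob<int, float>))]
[assembly: RegisterGenericJobType(typeof(MyJob<int, float>))]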
Automatic registration of concrete job types
When you directly instantiate a concrete specialization of a generic job, Unity automatically registers that specialization in the assembly:
// Registers specialization <int, float> for MyJob in the assembly.
var job = new MyJob<int, float>();
However, when a concrete specialization is only instantiated indirectly, Unity does not register it automatically:
void makeJob<T>()
{
new MyJob<T, float>().Schedule();
}
void foo()
{
makeJob<int>(); // does NOT register MyJob<int, float>
}
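One way to handle the indirect case, sketched here on the assumption that MyJob<,> is the same generic job as in the attribute example above, is to register the needed specialization explicitly so that makeJob<int>() can schedule it:
// Explicitly register the specialization that makeJob<int>() schedules indirectly.
[assembly: RegisterGenericJobType(typeof(MyJob<int, float>))]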
struct BlendJobWrapper<T> where T : struct, IBlendable
{
public T blendable;
[BurstCompile()]
public struct BlendJob : IJob
{
public T blendable;
public void Execute() {...}
}
public JobHandle Schedule(JobHandle dep = new JobHandle())
{
return new BlendJob { blendable = blendable }.Schedule(dep);
}
}
public static unsafe JobHandle Sort<T, U>(T* array, int length, U comp, JobHandle deps,
out SegmentSort<T, U> segmentSortJob, out SegmentSortMerge<T, U> segmentSortMergeJob)
where T : unmanaged
where U : IComparer<T>
{
segmentSortJob = new SegmentSort<T, U> { Data = array, Comp = comp, Length = length, SegmentWidth = 1024 };
segmentSortMergeJob = new SegmentSortMerge<T, U> { Data = array, Comp = comp, Length = length, SegmentWidth = 1024 };
if (length == 0)
return deps;
var segmentCount = (length + 1023) / 1024;
var workerSegmentCount = segmentCount / math.max(1, JobsUtility.MaxJobThreadCount);
var handle = segmentSortJob.Schedule(segmentCount, workerSegmentCount, deps);
return segmentSortMergeJob.Schedule(handle);
}
However, while this solves the registration problem, you then have to pass out parameters to receive two job structs you probably don't want.
A better solution is to wrap both job types in a single wrapper type:
unsafe struct SortJob<T, U>
where T : unmanaged
where U : IComparer<T>
{
public T* data;
public U comparer;
public int length;
unsafe struct SegmentSort : IJobParallelFor
{
[NativeDisableUnsafePtrRestriction]
public T* data;
public U comp;
public int length;
public int segmentWidth;
public void Execute(int index) {...}
}
unsafe struct SegmentSortMerge : IJob
{
[NativeDisableUnsafePtrRestriction]
public T* data;
public U comp;
public int length;
public int segmentWidth;
public void Execute() {...}
}
public JobHandle Schedule(JobHandle dep = new JobHandle())
{
if (length == 0)
return dep;
var segmentSortJob = new SegmentSort { data = data, comp = comparer, length = length, segmentWidth = 1024 };
var segmentSortMergeJob = new SegmentSortMerge { data = data, comp = comparer, length = length, segmentWidth = 1024 };
var segmentCount = (length + 1023) / 1024;
var workerSegmentCount = segmentCount / math.max(1, JobsUtility.MaxJobThreadCount);
var handle = segmentSortJob.Schedule(segmentCount, workerSegmentCount, dep);
return segmentSortMergeJob.Schedule(handle);
}
}
public partial class RandomSumJob : SystemBase
{
private uint seed = 1;
protected override void OnUpdate()
{
Random randomGen = new Random(seed++);
NativeArray<float> randomNumbers
= new NativeArray<float>(500, Allocator.TempJob);
Job.WithCode(() =>
{
for (int i = 0; i < randomNumbers.Length; i++)
{
randomNumbers[i] = randomGen.NextFloat();
}
}).Schedule();
// To get data out of a job, you must use a NativeArray
// even if there is only one value
NativeArray<float> result
= new NativeArray<float>(1, Allocator.TempJob);
Job.WithCode(() =>
{
for (int i = 0; i < randomNumbers.Length; i++)
{
result[0] += randomNumbers[i];
}
}).Schedule();
// This completes the scheduled jobs to get the result immediately, but for
// better efficiency you should schedule jobs early in the frame with one
// system and get the results late in the frame with a different system.
this.CompleteDependency();
UnityEngine.Debug.Log("The sum of "
+ randomNumbers.Length + " numbers is " + result[0]);
randomNumbers.Dispose();
result.Dispose();
}
}
struct SharedGrouping : ISharedComponentData
{
public int Group;
}
[RequireMatchingQueriesForUpdate]
partial class ImpulseSystem : SystemBase
{
EntityQuery query;
protected override void OnCreate()
{
query = new EntityQueryBuilder(Allocator.Temp)
.WithAllRW<ObjectPosition>()
.WithAll<Displacement, SharedGrouping>()
.Build(this);
}
protected override void OnUpdate()
{
// Only iterate over entities that have the SharedGrouping data set to 1
query.SetSharedComponentFilter(new SharedGrouping { Group = 1 });
var positions = query.ToComponentDataArray<ObjectPosition>(Allocator.Temp);
var displacements = query.ToComponentDataArray<Displacement>(Allocator.Temp);
for (int i = 0; i < positions.Length; i++)
positions[i] = new ObjectPosition
{
Value = positions[i].Value + displacements[i].Value
};
// ToComponentDataArray returns copies, so write the updated positions back to the entities.
query.CopyFromComponentDataArray(positions);
}
}
EntityQuery query = new EntityQueryBuilder(Allocator.Temp)
.WithAllRW<ObjectRotation>()
// Start a new query description
.AddAdditionalQuery()
.WithAllRW<ObjectRotationSpeed>()
.Build(this);
// ... in a system update
// You don't specify a size because the buffer will grow as needed.
EntityCommandBuffer ecb = new EntityCommandBuffer(Allocator.TempJob);
// The ECB is captured by the ForEach job.
// Until completed, the job owns the ECB's job safety handle.
Entities
.ForEach((Entity e, in FooComp foo) =>
{
if (foo.Value > 0)
{
// Record a command that will later add
// BarComp to the entity.
ecb.AddComponent<BarComp>(e);
}
}).Schedule();
this.Dependency.Complete();
// Now that the job is completed, you can enact the changes.
// Note that Playback can only be called on the main thread.
ecb.Playback(this.EntityManager);
// You are responsible for disposing of any ECB you create.
ecb.Dispose();
EntityCommandBuffer ecb = new EntityCommandBuffer(Allocator.TempJob);
// Methods of this writer record commands to
// the EntityCommandBuffer in a thread-safe way.
EntityCommandBuffer.ParallelWriter parallelEcb = ecb.AsParallelWriter();
// ... in a system update
EntityCommandBuffer ecb = new EntityCommandBuffer(Allocator.TempJob);
// We need to write to the ECB concurrently across threads.
EntityCommandBuffer.ParallelWriter ecbParallel = ecb.AsParallelWriter();
// The entityInQueryIndex is unique for each entity and will be
// consistent for each particular entity regardless of scheduling.
Entities
.ForEach((Entity e, int entityInQueryIndex, in FooComp foo) => {
if (foo.Value > 0)
{
// The first arg is the 'sort key' recorded with the command.
ecbParallel.AddComponent<BarComp>(entityInQueryIndex, e);
}
}).Schedule();
// Playback is single-threaded as normal.
this.Dependency.Complete();
// To ensure deterministic playback order,
// the commands are first sorted by their sort keys.
ecb.Playback(this.EntityManager);
ecb.Dispose();
// ... in a system update
EntityCommandBuffer ecb =
new EntityCommandBuffer(Allocator.TempJob, PlaybackPolicy.MultiPlayback);
// ... record commands
ecb.Playback(this.EntityManager);
// Additional playbacks are OK because this ECB is MultiPlayback.
ecb.Playback(this.EntityManager);
ecb.Dispose();
// ... in a system
// Assume an EntityCommandBufferSystem exists named FooECBSystem.
EntityCommandBufferSystem sys =
this.World.GetExistingSystemManaged<FooECBSystem>();
// Create a command buffer that will be played back
// and disposed by MyECBSystem.
EntityCommandBuffer ecb = sys.CreateCommandBuffer();
// A ForEach with no argument to Schedule implicitly
// assigns its returned JobHandle to this.Dependency
Entities
.ForEach((Entity e, in FooComp foo) => {
// ... record to the ECB
}).Schedule();
// Register the job so that it gets completed by the ECB system.
sys.AddJobHandleForProducer(this.Dependency);
// You should specify where exactly in the frame
// that the ECB system should update.
[UpdateInGroup(typeof(SimulationSystemGroup))]
[UpdateAfter(typeof(FooSystem))]
public class MyECBSystem : EntityCommandBufferSystem {
// This class is intentionally empty. There is generally no
// reason to put any code in an EntityCommandBufferSystem.
}
// ... in a system
EntityCommandBuffer ecb = new EntityCommandBuffer(Allocator.TempJob);
Entity placeholderEntity = ecb.CreateEntity();
// Valid to use placeholderEntity in later commands of same ECB.
ecb.AddComponent<FooComp>(placeholderEntity);
// The real entity is created, and
// FooComp is added to the real entity.
ecb.Playback(this.EntityManager);
// Exception! The placeholderEntity has no meaning outside
// the ECB which created it, even after playback.
this.EntityManager.AddComponent<BarComp>(placeholderEntity);
ecb.Dispose();
// ... in a system
EntityCommandBuffer ecb = new EntityCommandBuffer(Allocator.TempJob);
// For all entities with a FooComp component...
Entities
.WithAll<FooComp>()
.ForEach((Entity e) =>
{
// In playback, an actual entity will be created
// that corresponds to this placeholder entity.
Entity placeholderEntity = ecb.CreateEntity();
// (Assume BarComp has an Entity field called TargetEnt.)
BarComp bar = new BarComp { TargetEnt = placeholderEntity };
// In playback, TargetEnt will be assigned the
// actual Entity that corresponds to placeholderEntity.
ecb.AddComponent(e, bar);
}).Run();
// After playback, each entity with FooComp now has a
// BarComp component whose TargetEnt references a new entity.
ecb.Playback(this.EntityManager);
ecb.Dispose();
public struct BufferData : IBufferElementData
{
public float Value;
}
[RequireMatchingQueriesForUpdate]
public partial class BufferLookupSystem : SystemBase
{
protected override void OnUpdate()
{
BufferLookup<BufferData> buffersOfAllEntities
= this.GetBufferLookup<BufferData>(true);
Entities
.ForEach((ref Rotation orientation,
in LocalToWorld transform,
in Target target) =>
{
// Check to make sure the target Entity with this buffer type still exists
if (!buffersOfAllEntities.HasBuffer(target.entity))
return;
// Get a reference to the buffer
DynamicBuffer<BufferData> bufferOfOneEntity = buffersOfAllEntities[target.entity];
// Use the data in the buffer
float avg = 0;
for (var i = 0; i < bufferOfOneEntity.Length; i++)
{
avg += bufferOfOneEntity[i].Value;
}
if (bufferOfOneEntity.Length > 0)
avg /= bufferOfOneEntity.Length;
})
.ScheduleParallel();
}
}
protected override void OnUpdate()
{
var job = new ChaserSystemJob();
// Set non-ECS data fields
job.deltaTime = SystemAPI.Time.DeltaTime;
// Schedule the job using Dependency property
Dependency = job.ScheduleParallel(query, this.Dependency);
}
For example, if your system writes to component W whenever component A or B is present on an entity, you can define a write group for W as follows:
public struct W : IComponentData
{
public int Value;
}
[WriteGroup(typeof(W))]
public struct A : IComponentData
{
public int Value;
}
[WriteGroup(typeof(W))]
public struct B : IComponentData
{
public int Value;
}
You do not add the target of the write group (component W in the example above) to its own write group.
Enabling write group filtering
To enable write group filtering, set the EntityQueryOptions.FilterWriteGroup flag on the query. For an Entities.ForEach, use WithEntityQueryOptions:
public partial class AddingSystem : SystemBase
{
protected override void OnUpdate() {
Entities
// support write groups by setting EntityQueryOptions
.WithEntityQueryOptions(EntityQueryOptions.FilterWriteGroup)
.ForEach((ref W w, in B b) => {
// perform computation here
}).ScheduleParallel();}
}
For a query description object, set the flag when you create the query:
public partial class AddingSystem : SystemBase
{
private EntityQuery m_Query;
protected override void OnCreate()
{
var queryDescription = new EntityQueryDesc
{
All = new ComponentType[] {
ComponentType.ReadWrite<W>(),
ComponentType.ReadOnly<B>()
},
Options = EntityQueryOptions.FilterWriteGroup
};
m_Query = GetEntityQuery(queryDescription);
}
// Define IJobEntityBatch struct and schedule...
}
When you enable write group filtering on a query, the query adds every component in the write group of each writable component to the query's None list, unless you explicitly add those components to the All or Any lists. As a result, the query only selects an entity if every component on that entity from a given write group is explicitly required by the query; if the entity has one or more additional components from that write group, the query rejects it (see the sketch after the list below).
In the example code above, the query:
Excludes any entity that has component A, because W is writable and A is part of W's write group.
Does not exclude entities that have component B: even though B is part of W's write group, it is explicitly specified in the All list.
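As a sketch of the effect (not code that the system generates), the filtered query above behaves roughly as if component A had been added to an explicit None list:
var equivalentDescription = new EntityQueryDesc
{
    All = new ComponentType[] {
        ComponentType.ReadWrite<W>(),
        ComponentType.ReadOnly<B>()
    },
    // Write group filtering implicitly excludes the remaining members of
    // W's write group (component A here), as if they were listed in None.
    None = new ComponentType[] { ComponentType.ReadOnly<A>() }
};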
using System;
using Unity.Collections;
using Unity.Entities;
using Unity.Transforms;
using Unity.Mathematics;
[Serializable]
[WriteGroup(typeof(Rotation))]
public struct RotationAngleAxis : IComponentData
{
public float Angle;
public float3 Axis;
}
You can then update any entity that has a RotationAngleAxis component without contention:
using Unity.Burst;
using Unity.Entities;
using Unity.Jobs;
using Unity.Collections;
using Unity.Mathematics;
using Unity.Transforms;
public partial class RotationAngleAxisSystem : SystemBase
{
protected override void OnUpdate()
{
Entities.ForEach((ref Rotation destination, in RotationAngleAxis source) =>
{
destination.Value
= quaternion.AxisAngle(math.normalize(source.Axis), source.Angle);
}).ScheduleParallel();
}
}
The earlier example defines a write group containing components A and B that targets component W. If you add a new component named C to that write group, a new system that knows about C can query for entities that contain C, and it doesn't matter whether those entities also have component A or B.
However, that is no longer true if the new system also enables write group filtering. If the query only requires component C, write group filtering excludes any entity that also has A or B. Instead, you must explicitly query for every meaningful combination of components.
You can use the query's Any clause where appropriate:
var query = new EntityQueryDesc
{
All = new ComponentType[] {
ComponentType.ReadOnly<C>(),
ComponentType.ReadWrite<W>()
},
Any = new ComponentType[] {
ComponentType.ReadOnly<A>(),
ComponentType.ReadOnly<B>()
},
Options = EntityQueryOptions.FilterWriteGroup
};