Handling async pipeline creation errors more gracefully
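
Compute pipeline creation in the shader execution tests now goes through
createComputePipelineAsync, and each call site wraps pipeline creation in a
try/catch: a GPUPipelineError is reported via t.fail() together with its
structured reason ('validation' or 'internal'), while any other error is
rethrown. The expression harness's getOrCreate pipeline-cache helper becomes
async so it can await the pipeline promise before caching the result.

For reference, this is the shape each call site adopts below. This is a sketch
only: the narrowed fixture type and the buildAndDispatch callback are
illustrative stand-ins, not CTS helpers, and the WebGPU type declarations
(e.g. @webgpu/types) are assumed to be in scope.

    // Sketch of the pattern used in the diff; `buildAndDispatch` stands in for
    // the test-specific pipeline setup and dispatch.
    async function runReportingPipelineErrors(
      t: { fail(msg: string): void },        // narrowed view of the test fixture
      buildAndDispatch: () => Promise<void>  // wraps createComputePipelineAsync etc.
    ): Promise<void> {
      try {
        await buildAndDispatch();
      } catch (err) {
        if (err instanceof GPUPipelineError) {
          // Async pipeline creation rejects with GPUPipelineError; surface it as
          // a test failure carrying the structured reason rather than letting it
          // escape as an uncaught error.
          t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
        } else {
          // Anything else is unexpected here; let it propagate.
          throw err;
        }
      }
    }
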
diff --git a/src/webgpu/shader/execution/expression/expression.ts b/src/webgpu/shader/execution/expression/expression.ts
index d59456c..bba8fef 100644
--- a/src/webgpu/shader/execution/expression/expression.ts
+++ b/src/webgpu/shader/execution/expression/expression.ts
@@ -220,12 +220,12 @@
* @param create the function used to construct a value, if not found in the cache
* @returns the value, either fetched from the cache, or newly built.
*/
-function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => V) {
+async function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => Promise<V>) {
const existing = map.get(key);
if (existing !== undefined) {
return existing;
}
- const value = create();
+ const value = await create();
map.set(key, value);
return value;
}
@@ -307,16 +307,24 @@
};
const processBatch = async (batchCases: CaseList) => {
- const checkBatch = await submitBatch(
- t,
- shaderBuilder,
- parameterTypes,
- resultType,
- batchCases,
- cfg.inputSource,
- pipelineCache
- );
- checkBatch();
+ try {
+ const checkBatch = await submitBatch(
+ t,
+ shaderBuilder,
+ parameterTypes,
+ resultType,
+ batchCases,
+ cfg.inputSource,
+ pipelineCache
+ );
+ checkBatch();
+ } catch (err) {
+ if (err instanceof GPUPipelineError) {
+ t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+ } else {
+ throw err;
+ }
+ }
void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
};
@@ -993,6 +1001,7 @@
const module = t.device.createShaderModule({ code: source });
// build the pipeline
+
const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: { module, entryPoint: 'main' },
@@ -1037,12 +1046,12 @@
}
// build the compute pipeline, if the shader hasn't been compiled already.
- const pipeline = getOrCreate(pipelineCache, source, () => {
+ const pipeline = await getOrCreate(pipelineCache, source, () => {
// build the shader module
const module = t.device.createShaderModule({ code: source });
// build the pipeline
- return t.device.createComputePipeline({
+ return t.device.createComputePipelineAsync({
layout: 'auto',
compute: { module, entryPoint: 'main' },
});
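
Note on the expression.ts changes above: getOrCreate now takes an async builder
and awaits it before populating the cache, and the cached pipelines are built
with createComputePipelineAsync, so a compilation failure surfaces as a rejected
promise that the new try/catch in processBatch can report. Because the map
stores the resolved pipeline rather than the pending promise, two concurrent
misses on the same shader source would each trigger a compile; if that ever
matters, a variant that memoizes the promise itself would deduplicate them. The
sketch below is such a variant, not part of this change:

    // Sketch only: cache the promise so racing callers share one compile.
    function getOrCreateAsync<K, V>(
      map: Map<K, Promise<V>>,
      key: K,
      create: () => Promise<V>
    ): Promise<V> {
      const existing = map.get(key);
      if (existing !== undefined) {
        return existing;
      }
      const value = create();
      map.set(key, value);
      return value;
    }
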
diff --git a/src/webgpu/shader/execution/robust_access.spec.ts b/src/webgpu/shader/execution/robust_access.spec.ts
index 965dd28..aafce2d 100644
--- a/src/webgpu/shader/execution/robust_access.spec.ts
+++ b/src/webgpu/shader/execution/robust_access.spec.ts
@@ -62,35 +62,43 @@
t.debug(source);
const module = t.device.createShaderModule({ code: source });
- const pipeline = await t.device.createComputePipelineAsync({
- layout,
- compute: { module, entryPoint: 'main' },
- });
- const group = t.device.createBindGroup({
- layout: pipeline.getBindGroupLayout(1),
- entries: [
- { binding: 0, resource: { buffer: constantsBuffer } },
- { binding: 1, resource: { buffer: resultBuffer } },
- ],
- });
+ try {
+ const pipeline = await t.device.createComputePipelineAsync({
+ layout,
+ compute: { module, entryPoint: 'main' },
+ });
- const testGroup = t.device.createBindGroup({
- layout: pipeline.getBindGroupLayout(0),
- entries: testBindings,
- });
+ const group = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(1),
+ entries: [
+ { binding: 0, resource: { buffer: constantsBuffer } },
+ { binding: 1, resource: { buffer: resultBuffer } },
+ ],
+ });
- const encoder = t.device.createCommandEncoder();
- const pass = encoder.beginComputePass();
- pass.setPipeline(pipeline);
- pass.setBindGroup(0, testGroup, dynamicOffsets);
- pass.setBindGroup(1, group);
- pass.dispatchWorkgroups(1);
- pass.end();
+ const testGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: testBindings,
+ });
- t.queue.submit([encoder.finish()]);
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, testGroup, dynamicOffsets);
+ pass.setBindGroup(1, group);
+ pass.dispatchWorkgroups(1);
+ pass.end();
- t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+ t.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+ } catch (err) {
+ if (err instanceof GPUPipelineError) {
+ t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+ } else {
+ throw err;
+ }
+ }
}
/** Fill an ArrayBuffer with sentinel values, except clear a region to zero. */
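
In the zero_init.spec.ts diff below, most of the churn is re-indentation: the
workgroup-memory fill pass and the main zero-check dispatch each get their own
try/catch. The first catch returns early after t.fail(), presumably because
running the zero-check without the fill pass would no longer exercise the
intended path; the second catch only reports the GPUPipelineError and rethrows
anything else, as in the other files, since nothing follows it. The buffers,
bind groups, and dispatch code are otherwise unchanged.
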
diff --git a/src/webgpu/shader/execution/zero_init.spec.ts b/src/webgpu/shader/execution/zero_init.spec.ts
index e03a72f..eef155b 100644
--- a/src/webgpu/shader/execution/zero_init.spec.ts
+++ b/src/webgpu/shader/execution/zero_init.spec.ts
@@ -446,101 +446,118 @@
],
});
- const fillPipeline = await t.device.createComputePipelineAsync({
- layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
- label: 'Workgroup Fill Pipeline',
+ try {
+ const fillPipeline = await t.device.createComputePipelineAsync({
+ layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
+ label: 'Workgroup Fill Pipeline',
+ compute: {
+ module: t.device.createShaderModule({
+ code: wgsl,
+ }),
+ entryPoint: 'fill',
+ },
+ });
+
+ const inputBuffer = t.makeBufferWithContents(
+ new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
+ );
+ t.trackForCleanup(inputBuffer);
+ const outputBuffer = t.device.createBuffer({
+ size: wg_memory_limits,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bg = t.device.createBindGroup({
+ layout: fillPipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: inputBuffer,
+ },
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: outputBuffer,
+ },
+ },
+ ],
+ });
+
+ const e = t.device.createCommandEncoder();
+ const p = e.beginComputePass();
+ p.setPipeline(fillPipeline);
+ p.setBindGroup(0, bg);
+ p.dispatchWorkgroups(1);
+ p.end();
+ t.queue.submit([e.finish()]);
+ } catch (err) {
+ if (err instanceof GPUPipelineError) {
+ t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+ return;
+ } else {
+ throw err;
+ }
+ }
+ }
+
+ try {
+ const pipeline = await t.device.createComputePipelineAsync({
+ layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: wgsl,
}),
- entryPoint: 'fill',
+ entryPoint: 'main',
},
});
- const inputBuffer = t.makeBufferWithContents(
- new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
- GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
- );
- t.trackForCleanup(inputBuffer);
- const outputBuffer = t.device.createBuffer({
- size: wg_memory_limits,
+ const resultBuffer = t.device.createBuffer({
+ size: 4,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});
- t.trackForCleanup(outputBuffer);
+ t.trackForCleanup(resultBuffer);
- const bg = t.device.createBindGroup({
- layout: fillPipeline.getBindGroupLayout(0),
+ const zeroBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM,
+ });
+ t.trackForCleanup(zeroBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
- buffer: inputBuffer,
+ buffer: resultBuffer,
},
},
{
binding: 1,
resource: {
- buffer: outputBuffer,
+ buffer: zeroBuffer,
},
},
],
});
- const e = t.device.createCommandEncoder();
- const p = e.beginComputePass();
- p.setPipeline(fillPipeline);
- p.setBindGroup(0, bg);
- p.dispatchWorkgroups(1);
- p.end();
- t.queue.submit([e.finish()]);
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+ } catch (err) {
+ if (err instanceof GPUPipelineError) {
+ t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+ } else {
+ throw err;
+ }
}
-
- const pipeline = await t.device.createComputePipelineAsync({
- layout: 'auto',
- compute: {
- module: t.device.createShaderModule({
- code: wgsl,
- }),
- entryPoint: 'main',
- },
- });
-
- const resultBuffer = t.device.createBuffer({
- size: 4,
- usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
- });
- t.trackForCleanup(resultBuffer);
-
- const zeroBuffer = t.device.createBuffer({
- size: 4,
- usage: GPUBufferUsage.UNIFORM,
- });
- t.trackForCleanup(zeroBuffer);
-
- const bindGroup = t.device.createBindGroup({
- layout: pipeline.getBindGroupLayout(0),
- entries: [
- {
- binding: 0,
- resource: {
- buffer: resultBuffer,
- },
- },
- {
- binding: 1,
- resource: {
- buffer: zeroBuffer,
- },
- },
- ],
- });
-
- const encoder = t.device.createCommandEncoder();
- const pass = encoder.beginComputePass();
- pass.setPipeline(pipeline);
- pass.setBindGroup(0, bindGroup);
- pass.dispatchWorkgroups(1);
- pass.end();
- t.queue.submit([encoder.finish()]);
- t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
});