Skip to content

Commit

Permalink
Handling async pipeline errors more gracefully
Browse files Browse the repository at this point in the history
  • Loading branch information
toji committed Dec 12, 2023
1 parent 7a6ef73 commit 8452940
Show file tree
Hide file tree
Showing 3 changed files with 145 additions and 111 deletions.
37 changes: 23 additions & 14 deletions src/webgpu/shader/execution/expression/expression.ts
Original file line number Diff line number Diff line change
Expand Up @@ -220,12 +220,12 @@ type PipelineCache = Map<String, GPUComputePipeline>;
* @param create the function used to construct a value, if not found in the cache
* @returns the value, either fetched from the cache, or newly built.
*/
function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => V) {
async function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => Promise<V>) {
const existing = map.get(key);
if (existing !== undefined) {
return existing;
}
const value = create();
const value = await create();
map.set(key, value);
return value;
}
Expand Down Expand Up @@ -307,16 +307,24 @@ export async function run(
};

// Submits one batch of cases and checks its results.
// submitBatch builds the pipeline with createComputePipelineAsync, so a
// shader/pipeline failure arrives here as a rejected promise rather than a
// validation error; catch GPUPipelineError and report it as a test failure
// instead of letting the rejection escape as an uncaught error.
const processBatch = async (batchCases: CaseList) => {
  try {
    const checkBatch = await submitBatch(
      t,
      shaderBuilder,
      parameterTypes,
      resultType,
      batchCases,
      cfg.inputSource,
      pipelineCache
    );
    checkBatch();
  } catch (err) {
    if (err instanceof GPUPipelineError) {
      t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
    } else {
      // Not a pipeline-creation failure — propagate to the harness.
      throw err;
    }
  }
  // Fire-and-forget: signal batch completion once submitted work drains,
  // whether or not the batch itself failed (`finally`).
  void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
};

Expand Down Expand Up @@ -993,6 +1001,7 @@ async function buildPipeline(
const module = t.device.createShaderModule({ code: source });

// build the pipeline

const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: { module, entryPoint: 'main' },
Expand Down Expand Up @@ -1037,12 +1046,12 @@ async function buildPipeline(
}

// build the compute pipeline, if the shader hasn't been compiled already.
const pipeline = getOrCreate(pipelineCache, source, () => {
const pipeline = await getOrCreate(pipelineCache, source, () => {
// build the shader module
const module = t.device.createShaderModule({ code: source });

// build the pipeline
return t.device.createComputePipeline({
return t.device.createComputePipelineAsync({
layout: 'auto',
compute: { module, entryPoint: 'main' },
});
Expand Down
58 changes: 33 additions & 25 deletions src/webgpu/shader/execution/robust_access.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -62,35 +62,43 @@ fn main() {

t.debug(source);
const module = t.device.createShaderModule({ code: source });
const pipeline = await t.device.createComputePipelineAsync({
layout,
compute: { module, entryPoint: 'main' },
});

const group = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(1),
entries: [
{ binding: 0, resource: { buffer: constantsBuffer } },
{ binding: 1, resource: { buffer: resultBuffer } },
],
});

const testGroup = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: testBindings,
});
try {
const pipeline = await t.device.createComputePipelineAsync({
layout,
compute: { module, entryPoint: 'main' },
});

const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, testGroup, dynamicOffsets);
pass.setBindGroup(1, group);
pass.dispatchWorkgroups(1);
pass.end();
const group = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(1),
entries: [
{ binding: 0, resource: { buffer: constantsBuffer } },
{ binding: 1, resource: { buffer: resultBuffer } },
],
});

t.queue.submit([encoder.finish()]);
const testGroup = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: testBindings,
});

t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, testGroup, dynamicOffsets);
pass.setBindGroup(1, group);
pass.dispatchWorkgroups(1);
pass.end();

t.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
} catch (err) {
if (err instanceof GPUPipelineError) {
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
} else {
throw err;
}
}
}

/** Fill an ArrayBuffer with sentinel values, except clear a region to zero. */
Expand Down
161 changes: 89 additions & 72 deletions src/webgpu/shader/execution/zero_init.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -446,101 +446,118 @@ g.test('compute,zero_init')
],
});

const fillPipeline = await t.device.createComputePipelineAsync({
layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
label: 'Workgroup Fill Pipeline',
try {
const fillPipeline = await t.device.createComputePipelineAsync({
layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
label: 'Workgroup Fill Pipeline',
compute: {
module: t.device.createShaderModule({
code: wgsl,
}),
entryPoint: 'fill',
},
});

const inputBuffer = t.makeBufferWithContents(
new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
);
t.trackForCleanup(inputBuffer);
const outputBuffer = t.device.createBuffer({
size: wg_memory_limits,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});
t.trackForCleanup(outputBuffer);

const bg = t.device.createBindGroup({
layout: fillPipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: inputBuffer,
},
},
{
binding: 1,
resource: {
buffer: outputBuffer,
},
},
],
});

const e = t.device.createCommandEncoder();
const p = e.beginComputePass();
p.setPipeline(fillPipeline);
p.setBindGroup(0, bg);
p.dispatchWorkgroups(1);
p.end();
t.queue.submit([e.finish()]);
} catch (err) {
if (err instanceof GPUPipelineError) {
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
return;
} else {
throw err;
}
}
}

try {
const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: wgsl,
}),
entryPoint: 'fill',
entryPoint: 'main',
},
});

const inputBuffer = t.makeBufferWithContents(
new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
);
t.trackForCleanup(inputBuffer);
const outputBuffer = t.device.createBuffer({
size: wg_memory_limits,
const resultBuffer = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});
t.trackForCleanup(outputBuffer);
t.trackForCleanup(resultBuffer);

const bg = t.device.createBindGroup({
layout: fillPipeline.getBindGroupLayout(0),
const zeroBuffer = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.UNIFORM,
});
t.trackForCleanup(zeroBuffer);

const bindGroup = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: inputBuffer,
buffer: resultBuffer,
},
},
{
binding: 1,
resource: {
buffer: outputBuffer,
buffer: zeroBuffer,
},
},
],
});

const e = t.device.createCommandEncoder();
const p = e.beginComputePass();
p.setPipeline(fillPipeline);
p.setBindGroup(0, bg);
p.dispatchWorkgroups(1);
p.end();
t.queue.submit([e.finish()]);
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
pass.end();
t.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
} catch (err) {
if (err instanceof GPUPipelineError) {
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
} else {
throw err;
}
}

const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
code: wgsl,
}),
entryPoint: 'main',
},
});

const resultBuffer = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});
t.trackForCleanup(resultBuffer);

const zeroBuffer = t.device.createBuffer({
size: 4,
usage: GPUBufferUsage.UNIFORM,
});
t.trackForCleanup(zeroBuffer);

const bindGroup = t.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: resultBuffer,
},
},
{
binding: 1,
resource: {
buffer: zeroBuffer,
},
},
],
});

const encoder = t.device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(1);
pass.end();
t.queue.submit([encoder.finish()]);
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
});

0 comments on commit 8452940

Please sign in to comment.