Changing HAL dialect syntax to express all types. #5239

Merged (1 commit) on Apr 1, 2021
iree/compiler/Bindings/SIP/Transforms/Passes.h (4 additions, 1 deletion)
@@ -48,7 +48,10 @@ std::unique_ptr<OperationPass<FuncOp>> createMaterializeReflectionAttrsPass();
// Register all Passes
//===----------------------------------------------------------------------===//

-inline void registerPasses() { createMaterializeReflectionAttrsPass(); }
+inline void registerPasses() {
+  registerTransformPassPipeline();
+  createMaterializeReflectionAttrsPass();
+}

} // namespace SIP
} // namespace IREE
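Note: registerTransformPassPipeline() hooks the SIP transform pipeline into MLIR's global registry at startup. As a rough sketch of what such a helper typically wraps (the pipeline name and body below are illustrative placeholders, not IREE's actual pipeline), the usual MLIR pattern is a static PassPipelineRegistration:

    #include "mlir/Pass/Pass.h"
    #include "mlir/Pass/PassRegistry.h"

    // Sketch only: registers a named pipeline so mlir-opt-style drivers can
    // invoke it by name. The static local ensures the registration runs once
    // even if this function is called repeatedly.
    inline void registerExampleTransformPassPipeline() {
      static mlir::PassPipelineRegistration<> pipeline(
          "example-transform-pipeline",
          "Runs the example transform pass pipeline",
          [](mlir::OpPassManager &passManager) {
            // passManager.addPass(createSomePass());
          });
    }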
iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp (6 additions, 6 deletions)
@@ -505,9 +505,9 @@ class ConvertHALInterfaceBindingSubspanOp : public ConvertToLLVMPattern {
cast<IREE::HAL::InterfaceBindingSubspanOp>(op).queryBindingOp();
IREE::HAL::InterfaceBindingSubspanOpAdaptor newOperands(operands);
MemRefType memRefType = op->getResult(0).getType().cast<MemRefType>();
-    auto memRefDesc =
-        abi.loadBinding(op->getLoc(), interfaceBindingOp.binding(),
-                        newOperands.byte_offset(), memRefType, rewriter);
+    auto memRefDesc = abi.loadBinding(
+        op->getLoc(), interfaceBindingOp.binding().getZExtValue(),
+        newOperands.byte_offset(), memRefType, rewriter);
rewriter.replaceOp(op, {memRefDesc});
return success();
}
@@ -532,9 +532,9 @@ class ConvertLegacyPlaceholderOp : public ConvertToLLVMPattern {
SymbolTable::lookupNearestSymbolFrom(
op, op->getAttrOfType<SymbolRefAttr>("binding")));
MemRefType memRefType = op->getResult(0).getType().cast<MemRefType>();
-    auto memRefDesc =
-        abi.loadBinding(op->getLoc(), interfaceBindingOp.binding(),
-                        /*baseOffset=*/{}, memRefType, rewriter);
+    auto memRefDesc = abi.loadBinding(
+        op->getLoc(), interfaceBindingOp.binding().getZExtValue(),
+        /*baseOffset=*/{}, memRefType, rewriter);
rewriter.replaceOp(op, {memRefDesc});
return success();
}
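Note: the C++ changes in this file are mechanical fallout from the syntax change. With the binding attribute expressed as an arbitrary-precision integer, the generated binding() accessor returns llvm::APInt rather than a plain integer, so call sites extract a machine integer explicitly. A minimal, self-contained sketch of that extraction using standard llvm::APInt calls (the value is made up):

    #include <cassert>
    #include <cstdint>
    #include "llvm/ADT/APInt.h"

    int main() {
      // Stand-in for what an accessor such as binding() now returns.
      llvm::APInt binding(/*numBits=*/64, /*val=*/1);
      // Zero-extends to uint64_t; asserts if the value needs more bits.
      uint64_t asUnsigned = binding.getZExtValue();
      // Sign-extends instead, for contexts that want a signed value.
      int64_t asSigned = binding.getSExtValue();
      assert(asUnsigned == 1 && asSigned == 1);
      return 0;
    }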
@@ -21,7 +21,7 @@ func @binding_ptrs() {
"test.sink"(%memref) : (memref<?xf32>) -> ()
return
}
-hal.interface @io attributes {push_constants = 2 : i32, sym_visibility = "private"} {
+hal.interface @io attributes {push_constants = 2 : index, sym_visibility = "private"} {
hal.interface.binding @arg0, set=0, binding=0, type="StorageBuffer", access="Read"
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer", access="Write"
}
@@ -57,7 +57,7 @@ func @tie_shape() {
"test.sink"(%tied_memref) : (memref<?x2xf32>) -> ()
return
}
-hal.interface @io attributes {push_constants = 2 : i32, sym_visibility = "private"} {
+hal.interface @io attributes {push_constants = 2 : index, sym_visibility = "private"} {
hal.interface.binding @arg0, set=0, binding=0, type="StorageBuffer", access="Read"
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer", access="Write"
}
@@ -8,7 +8,7 @@ hal.executable @matmul_tensors attributes {sym_visibility = "private"} {
}
hal.executable.target @llvm_aot, filter="dylib*" {
hal.executable.entry_point @matmul_tensors attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module {
@@ -96,7 +96,7 @@ hal.executable @add attributes {sym_visibility = "private"} {
}
hal.executable.target @llvm_aot, filter="dylib*" {
hal.executable.entry_point @add attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module {
@@ -9,7 +9,7 @@ hal.executable @dynamic_matmul attributes {sym_visibility = "private"} {
}
hal.executable.target @llvm_aot, filter="dylib*" {
hal.executable.entry_point @matmul_128x128x128 attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:128x128xf32>, !flow.dispatch.tensor<readonly:128x128xf32>,
!flow.dispatch.tensor<writeonly:128x128xf32>) -> ()}
module {
@@ -90,7 +90,7 @@ hal.executable @dynamic_matmul_i8_i8_i32 attributes {sym_visibility = "private"}
}
hal.executable.target @llvm_aot, filter="dylib*" {
hal.executable.entry_point @matmul_i8_i8_i32_128x128x128 attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:128x128xi8>, !flow.dispatch.tensor<readonly:128x128xi8>,
!flow.dispatch.tensor<writeonly:128x128xi32>) -> ()}
module {
@@ -9,7 +9,7 @@
// }
// hal.executable.target @llvm_aot, filter="dylib*" {
// hal.executable.entry_point @dynamic_matmul attributes {
-// interface = @legacy_io, ordinal = 0 : i32,
+// interface = @legacy_io, ordinal = 0 : index,
// signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
// !flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
// module {
@@ -57,7 +57,7 @@ hal.executable @static_matmul attributes {sym_visibility = "private"} {
}
hal.executable.target @llvm_aot, filter="dylib*" {
hal.executable.entry_point @static_matmul attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:16x4xf32>, !flow.dispatch.tensor<readonly:4x8xf32>,
!flow.dispatch.tensor<writeonly:16x8xf32>) -> ()}
module {
iree/compiler/Conversion/LinalgToNVVM/ConvertToNVVM.cpp (1 addition, 1 deletion)
@@ -112,7 +112,7 @@ class ConvertIREEBindingOp : public ConvertToLLVMPattern {
op, op->getAttrOfType<SymbolRefAttr>("binding"));
auto interfaceBindingOp = cast<IREE::HAL::InterfaceBindingOp>(symbol);
Value llvmBufferBasePtr =
-      llvmFuncOp.getArgument(interfaceBindingOp.binding());
+      llvmFuncOp.getArgument(interfaceBindingOp.binding().getZExtValue());
if (memrefType.hasStaticShape()) {
auto desc = MemRefDescriptor::fromStaticShape(
rewriter, loc, *getTypeConverter(), memrefType, llvmBufferBasePtr);
@@ -2,7 +2,7 @@

hal.executable @add_dispatch_0 attributes {sym_visibility = "private"} {
hal.executable.target @cuda, filter="cuda" {
-hal.executable.entry_point @add_dispatch_0 attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (!flow.dispatch.tensor<readonly:1024xf32>, !flow.dispatch.tensor<readonly:1024xf32>, !flow.dispatch.tensor<writeonly:1024xf32>) -> ()}
+hal.executable.entry_point @add_dispatch_0 attributes {interface = @legacy_io, ordinal = 0 : index, signature = (!flow.dispatch.tensor<readonly:1024xf32>, !flow.dispatch.tensor<readonly:1024xf32>, !flow.dispatch.tensor<writeonly:1024xf32>) -> ()}
module {
func @add_dispatch_0() {
%c0 = constant 0 : index
@@ -9,7 +9,7 @@ hal.executable @simpleMath_ex_dispatch_0 {
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer", access="Write|Discard"
}
hal.executable.target @cuda, filter="cuda" {
-hal.executable.entry_point @add_dispatch_0 attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (!flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<writeonly:16xf32>) -> ()}
+hal.executable.entry_point @add_dispatch_0 attributes {interface = @legacy_io, ordinal = 0 : index, signature = (!flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<writeonly:16xf32>) -> ()}
module {
func @add_dispatch_0() {
%c0 = constant 0 : index
iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp (7 additions, 5 deletions)
@@ -158,9 +158,9 @@ IREE::HAL::InterfaceBindingOp getBindingOp(Operation *op) {
}

/// Returns the (set, binding) pair for the given placeholder op.
-std::pair<uint32_t, uint32_t> getPlaceholderSetAndBinding(Operation *op) {
+std::pair<int32_t, int32_t> getPlaceholderSetAndBinding(Operation *op) {
   IREE::HAL::InterfaceBindingOp bindingOp = getBindingOp(op);
-  return {bindingOp.set(), bindingOp.binding()};
+  return {bindingOp.set().getSExtValue(), bindingOp.binding().getSExtValue()};
}

/// Returns the set of resources that should be marked as aliased in SPIR-V.
@@ -259,8 +259,8 @@ struct InterfaceOpConverter final : public OpConversionPattern<InterfaceOpTy> {
// placeholder op's pointer address as the `id`.
spirv::GlobalVariableOp varOp = insertResourceVariable(
interfaceOp.getLoc(), convertedType,
-        reinterpret_cast<uint64_t>(interfaceOp.getOperation()), bindingOp.set(),
-        bindingOp.binding(),
+        reinterpret_cast<uint64_t>(interfaceOp.getOperation()),
+        bindingOp.set().getZExtValue(), bindingOp.binding().getZExtValue(),
aliasedResources.contains(interfaceOp.getOperation()),
*moduleOp.getBody(), rewriter);

@@ -484,8 +484,8 @@ LogicalResult HALInterfaceLoadConstantConverter::matchAndRewrite(
auto halInterfaceOps =
llvm::to_vector<1>(moduleOp.getOps<IREE::HAL::InterfaceOp>());
assert(halInterfaceOps.size() == 1);
+  assert(halInterfaceOps.front().push_constants().hasValue());

-  unsigned elementCount = *halInterfaceOps.front().push_constants();
+  uint64_t elementCount =
+      (*halInterfaceOps.front().push_constants()).getZExtValue();
unsigned offset = loadOp.offset().getZExtValue();

// The following function generates SPIR-V ops with i32 types. So it does type
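Note: push_constants follows the same pattern one level up: an optional attribute now surfaces as an Optional<APInt>, so the code asserts presence before unwrapping. A small self-contained sketch of that shape (values are made up; this is not IREE's accessor itself):

    #include <cassert>
    #include <cstdint>
    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/Optional.h"

    int main() {
      // Stand-in for an optional accessor such as push_constants().
      llvm::Optional<llvm::APInt> pushConstants(llvm::APInt(64, 2));
      // Unwrapping an empty Optional is invalid, hence the explicit assert.
      assert(pushConstants.hasValue() && "expected push constants");
      uint64_t elementCount = pushConstants->getZExtValue();
      assert(elementCount == 2);
      return 0;
    }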
@@ -283,7 +283,7 @@ LogicalResult SplitDispatchFunctionPass::splitDispatchFunction(
builder.clone(*oldEntryPointOp.getOperation()));
clonedEntryPointOp.sym_nameAttr(builder.getStringAttr(newFnName));
clonedEntryPointOp.ordinalAttr(
-      builder.getI32IntegerAttr(static_cast<int32_t>(entryPoints.size())));
+      builder.getIndexAttr(static_cast<int32_t>(entryPoints.size())));
entryPoints.push_back(builder.getSymbolRefAttr(clonedEntryPointOp));
}
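Note: on the builder side, the ordinal switches from getI32IntegerAttr to getIndexAttr, which is exactly what changes the printed form in the tests from "0 : i32" to "0 : index". A hedged side-by-side sketch (the helper name is made up; both calls are standard mlir::Builder API):

    #include "mlir/IR/Builders.h"

    // Sketch: both produce an mlir::IntegerAttr, differing only in type.
    mlir::IntegerAttr makeOrdinalAttr(mlir::Builder &builder, int64_t ordinal,
                                      bool useIndexType) {
      if (useIndexType)
        return builder.getIndexAttr(ordinal);  // prints as "0 : index"
      return builder.getI32IntegerAttr(
          static_cast<int32_t>(ordinal));      // prints as "0 : i32"
    }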

@@ -8,7 +8,7 @@ hal.executable @batch_matmul_static_shape attributes {sym_visibility = "private"
}
hal.executable.target @vulkan, filter="dylib*" {
hal.executable.entry_point @batch_matmul_static_shape attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module attributes {
@@ -298,7 +298,7 @@ hal.executable @batch_matmul_fused_fillop attributes {sym_visibility = "private"
}
hal.executable.target @vulkan, filter="dylib*" {
hal.executable.entry_point @batch_matmul_fused_fillop attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module attributes {
@@ -8,7 +8,7 @@ hal.executable @conv2d_static_shape attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan_spirv, filter="vulkan*" {
hal.executable.entry_point @conv2d_static_shape attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:1x225x225x16xf32>, !flow.dispatch.tensor<readonly:3x3x16x32xf32>, !flow.dispatch.tensor<writeonly:1x112x112x32xf32>) -> ()}
module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], [SPV_KHR_storage_buffer_storage_class]>, ARM:IntegratedGPU, {}>} {
func @conv2d_static_shape() {
@@ -119,7 +119,7 @@ hal.executable @matmul_dynamic_shape attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan_spirv, filter="vulkan*" {
hal.executable.entry_point @matmul_dynamic_shape attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:1x225x225x16xf32>, !flow.dispatch.tensor<readonly:3x3x16x32xf32>, !flow.dispatch.tensor<writeonly:1x112x112x32xf32>) -> ()}
module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], [SPV_KHR_storage_buffer_storage_class]>, ARM:IntegratedGPU, {}>} {
func @matmul_dynamic_shape() {
iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir (9 additions, 9 deletions)
@@ -10,7 +10,7 @@
// }
// hal.executable.target @vulkan, filter="vulkan*" {
// hal.executable.entry_point @parallel_4D attributes {
-// interface = @legacy_io, ordinal = 0 : i32,
+// interface = @legacy_io, ordinal = 0 : index,
// signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
// !flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
// module attributes {
@@ -89,7 +89,7 @@ hal.executable @parallel_4D_static attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan, filter="vulkan*" {
hal.executable.entry_point @parallel_4D_static attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module attributes {
@@ -168,7 +168,7 @@ hal.executable @scalar_add attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan, filter="vulkan*" {
hal.executable.entry_point @scalar_add attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:f32>, !flow.dispatch.tensor<readonly:f32>,
!flow.dispatch.tensor<writeonly:f32>) -> ()}
module attributes {
@@ -222,7 +222,7 @@ hal.executable @reduce_sum attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan, filter="vulkan*" {
hal.executable.entry_point @reduce_sum attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:40x50x75xf32>, !flow.dispatch.tensor<readonly:f32>,
!flow.dispatch.tensor<writeonly:40xf32>) -> ()}
module {
@@ -295,7 +295,7 @@ hal.executable @matmul attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan, filter="vulkan*" {
hal.executable.entry_point @matmul attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module attributes {
@@ -367,7 +367,7 @@ hal.executable @conv_1d attributes {sym_visibility = "private"} {
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer", access="Write|Discard"
}
hal.executable.target @vulkan_spirv, filter="vulkan*" {
-hal.executable.entry_point @conv_1d attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (tensor<3x8x1xf32>, tensor<3x1x1xf32>) -> tensor<3x6x1xf32>}
+hal.executable.entry_point @conv_1d attributes {interface = @legacy_io, ordinal = 0 : index, signature = (tensor<3x8x1xf32>, tensor<3x1x1xf32>) -> tensor<3x6x1xf32>}
module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader, GroupNonUniform, GroupNonUniformVote, GroupNonUniformArithmetic, GroupNonUniformBallot, GroupNonUniformShuffle, GroupNonUniformShuffleRelative], [SPV_KHR_storage_buffer_storage_class]>, SwiftShader:CPU, {cooperative_matrix_properties_nv = [], max_compute_shared_memory_size = 16384 : i32, max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>, subgroup_size = 4 : i32}>} {
func @conv_1d() attributes {spv.entry_point_abi = {local_size = dense<[32, 4, 1]> : vector<3xi32>}} {
%cst = constant 0.000000e+00 : f32
@@ -426,7 +426,7 @@ hal.executable @conv_no_padding attributes {sym_visibility = "private"} {
}
hal.executable.target @vulkan, filter="vulkan*" {
hal.executable.entry_point @conv_no_padding attributes {
-interface = @legacy_io, ordinal = 0 : i32,
+interface = @legacy_io, ordinal = 0 : index,
signature = (!flow.dispatch.tensor<readonly:?x?xf32>, !flow.dispatch.tensor<readonly:?x?xf32>,
!flow.dispatch.tensor<writeonly:?x?xf32>) -> ()}
module attributes {
@@ -542,7 +542,7 @@ hal.executable @conv_3d attributes {sym_visibility = "private"} {
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer", access="Write|Discard"
}
hal.executable.target @vulkan_spirv, filter="vulkan*" {
-hal.executable.entry_point @conv_3d attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (tensor<2x8x8x8x3xf32>, tensor<2x2x2x3x2xf32>) -> tensor<2x7x7x7x2xf32>}
+hal.executable.entry_point @conv_3d attributes {interface = @legacy_io, ordinal = 0 : index, signature = (tensor<2x8x8x8x3xf32>, tensor<2x2x2x3x2xf32>) -> tensor<2x7x7x7x2xf32>}
module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader, GroupNonUniform, GroupNonUniformVote, GroupNonUniformArithmetic, GroupNonUniformBallot, GroupNonUniformShuffle, GroupNonUniformShuffleRelative], [SPV_KHR_storage_buffer_storage_class]>, SwiftShader:CPU, {cooperative_matrix_properties_nv = [], max_compute_shared_memory_size = 16384 : i32, max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>, subgroup_size = 4 : i32}>} {
func @conv_3d() attributes {spv.entry_point_abi = {local_size = dense<[32, 4, 1]> : vector<3xi32>}} {
%cst = constant 0.000000e+00 : f32
@@ -603,7 +603,7 @@ module {
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer", access="Write|Discard"
}
hal.executable.target @vulkan, filter="vulkan*" {
-hal.executable.entry_point @pooling_nhwc_max attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (!flow.dispatch.tensor<readonly:2x16x16x6xf32>, !flow.dispatch.tensor<readonly:1x3x4x2xf32>, !flow.dispatch.tensor<writeonly:2x14x13x5xf32>) -> ()} {
+hal.executable.entry_point @pooling_nhwc_max attributes {interface = @legacy_io, ordinal = 0 : index, signature = (!flow.dispatch.tensor<readonly:2x16x16x6xf32>, !flow.dispatch.tensor<readonly:1x3x4x2xf32>, !flow.dispatch.tensor<writeonly:2x14x13x5xf32>) -> ()} {
^bb0(%arg0: index, %arg1: index, %arg2: index): // no predecessors
%c4 = constant 4 : index
%c1 = constant 1 : index