Merge branch 'google' into main-to-google
ScottTodd committed Mar 23, 2021
2 parents b7e2b09 + f29d6c8 commit 82d8765
Showing 76 changed files with 325 additions and 349 deletions.
6 changes: 3 additions & 3 deletions SUBMODULE_VERSIONS.txt
@@ -5,15 +5,15 @@
b1fbd33c06cdb0024c67733c6fdec2009d17b384 third_party/googletest
88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
013b829185fee6d8eaa515a7e36ec468a2a02600 third_party/llvm-bazel
cd442157cff4aad209ae532cbf031abbe10bc1df third_party/llvm-project
b24436ac96bdf3f2c545fc85dc8af239d618c9c4 third_party/llvm-project
68547d08daca039467df49c7cc50c3a0061787f3 third_party/mlir-emitc
431be0e9b235e1b98adf0367f3beb440aa672875 third_party/mlir-hlo
98debb127d3a14e0239a3432461e3876d293b409 third_party/mlir-hlo
2b2bd45bbf9be04fd22ece5cc1f54679202e9257 third_party/pffft
d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
2887692065c38ef6617f423feafc6b69dd0a0681 third_party/ruy
685f86471e9d26b3eb7676695a2e2cefb4551ae9 third_party/spirv_cross
f8bf11a0253a32375c32cad92c841237b96696c0 third_party/spirv_headers
aa3bd9f6de5a76c4c226548a48e448d211978e92 third_party/tensorflow
5c483374ac525a388ac9b1b24e468eb874ed0980 third_party/tensorflow
8732f0e94e4e41049a43029202bda94d7b4e85da third_party/tracy
9bd3f561bcee3f01d22912de10bb07ce4e23d378 third_party/vulkan_headers
3528e2aed3e8808f33e1e7d63eeb1560456a605a third_party/vulkan_memory_allocator
8 changes: 3 additions & 5 deletions experimental/ModelBuilder/ModelRunner.cpp
@@ -59,12 +59,10 @@ void mlir::ModelRunner::compile(
if (target == Target::CPUTarget) {
// Lower vector operations progressively into more elementary
// vector operations before running the regular compiler passes.
mlir::OwningRewritePatternList patterns;
mlir::vector::populateVectorSlicesLoweringPatterns(patterns,
module->getContext());
mlir::OwningRewritePatternList patterns(module->getContext());
mlir::vector::populateVectorSlicesLoweringPatterns(patterns);
mlir::vector::populateVectorContractLoweringPatterns(
patterns, module->getContext(),
compilationOptions.vectorTransformsOptions);
patterns, compilationOptions.vectorTransformsOptions);
(void)mlir::applyPatternsAndFoldGreedily(*module, std::move(patterns));
}
runLoweringPass(compilationOptions.loweringPasses
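The same mechanical migration recurs throughout the files below: `OwningRewritePatternList` now takes the `MLIRContext` in its constructor, so the populate* helpers no longer receive the context as a separate argument. As a hedged before/after sketch of that idiom only (`ctx`, `funcOp`, and `options` are illustrative placeholders, not names from this repository):

// Old idiom: default-construct the list; each populate* helper needs the
// MLIRContext passed explicitly.
mlir::OwningRewritePatternList oldPatterns;
mlir::vector::populateVectorSlicesLoweringPatterns(oldPatterns, ctx);

// New idiom: give the context to the constructor once; the helpers pick it up
// from the pattern list itself.
mlir::OwningRewritePatternList patterns(ctx);
mlir::vector::populateVectorSlicesLoweringPatterns(patterns);
mlir::vector::populateVectorContractLoweringPatterns(patterns, options);
(void)mlir::applyPatternsAndFoldGreedily(funcOp, std::move(patterns));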
12 changes: 6 additions & 6 deletions integrations/tensorflow/iree_tf_compiler/TF/ConvertToMHLO.cpp
@@ -58,15 +58,15 @@ class ConvertToMHLOPass : public PassWrapper<ConvertToMHLOPass, FunctionPass> {

// Lower TF patterns must be separate from canonicalization patterns, as
// they are sometimes inversions of each other.
OwningRewritePatternList lowerTfPatterns;
OwningRewritePatternList lowerTfPatterns(&getContext());
mlir::TF::PopulateLoweringTFPatterns(context, &lowerTfPatterns);

OwningRewritePatternList canonicalizePatterns;
OwningRewritePatternList canonicalizePatterns(&getContext());
for (auto *op : context->getRegisteredOperations()) {
op->getCanonicalizationPatterns(canonicalizePatterns, context);
}

OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
// Note that the `OperationConverter` orders patterns lexicographically by:
// 1) Ascending legalization depth (i.e., minimum number of patterns
// necessary to arrive at conversion target).
@@ -98,10 +98,10 @@ class ConvertToMHLOPass : public PassWrapper<ConvertToMHLOPass, FunctionPass> {
DenseSet<Operation *> prevUnconvertedOps;
DenseSet<Operation *> unconvertedOps;

FrozenRewritePatternList frozenPatterns(std::move(patterns));
FrozenRewritePatternList frozenCanonicalizePatterns(
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
FrozenRewritePatternSet frozenCanonicalizePatterns(
std::move(canonicalizePatterns));
FrozenRewritePatternList frozenTfPatterns(std::move(lowerTfPatterns));
FrozenRewritePatternSet frozenTfPatterns(std::move(lowerTfPatterns));
while (true) {
if (failed(
applyPatternsAndFoldGreedily(op, frozenCanonicalizePatterns))) {
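This hunk also picks up the `FrozenRewritePatternList` to `FrozenRewritePatternSet` rename. Freezing matters here because the same pattern sets are re-applied on every iteration of the surrounding retry loop; a frozen set is built once and can then be applied repeatedly without rebuilding. A rough sketch of that shape only — the loop body and exit condition are truncated above, so those details are assumptions:

FrozenRewritePatternSet frozenCanonicalizePatterns(
    std::move(canonicalizePatterns));
FrozenRewritePatternSet frozenTfPatterns(std::move(lowerTfPatterns));
while (true) {
  // Re-applying a frozen set avoids rebuilding the pattern list each round.
  if (failed(applyPatternsAndFoldGreedily(op, frozenCanonicalizePatterns)))
    return signalPassFailure();  // assumed failure handling; not shown above
  // ... apply frozenTfPatterns / the conversion, compare the remaining
  // unconverted ops against the previous iteration, and break once the set
  // stops changing (that bookkeeping is outside this hunk).
}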
@@ -146,7 +146,7 @@ class ConvertTFToTFStringsPass

void populateTFToTFStringsPatterns(MLIRContext *ctx,
OwningRewritePatternList &patterns) {
populateWithGenerated(ctx, patterns);
populateWithGenerated(patterns);
patterns.insert<GatherV2OpLowering>(ctx);
patterns.insert<StringFormatOpLowering>(ctx);
}
@@ -98,8 +98,8 @@ void ConvertTFToTFTensorListPass::runOnOperation() {
// The MLIR type conversion infrastructure doesn't handle this situation well.
// It only knows how to blindly convert one type to another.

OwningRewritePatternList patterns;
populateWithGenerated(&getContext(), patterns);
OwningRewritePatternList patterns(&getContext());
populateWithGenerated(patterns);
patterns.insert<ConvertTfTensorlistConcatV2>(&getContext());

ConversionTarget target(getContext());
@@ -55,7 +55,7 @@ class ConversionPass : public PassWrapper<T, OperationPass<ModuleOp>> {

LogicalResult run() {
auto module = this->getOperation();
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&this->getContext());
Converter typeConverter;

// Lower to the standard string operations.
@@ -82,10 +82,8 @@ class ConversionPass : public PassWrapper<T, OperationPass<ModuleOp>> {
llvm::all_of(op.getResultTypes(), func);
});

populateFuncOpTypeConversionPattern(patterns, &this->getContext(),
typeConverter);
populateCallOpTypeConversionPattern(patterns, &this->getContext(),
typeConverter);
populateFuncOpTypeConversionPattern(patterns, typeConverter);
populateCallOpTypeConversionPattern(patterns, typeConverter);

auto result = applyPartialConversion(module.getOperation(), target,
std::move(patterns));
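For context, the dialect-conversion driver in this hunk follows the usual shape: build a type converter, mark ops dynamically legal once their types need no conversion, and run a partial conversion. A condensed, hedged sketch (the `Converter` type and the string-lowering patterns live elsewhere in this pass; the legality callback is an approximation of the partially shown code above):

OwningRewritePatternList patterns(&this->getContext());
Converter typeConverter;
ConversionTarget target(this->getContext());
// An op is legal once none of its operand or result types still needs
// conversion.
target.markUnknownOpDynamicallyLegal([&](Operation *op) {
  auto isLegal = [&](Type t) { return typeConverter.isLegal(t); };
  return llvm::all_of(op->getOperandTypes(), isLegal) &&
         llvm::all_of(op->getResultTypes(), isLegal);
});
// With the updated API the populate* helpers take only the type converter;
// the context comes from the pattern list.
populateFuncOpTypeConversionPattern(patterns, typeConverter);
populateCallOpTypeConversionPattern(patterns, typeConverter);
auto result = applyPartialConversion(module.getOperation(), target,
                                     std::move(patterns));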
@@ -217,7 +217,7 @@ struct ForOpCanonicalizationPass
: PassWrapper<ForOpCanonicalizationPass, FunctionPass> {
void runOnFunction() override {
FuncOp fn = getFunction();
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
patterns.insert<CanonicalizeForOpInductionVarShape,
PackForOpInductionVarVector>(fn.getContext());
(void)applyPatternsAndFoldGreedily(fn, std::move(patterns));
@@ -108,7 +108,7 @@ struct RemoveDeadMemAllocs : RewritePattern {
struct BufferAllocViewCleanUpPass
: public PassWrapper<BufferAllocViewCleanUpPass, FunctionPass> {
void runOnFunction() override {
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
patterns.insert<FoldReshapeIntoInterfaceTensorLoad>(&getContext());
patterns.insert<RemoveDeadMemAllocs>();
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
@@ -532,7 +532,7 @@ void LinalgRewriteDestructiveUpdates::runOnFunction() {
// Non-default canonicalization patterns.
// TODO: add Linalg tiling canonicalization patterns, affineminscf and others
// as needed.
OwningRewritePatternList canonicalizationPatterns;
OwningRewritePatternList canonicalizationPatterns(&getContext());
scf::ForOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
(void)applyPatternsAndFoldGreedily(funcOp,
std::move(canonicalizationPatterns));
4 changes: 2 additions & 2 deletions iree/compiler/Conversion/Common/Transforms.cpp
@@ -45,7 +45,7 @@ namespace iree_compiler {
/// easier.
void applyCanonicalizationPatternsForTiling(MLIRContext *context,
Operation *op) {
OwningRewritePatternList canonicalizationPatterns;
OwningRewritePatternList canonicalizationPatterns(context);
canonicalizationPatterns.insert<AffineMinCanonicalizationPattern>(context);
scf::ForOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
AffineApplyOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
@@ -344,7 +344,7 @@ LogicalResult defineWorkgroupCountRegion(

LogicalResult materializeStaticLaunchInformation(
FuncOp funcOp, ArrayRef<int64_t> workloadPerWorkgroup) {
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(funcOp.getContext());
patterns.insert<SetWorkgroupSizePattern>(funcOp.getContext(),
workloadPerWorkgroup);
if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) {
@@ -64,9 +64,8 @@ struct VectorTransferOptimizationPass
// Generate vector.shape_cast for dropping leading one dimensions in vector
// ops. This increases the chance that we can forward more transfer writes
// to transfer reads.
OwningRewritePatternList patterns;
mlir::vector::populateCastAwayVectorLeadingOneDimPatterns(
patterns, funcOp.getContext());
OwningRewritePatternList patterns(&getContext());
mlir::vector::populateCastAwayVectorLeadingOneDimPatterns(patterns);
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

vector::transferOpflowOpt(funcOp);
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/HLOToHLO/Convert1x1ConvToDot.cpp
@@ -130,7 +130,7 @@ struct Convert1x1ConvToDotPass

void runOnFunction() override {
MLIRContext *context = &getContext();
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
patterns.insert<Convert1x1ConvolutionToDotOp>(context);
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/HLOToHLO/DecomposeHLOClamp.cpp
@@ -60,7 +60,7 @@ struct DecomposeHLOClampPass

void runOnFunction() override {
MLIRContext *context = &getContext();
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
patterns.insert<DecomposeClampOp>(context);
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
4 changes: 2 additions & 2 deletions iree/compiler/Conversion/HLOToHLO/DemoteF32ToF16.cpp
@@ -172,9 +172,9 @@ void ConvertF32ToF16Pass::runOnOperation() {
ModuleOp moduleOp = getOperation();

FloatTypeConverter converter;
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
patterns.insert<GenericTypeConvert>(context, converter);
populateFuncOpTypeConversionPattern(patterns, context, converter);
populateFuncOpTypeConversionPattern(patterns, converter);
F32ToF16ConversionTarget target(*context);
target.markUnknownOpDynamicallyLegal();
if (failed(applyFullConversion(moduleOp, target, std::move(patterns))))
7 changes: 4 additions & 3 deletions iree/compiler/Conversion/HLOToLinalg/FusionOfTensorOps.cpp
@@ -73,18 +73,19 @@ struct FusionOfTensorOpsPass
}

void runOnOperation() override {
OwningRewritePatternList fusionPatterns, interfacePatterns;
OwningRewritePatternList fusionPatterns(&getContext());
OwningRewritePatternList interfacePatterns(&getContext());
Operation *op = getOperation();
MLIRContext *context = op->getContext();
interfacePatterns.insert<FuseWithHALInterfaceLoadTensor,
FuseWithHALInterfaceStoreTensor>(context);
FrozenRewritePatternList frozenInterfacePatterns(
FrozenRewritePatternSet frozenInterfacePatterns(
std::move(interfacePatterns));

(void)applyPatternsAndFoldGreedily(op->getRegions(),
frozenInterfacePatterns);

populateLinalgTensorOpsFusionPatterns(context, fusionPatterns);
populateLinalgTensorOpsFusionPatterns(fusionPatterns);
(void)applyPatternsAndFoldGreedily(op->getRegions(),
std::move(fusionPatterns));

6 changes: 4 additions & 2 deletions iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
@@ -825,6 +825,8 @@ static LogicalResult createAndPropagateBufferUsedForResultTensors(
// Canonicalization patterns.
//===----------------------------------------------------------------------===//

// TODO(hanchung): Revisit this pattern; it seems to no longer be needed because
// the reshape ops are folded in the tensors world.
// Folds a linalg.reshape op that directly reshapes an iree.placeholder op into
// the iree.placeholder op itself.
class FoldReshapeIntoPlaceholder final
@@ -900,7 +902,7 @@ void ConvertHLOToLinalgOnBuffersPass::runOnFunction() {
return signalPassFailure();
}

OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
populateHLOToLinalgOnBuffersConversionPatterns(context, patterns,
resultTensorToBufferMap);
patterns.insert<HALInterfaceLoadTensorOpEraser, ShapeOpPattern>(
@@ -940,7 +942,7 @@ void ConvertHLOToLinalgOnBuffersPass::runOnFunction() {

// Perform additional canonicalizations.
{
OwningRewritePatternList foldingPatterns;
OwningRewritePatternList foldingPatterns(&getContext());
foldingPatterns.insert<FoldReshapeIntoPlaceholder>(context);
(void)applyPatternsAndFoldGreedily(funcOp, std::move(foldingPatterns));
}
@@ -194,7 +194,7 @@ struct ConvertHLOToLinalgOnTensorsPass
}

void runOnFunction() override {
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
MLIRContext *context = &getContext();
populateHLOToLinalgOnTensorsConversionPatterns(context, patterns);
if (useLinalgOnTensorsPath) {
4 changes: 2 additions & 2 deletions iree/compiler/Conversion/HLOToLinalg/ResolveShapeOps.cpp
@@ -98,7 +98,7 @@ struct ResolveShapeOpsPass
void ResolveShapeOpsPass::runOnFunction() {
MLIRContext *context = &getContext();

OwningRewritePatternList dimPatterns;
OwningRewritePatternList dimPatterns(&getContext());
dimPatterns.insert<StdDimResolver>(context);

// Set up a target to convert all std.dim ops. We need a conversion target
@@ -111,7 +111,7 @@ void ResolveShapeOpsPass::runOnFunction() {
return signalPassFailure();
}

OwningRewritePatternList shapePatterns;
OwningRewritePatternList shapePatterns(&getContext());
shapePatterns.insert<TieShapeElider>(context);
Shape::RankedDimOp::getCanonicalizationPatterns(shapePatterns, context);

9 changes: 4 additions & 5 deletions iree/compiler/Conversion/HLOToLinalg/test/fusion.mlir
@@ -32,10 +32,9 @@ module {
// -----

module {
func @fuse_store_reshape() {
func @fuse_store_reshape(%arg0: tensor<100xi32>) {
%c0 = constant 0 : index
%c42 = constant dense<42> : tensor<100xi32>
%0 = linalg.tensor_reshape %c42 [affine_map<(d0, d1) -> (d0, d1)>] : tensor<100xi32> into tensor<4x25xi32>
%0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] : tensor<100xi32> into tensor<4x25xi32>
hal.interface.store.tensor %0, @legacy_io::@ret0, offset = %c0 : tensor<4x25xi32>
return
}
@@ -45,8 +44,8 @@ module {
}

// CHECK-LABEL: func @fuse_store_reshape
// CHECK: %[[C42:.+]] = constant dense<{{.+}}> : tensor<100xi32>
// CHECK: hal.interface.store.tensor %[[C42]]
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: tensor<100xi32>
// CHECK: hal.interface.store.tensor %[[ARG0]]

// -----

@@ -320,66 +320,6 @@ module {

// -----

#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2)>

module {
func @store_reshape_src_and_result_2() {
%c0 = constant 0 : index
%shape = linalg.init_tensor[2, 4] : tensor<2x4xf32>
%0 = hal.interface.load.tensor @legacy_io::@arg0, offset = %c0
{operand_result_index = 0 : i32} : tensor<2x4xf32>
%1 = linalg.generic {
indexing_maps = [#map0, #map0],
iterator_types = ["parallel", "parallel"]}
ins(%0 : tensor<2x4xf32>)
outs(%shape : tensor<2x4xf32>) {
^bb0(%arg0: f32, %s: f32): // no predecessors
%2 = math.tanh %arg0 : f32
linalg.yield %2 : f32
} -> tensor<2x4xf32>
%3 = linalg.tensor_reshape %1 [#map1, #map2]
: tensor<2x4xf32> into tensor<1x2x4xf32>
%4 = linalg.tensor_reshape %1 [#map1, #map2]
: tensor<2x4xf32> into tensor<1x2x4xf32>
%5 = linalg.tensor_reshape %1 [#map1, #map2]
: tensor<2x4xf32> into tensor<1x2x4xf32>
hal.interface.store.tensor %3, @legacy_io::@ret0, offset = %c0
{operand_result_index = 1 : i32} : tensor<1x2x4xf32>
hal.interface.store.tensor %4, @legacy_io::@ret1, offset = %c0
{operand_result_index = 2 : i32} : tensor<1x2x4xf32>
hal.interface.store.tensor %5, @legacy_io::@ret2, offset = %c0
{operand_result_index = 3 : i32} : tensor<1x2x4xf32>
return
}
hal.interface @legacy_io attributes {sym_visibility = "private"} {
hal.interface.binding @arg0, set=0, binding=0, type="StorageBuffer",
access="Read"
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer",
access="Write|Discard"
hal.interface.binding @ret1, set=0, binding=2, type="StorageBuffer",
access="Write|Discard"
hal.interface.binding @ret2, set=0, binding=3, type="StorageBuffer",
access="Write|Discard"
}
}

// CHECK-LABEL: func @store_reshape_src_and_result_2
// CHECK-DAG: %[[T0:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@ret2, operand_result_index = 3 : i32} : memref<1x2x4xf32>
// CHECK-DAG: %[[T1:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@ret2, operand_result_index = 3 : i32} : memref<2x4xf32>
// CHECK-DAG: %[[T2:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@ret1, operand_result_index = 2 : i32} : memref<1x2x4xf32>
// CHECK-DAG: %[[T3:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@ret0, operand_result_index = 1 : i32} : memref<1x2x4xf32>
// CHECK-DAG: %[[T4:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@arg0, operand_result_index = 0 : i32} : memref<2x4xf32>
// CHECK: linalg.generic
// CHECK-SAME: ins(%[[T4]] :
// CHECK-SAME: outs(%[[T1]] :
// CHECK: linalg.copy(%[[T0]], %[[T3]])
// CHECK: linalg.copy(%[[T0]], %[[T2]])
// CHECK: return

// -----

#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
#map2 = affine_map<(d0, d1) -> (d0, d1)>
@@ -200,7 +200,7 @@ void populateConvImg2ColMatmulConversionPatterns(
void ConvImg2ColMatmulConversionPass::runOnFunction() {
auto funcOp = getOperation();
auto context = funcOp.getContext();
OwningRewritePatternList patterns;
OwningRewritePatternList patterns(&getContext());
populateConvImg2ColMatmulConversionPatterns(context, patterns);
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}