diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl
index 2bfb15ddb0fc4..77163f2ff76d0 100644
--- a/base/compiler/optimize.jl
+++ b/base/compiler/optimize.jl
@@ -251,7 +251,7 @@ function optimize(opt::OptimizationState, params::OptimizationParams, @nospecial
             # obey @inline declaration if a dispatch barrier would not help
         else
             bonus = 0
-            if result ⊑ Tuple && !isbitstype(widenconst(result))
+            if result ⊑ Tuple && !isconcretetype(widenconst(result))
                 bonus = params.inline_tupleret_bonus
             end
             if opt.src.inlineable
diff --git a/base/compiler/types.jl b/base/compiler/types.jl
index a5177afb70543..754be738252e8 100644
--- a/base/compiler/types.jl
+++ b/base/compiler/types.jl
@@ -44,7 +44,7 @@ struct OptimizationParams
     inlining::Bool              # whether inlining is enabled
     inline_cost_threshold::Int  # number of CPU cycles beyond which it's not worth inlining
     inline_nonleaf_penalty::Int # penalty for dynamic dispatch
-    inline_tupleret_bonus::Int  # extra willingness for non-isbits tuple return types
+    inline_tupleret_bonus::Int  # extra inlining willingness for non-concrete tuple return types (in hopes of splitting it up)
     inline_error_path_cost::Int # cost of (un-optimized) calls in blocks that throw
 
     # Duplicating for now because optimizer inlining requires it.
@@ -57,7 +57,7 @@ struct OptimizationParams
             inlining::Bool = inlining_enabled(),
            inline_cost_threshold::Int = 100,
            inline_nonleaf_penalty::Int = 1000,
-           inline_tupleret_bonus::Int = 400,
+           inline_tupleret_bonus::Int = 250,
            inline_error_path_cost::Int = 20,
            max_methods::Int = 3,
            tuple_splat::Int = 32,
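
For context (not part of the patch): switching the check from !isbitstype to !isconcretetype narrows the bonus to calls whose inferred tuple return type is still abstract, where inlining may let the caller split the tuple apart, instead of granting it for any non-isbits tuple. A rough sketch of where the two predicates disagree on tuple types, using only the stock Julia predicates:

    # Sketch (not part of the patch): how the old and new predicates classify
    # a few tuple return types.
    isbitstype(Tuple{Int,Float64})      # true  -- no bonus under either check
    isconcretetype(Tuple{Int,Float64})  # true

    isbitstype(Tuple{Int,String})       # false -- old check granted the bonus
    isconcretetype(Tuple{Int,String})   # true  -- new check does not

    isbitstype(Tuple{Int,Any})          # false -- old check granted the bonus
    isconcretetype(Tuple{Int,Any})      # false -- new check still does (now 250 instead of 400)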