-
Notifications
You must be signed in to change notification settings - Fork 6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Impeller] Encode render passes concurrently on iOS. #42028
Changes from 19 commits
93b6a9c
7c9382d
e122d32
23ecc12
69ac789
b49f056
18ef717
71369e4
2648dd2
2821932
b2515af
83b0b90
1aed386
5198c2c
56c1687
929f0bc
b843862
3df1b91
07ab4e9
9670639
60b42be
9d1a0cc
ff058f8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,8 +4,13 @@ | |
|
||
#include "impeller/renderer/backend/metal/command_buffer_mtl.h" | ||
|
||
#include "flutter/fml/make_copyable.h" | ||
#include "flutter/fml/synchronization/semaphore.h" | ||
#include "flutter/fml/trace_event.h" | ||
|
||
#include "impeller/renderer/backend/metal/blit_pass_mtl.h" | ||
#include "impeller/renderer/backend/metal/compute_pass_mtl.h" | ||
#include "impeller/renderer/backend/metal/context_mtl.h" | ||
#include "impeller/renderer/backend/metal/render_pass_mtl.h" | ||
|
||
namespace impeller { | ||
|
@@ -171,6 +176,62 @@ static bool LogMTLCommandBufferErrorIfPresent(id<MTLCommandBuffer> buffer) { | |
return true; | ||
} | ||
|
||
bool CommandBufferMTL::SubmitCommandsAsync( | ||
std::shared_ptr<RenderPass> render_pass) { | ||
TRACE_EVENT0("impeller", "CommandBufferMTL::SubmitCommandsAsync"); | ||
if (!IsValid() || !render_pass->IsValid()) { | ||
return false; | ||
} | ||
auto context = context_.lock(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think this is more or less what ended up being the fix for the problems around ownership in the Vulkan backend. |
||
if (!context) { | ||
return false; | ||
} | ||
[buffer_ enqueue]; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Enqueuing on the main thread ensures the order |
||
auto buffer = buffer_; | ||
buffer_ = nil; | ||
|
||
auto worker_task_runner = ContextMTL::Cast(*context).GetWorkerTaskRunner(); | ||
auto mtl_render_pass = static_cast<RenderPassMTL*>(render_pass.get()); | ||
|
||
// Render command encoder creation has been observed to exceed the stack size | ||
// limit for worker threads, and therefore is intentionally constructed on the | ||
// raster thread. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can we not bump the stack size limit for worker threads? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How can I try that? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It's platform specific code, we'd need a pthreads and a win32 implementation at the least. |
||
auto render_command_encoder = | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This was a frustrating discovery because it prevents us from doing the obvious thing and just calling RenderPass::Encode in the closure. |
||
[buffer renderCommandEncoderWithDescriptor:mtl_render_pass->desc_]; | ||
if (!render_command_encoder) { | ||
return false; | ||
} | ||
|
||
auto task = fml::MakeCopyable([render_pass, buffer, render_command_encoder, | ||
weak_context = context_]() { | ||
auto context = weak_context.lock(); | ||
if (!context) { | ||
return; | ||
} | ||
auto is_gpu_disabled_sync_switch = | ||
ContextMTL::Cast(*context).GetIsGpuDisabledSyncSwitch(); | ||
is_gpu_disabled_sync_switch->Execute(fml::SyncSwitch::Handlers().SetIfFalse( | ||
[&render_pass, &render_command_encoder, &buffer, &context] { | ||
auto mtl_render_pass = static_cast<RenderPassMTL*>(render_pass.get()); | ||
if (!mtl_render_pass->label_.empty()) { | ||
[render_command_encoder | ||
setLabel:@(mtl_render_pass->label_.c_str())]; | ||
} | ||
|
||
auto result = mtl_render_pass->EncodeCommands( | ||
context->GetResourceAllocator(), render_command_encoder); | ||
[render_command_encoder endEncoding]; | ||
if (result) { | ||
[buffer commit]; | ||
} else { | ||
VALIDATION_LOG << "Failed to encode command buffer"; | ||
} | ||
})); | ||
}); | ||
worker_task_runner->PostTask(task); | ||
return true; | ||
} | ||
|
||
void CommandBufferMTL::OnWaitUntilScheduled() {} | ||
|
||
std::shared_ptr<RenderPass> CommandBufferMTL::OnCreateRenderPass( | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Currently this only works for the render pass but could optionally take the blit and compute passes too. I moved this functionality to the command buffer to make ownership of the render pass data more obvious - the closure keeps the shared_ptr for the render pass alive while the command buffer itself only requires the buffer.