When I try to run the Colab version, I get the following error. Does anyone know a solution?
Traceback (most recent call last):
File "/usr/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File "/usr/lib/python3.10/threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "/content/Fooocus-MRE/modules/async_worker.py", line 613, in worker
handler(task)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/Fooocus-MRE/modules/async_worker.py", line 487, in handler
imgs = pipeline.process_diffusion(
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/Fooocus-MRE/modules/default_pipeline.py", line 403, in process_diffusion
sampled_latent = core.ksampler(
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/Fooocus-MRE/modules/core.py", line 347, in ksampler
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image,
File "/content/Fooocus-MRE/modules/samplers_advanced.py", line 202, in sample
samples = getattr(k_diffusion_sampling, "sample{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/k_diffusion/sampling.py", line 701, in sample_dpmpp_2m_sde_gpu
return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/k_diffusion/sampling.py", line 613, in sample_dpmpp_2m_sde
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/samplers.py", line 323, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/modules/patch.py", line 164, in patched_discrete_eps_ddpm_denoiser_forward
return self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/k_diffusion/external.py", line 151, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/samplers.py", line 311, in apply_model
out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/samplers.py", line 289, in sampling_function
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/samplers.py", line 263, in calc_cond_uncond_batch
output = model_options['model_function_wrapper'](model_function, {"input": input_x, "timestep": timestep, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
File "/content/Fooocus-MRE/modules/patch.py", line 173, in patched_model_function
return func(x, t, **c)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/model_base.py", line 63, in apply_model
return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float()
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/modules/patch.py", line 338, in patched_unet_forward
h = forward_timestep_embed(module, h, emb, context, transformer_options)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/diffusionmodules/openaimodel.py", line 56, in forward_timestep_embed
x = layer(x, context, transformer_options)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/attention.py", line 693, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/attention.py", line 525, in forward
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/diffusionmodules/util.py", line 123, in checkpoint
return func(*inputs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/attention.py", line 588, in _forward
n = self.attn1(n, context=context_attn1, value=value_attn1)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/Fooocus-MRE/repositories/ComfyUI-from-StabilityAI-Official/comfy/ldm/modules/attention.py", line 438, in forward
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 193, in memory_efficient_attention
return _memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 291, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 307, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 96, in _dispatch_fw
return _run_priority_list(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for `memory_efficient_attention_forward` with inputs:
     query       : shape=(20, 4032, 1, 64) (torch.float16)
     key         : shape=(20, 4032, 1, 64) (torch.float16)
     value       : shape=(20, 4032, 1, 64) (torch.float16)
     attn_bias   : <class 'NoneType'>
     p           : 0.0
`decoderF` is not supported because:
    xFormers wasn't build with CUDA support
    requires device with capability > (8, 0) but your GPU has capability (7, 5) (too old)
    attn_bias type is <class 'NoneType'>
    operator wasn't built - see `python -m xformers.info` for more info
`flshattF` is not supported because:
    xFormers wasn't build with CUDA support
    requires device with capability > (8, 0) but your GPU has capability (7, 5) (too old)
    operator wasn't built - see `python -m xformers.info` for more info
`tritonflashattF` is not supported because:
    xFormers wasn't build with CUDA support
    requires device with capability > (8, 0) but your GPU has capability (7, 5) (too old)
    operator wasn't built - see `python -m xformers.info` for more info
    triton is not available
    requires GPU with sm80 minimum compute capacity, e.g., A100/H100/L4
    Only work on pre-MLIR triton for now
`cutlassF` is not supported because:
    xFormers wasn't build with CUDA support
    operator wasn't built - see `python -m xformers.info` for more info
`smallkF` is not supported because:
    max(query.shape[-1] != value.shape[-1]) > 32
    xFormers wasn't build with CUDA support
    dtype=torch.float16 (supported: {torch.float32})
    operator wasn't built - see `python -m xformers.info` for more info
    unsupported embed per head: 64
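
For anyone hitting this on the free Colab tier: the root cause is at the bottom of the trace. The installed xformers wheel wasn't built with CUDA support, and the remaining CUDA kernels (decoderF, flshattF, tritonflashattF) require compute capability 8.0 or higher, while Colab's T4 is only 7.5. Two approaches should help: reinstall an xformers build that matches the installed torch (`pip install -U xformers`, then verify with `python -m xformers.info`), or skip xformers and rely on PyTorch 2.x's built-in `torch.nn.functional.scaled_dot_product_attention`, which runs fine on a T4. Below is a minimal sketch of that fallback; `attention_with_fallback` is a hypothetical helper for illustration, not Fooocus-MRE's actual code, and the tensor shapes are copied from the error above.

```python
# Hypothetical sketch: try xformers' memory-efficient attention and fall back
# to PyTorch's native scaled_dot_product_attention (works on sm_75 GPUs like
# the Colab T4). Not Fooocus-MRE code; shapes mirror the failing call above.
import torch
import torch.nn.functional as F

def attention_with_fallback(q, k, v):
    # q, k, v: (batch, seq_len, heads, head_dim), the layout xformers expects
    try:
        import xformers.ops
        return xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
    except (ImportError, NotImplementedError):
        # F.scaled_dot_product_attention wants (batch, heads, seq_len, head_dim),
        # so swap the head and sequence dimensions around the call.
        q, k, v = (t.transpose(1, 2) for t in (q, k, v))
        out = F.scaled_dot_product_attention(q, k, v)
        return out.transpose(1, 2)

if torch.cuda.is_available():
    # Shapes and dtype taken from the NotImplementedError above
    q = k = v = torch.randn(20, 4032, 1, 64, dtype=torch.float16, device="cuda")
    print(attention_with_fallback(q, k, v).shape)  # torch.Size([20, 4032, 1, 64])
```

Fooocus-MRE reuses ComfyUI's attention code, and stock ComfyUI exposes a `--use-pytorch-cross-attention` flag that switches to exactly this native path. So if the Colab notebook lets you pass launch arguments, that flag (assuming the MRE fork forwards it) is probably the least invasive fix.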