Not sure why this is happening while testing the IP Adapter with the ComfyUI workflow, using the sample.json file provided here!
!!! Exception during processing !!!
Traceback (most recent call last):
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 152, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 82, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 75, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\custom_nodes\IPAdapter-ComfyUI\ip_adapter.py", line 154, in adapter
cond, uncond, outputs = self.clip_vision_encode(clip_vision, image, self.plus)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\custom_nodes\IPAdapter-ComfyUI\ip_adapter.py", line 221, in clip_vision_encode
outputs = clip_vision.model(image, output_hidden_states=True)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\transformers\models\clip\modeling_clip.py", line 1304, in forward
vision_outputs = self.vision_model(
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\transformers\models\clip\modeling_clip.py", line 859, in forward
hidden_states = self.embeddings(pixel_values)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\transformers\models\clip\modeling_clip.py", line 195, in forward
patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\conv.py", line 463, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\AI\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\conv.py", line 459, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
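
The RuntimeError at the bottom is a dtype mismatch: the image tensor reaches the CLIP vision model as float32 while the model weights were loaded as float16, so F.conv2d rejects the pair. A minimal workaround sketch, casting the input to the weights' dtype and device before the encode call (the function and attribute names follow the traceback above and may differ in your install):

import torch

def clip_vision_encode_fixed(clip_vision, image):
    # Cast the input image to the device and dtype of the vision model's
    # weights, so F.conv2d sees matching tensor types on both sides.
    param = next(clip_vision.model.parameters())
    image = image.to(device=param.device, dtype=param.dtype)
    return clip_vision.model(image, output_hidden_states=True)

Alternatively, running ComfyUI in full precision (it has dtype-related launch flags; --force-fp32, if your build supports it) should sidestep the mismatch by keeping the weights in float32 as well.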