Launched the webui with `python webui.py`.
When I split the text and generate speech twice in a row, it reports a CUDA out-of-memory error.
Traceback (most recent call last):
File "/home/python_env/bert/lib/python3.10/site-packages/gradio/routes.py", line 534, in predict
output = await route_utils.call_process_api(
File "/home/python_env/bert/lib/python3.10/site-packages/gradio/route_utils.py", line 226, in call_process_api
output = await app.get_blocks().process_api(
File "/home/python_env/bert/lib/python3.10/site-packages/gradio/blocks.py", line 1550, in process_api
result = await self.call_function(
File "/home/python_env/bert/lib/python3.10/site-packages/gradio/blocks.py", line 1185, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/python_env/bert/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/home/python_env/bert/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2405, in run_sync_in_worker_thread
return await future
File "/home/python_env/bert/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 914, in run
result = context.run(func, *args)
File "/home/python_env/bert/lib/python3.10/site-packages/gradio/utils.py", line 661, in wrapper
response = f(*args, **kwargs)
File "/home/project/Bert-VITS2/webui.py", line 164, in tts_split
audio_list += process_text(
File "/home/project/Bert-VITS2/webui.py", line 308, in process_text
generate_audio(
File "/home/project/Bert-VITS2/webui.py", line 72, in generate_audio
audio = infer(
File "/home/project/Bert-VITS2/infer.py", line 302, in infer
net_g.infer(
File "/home/project/Bert-VITS2/models.py", line 1074, in infer
z = self.flow(z_p, y_mask, g=g, reverse=True)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/home/project/Bert-VITS2/models.py", line 145, in forward
x = flow(x, x_mask, g=g, reverse=reverse)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/home/project/Bert-VITS2/modules.py", line 564, in forward
h = self.enc(h, x_mask, g=g)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/home/project/Bert-VITS2/attentions.py", line 112, in forward
y = self.attn_layers[i](x, x, attn_mask)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/home/project/Bert-VITS2/attentions.py", line 268, in forward
x, self.attn = self.attention(q, k, v, mask=attn_mask)
File "/home/project/Bert-VITS2/attentions.py", line 312, in attention
relative_weights = self._absolute_position_to_relative_position(p_attn)
File "/home/project/Bert-VITS2/attentions.py", line 393, in _absolute_position_to_relative_position
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
File "/home/python_env/bert/lib/python3.10/site-packages/torch/nn/functional.py", line 4552, in pad
return torch._C._nn.pad(input, pad, mode, value)
torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 688.00 MiB. GPU 0 has a total capacity of 11.92 GiB of which 317.25 MiB is free. Including non-PyTorch memory, this process has 11.60 GiB memory in use. Of the allocated memory 9.98 GiB is allocated by PyTorch, and 1.51 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
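For reference, the error message itself suggests trying `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`. Below is a minimal sketch of how I imagine applying it (my own assumption, not verified): the variable would need to be set before torch initializes CUDA, e.g. at the very top of webui.py, and `release_cached_vram` is just a hypothetical helper name for freeing cached blocks between generations.

```python
# Sketch only (assumption, untested): apply the allocator option suggested by
# the error message before torch initializes CUDA.
# Equivalent shell form: PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True python webui.py
import os
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import torch


def release_cached_vram():
    # Hypothetical helper: return cached-but-unused blocks to the driver,
    # e.g. after each generation. This does not lower the peak memory
    # needed for a single very long utterance.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
```

I am not sure whether this actually addresses the memory growth on repeated generations, though.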
Could anyone advise how to solve this?