You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Model loaded in 8.2s (load weights from disk: 1.4s, create model: 0.9s, apply weights to model: 2.7s, apply half(): 2.3s, calculate empty prompt: 0.8s).
WARNING: The new version of the model will be updated via safetensors; you may need to download it manually. We run the old version of the checkpoint this time!
{'wav2lip_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\wav2lip.pth', 'audio2pose_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\auido2pose_00140-model.pth', 'audio2exp_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\auido2exp_00300-model.pth', 'free_view_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\facevid2vid_00189-model.pth.tar', 'path_of_net_recon_model': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\epoch_20.pth', 'dir_of_BFM_fitting': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config', 'audio2pose_yaml_path': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\auido2pose.yaml', 'audio2exp_yaml_path': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\auido2exp.yaml', 'use_safetensor': False, 'mappingnet_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\mapping_00229-model.pth.tar', 'facerender_yaml': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\facerender.yaml'}
C:\Users\小米\AppData\Local\Temp\gradio\tmp_o_3vb2_.png
Traceback (most recent call last):
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\routes.py", line 422, in run_predict
output = await app.get_blocks().process_api(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\blocks.py", line 1323, in process_api
result = await self.call_function(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\blocks.py", line 1051, in call_function
prediction = await anyio.to_thread.run_sync(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio_backends_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio_backends_asyncio.py", line 867, in run
result = context.run(func, *args)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\modules\call_queue.py", line 14, in f
res = func(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\gradio_demo.py", line 139, in test
return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess, img_size=size)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\animate.py", line 183, in generate
predictions_video = make_animation(source_image, source_semantics, target_semantics,
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\make_animation.py", line 128, in make_animation
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\generator.py", line 224, in forward
dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\dense_motion.py", line 101, in forward
mask = self.mask(prediction)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\conv.py", line 613, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\conv.py", line 608, in _conv_forward
return F.conv3d(
RuntimeError: GET was unable to find an engine to execute this computation
提示:Python 运行时抛出了一个异常。请检查疑难解答页面。
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
Model loaded in 8.2s (load weights from disk: 1.4s, create model: 0.9s, apply weights to model: 2.7s, apply half(): 2.3s, calculate empty prompt: 0.8s).
WARNING: The new version of the model will be updated via safetensors; you may need to download it manually. We run the old version of the checkpoint this time!
{'wav2lip_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\wav2lip.pth', 'audio2pose_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\auido2pose_00140-model.pth', 'audio2exp_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\auido2exp_00300-model.pth', 'free_view_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\facevid2vid_00189-model.pth.tar', 'path_of_net_recon_model': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\epoch_20.pth', 'dir_of_BFM_fitting': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config', 'audio2pose_yaml_path': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\auido2pose.yaml', 'audio2exp_yaml_path': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\auido2exp.yaml', 'use_safetensor': False, 'mappingnet_checkpoint': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\extensions\SadTalker\checkpoints\mapping_00229-model.pth.tar', 'facerender_yaml': 'D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker/src/config\facerender.yaml'}
C:\Users\小米\AppData\Local\Temp\gradio\tmp_o_3vb2_.png
Traceback (most recent call last):
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\routes.py", line 422, in run_predict
output = await app.get_blocks().process_api(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\blocks.py", line 1323, in process_api
result = await self.call_function(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\gradio\blocks.py", line 1051, in call_function
prediction = await anyio.to_thread.run_sync(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio_backends_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\anyio_backends_asyncio.py", line 867, in run
result = context.run(func, *args)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\modules\call_queue.py", line 14, in f
res = func(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\gradio_demo.py", line 139, in test
return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess, img_size=size)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\animate.py", line 183, in generate
predictions_video = make_animation(source_image, source_semantics, target_semantics,
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\make_animation.py", line 128, in make_animation
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\generator.py", line 224, in forward
dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2/extensions/SadTalker\src\facerender\modules\dense_motion.py", line 101, in forward
mask = self.mask(prediction)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\conv.py", line 613, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\sd-webui-aki-v4.2\sd-webui-aki-v4.2\python\lib\site-packages\torch\nn\modules\conv.py", line 608, in _conv_forward
return F.conv3d(
RuntimeError: GET was unable to find an engine to execute this computation
提示:Python 运行时抛出了一个异常。请检查疑难解答页面。
Beta Was this translation helpful? Give feedback.
All reactions