You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
Can someone help me?
Thanks !
[Dataset 0]
loading image sizes.
100%|██████████████████████████████████████████████████████████████████████████████████| 76/76 [00:01<00:00, 38.18it/s]
prepare dataset
preparing accelerator
Using accelerator 0.15.0 or above.
loading model for process 0/1
load StableDiffusion checkpoint: D:/stable diffusion/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned.safetensors
loading u-net:
loading vae:
Downloading pytorch_model.bin: 1%|▎ | 10.5M/1.71G [00:20<25:27, 1.11MB/s]╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:444 in _error_catcher │
│ │
│ 441 │ │ │
│ 442 │ │ try: │
│ 443 │ │ │ try: │
│ ❱ 444 │ │ │ │ yield │
│ 445 │ │ │ │
│ 446 │ │ │ except SocketTimeout: │
│ 447 │ │ │ │ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError bu │
│ │
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:567 in read │
│ │
│ 564 │ │ fp_closed = getattr(self._fp, "closed", False) │
│ 565 │ │ │
│ 566 │ │ with self._error_catcher(): │
│ ❱ 567 │ │ │ data = self._fp_read(amt) if not fp_closed else b"" │
│ 568 │ │ │ if amt is None: │
│ 569 │ │ │ │ flush_decoder = True │
│ 570 │ │ │ else: │
│ │
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:533 in _fp_read │
│ │
│ 530 │ │ │ return buffer.getvalue() │
│ 531 │ │ else: │
│ 532 │ │ │ # StringIO doesn't like amt=None │
│ ❱ 533 │ │ │ return self._fp.read(amt) if amt is not None else self._fp.read() │
│ 534 │ │
│ 535 │ def read(self, amt=None, decode_content=None, cache_content=False): │
│ 536 │ │ """ │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\http\client.py:465 in read │
│ │
│ 462 │ │ │ if self.length is not None and amt > self.length: │
│ 463 │ │ │ │ # clip the read to the "end of response" │
│ 464 │ │ │ │ amt = self.length │
│ ❱ 465 │ │ │ s = self.fp.read(amt) │
│ 466 │ │ │ if not s and amt: │
│ 467 │ │ │ │ # Ideally, we would raise IncompleteRead if the content-length │
│ 468 │ │ │ │ # wasn't satisfied, but it might break compatibility. │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\socket.py:705 in readinto │
│ │
│ 702 │ │ │ raise OSError("cannot read from timed out object") │
│ 703 │ │ while True: │
│ 704 │ │ │ try: │
│ ❱ 705 │ │ │ │ return self._sock.recv_into(b) │
│ 706 │ │ │ except timeout: │
│ 707 │ │ │ │ self._timeout_occurred = True │
│ 708 │ │ │ │ raise │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\ssl.py:1274 in recv_into │
│ │
│ 1271 │ │ │ │ raise ValueError( │
│ 1272 │ │ │ │ "non-zero flags not allowed in calls to recv_into() on %s" % │
│ 1273 │ │ │ │ self.__class__) │
│ ❱ 1274 │ │ │ return self.read(nbytes, buffer) │
│ 1275 │ │ else: │
│ 1276 │ │ │ return super().recv_into(buffer, nbytes, flags) │
│ 1277 │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\ssl.py:1130 in read │
│ │
│ 1127 │ │ │ raise ValueError("Read on closed or unwrapped SSL socket.") │
│ 1128 │ │ try: │
│ 1129 │ │ │ if buffer is not None: │
│ ❱ 1130 │ │ │ │ return self._sslobj.read(len, buffer) │
│ 1131 │ │ │ else: │
│ 1132 │ │ │ │ return self._sslobj.read(len) │
│ 1133 │ │ except SSLError as x: │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
TimeoutError: The read operation timed out
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ D:\kohya_ss\venv\lib\site-packages\requests\models.py:816 in generate │
│ │
│ 813 │ │ │ # Special case for urllib3. │
│ 814 │ │ │ if hasattr(self.raw, "stream"): │
│ 815 │ │ │ │ try: │
│ ❱ 816 │ │ │ │ │ yield from self.raw.stream(chunk_size, decode_content=True) │
│ 817 │ │ │ │ except ProtocolError as e: │
│ 818 │ │ │ │ │ raise ChunkedEncodingError(e) │
│ 819 │ │ │ │ except DecodeError as e: │
│ │
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:628 in stream │
│ │
│ 625 │ │ │ │ yield line │
│ 626 │ │ else: │
│ 627 │ │ │ while not is_fp_closed(self._fp): │
│ ❱ 628 │ │ │ │ data = self.read(amt=amt, decode_content=decode_content) │
│ 629 │ │ │ │ │
│ 630 │ │ │ │ if data: │
│ 631 │ │ │ │ │ yield data │
│ │
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:566 in read │
│ │
│ 563 │ │ flush_decoder = False │
│ 564 │ │ fp_closed = getattr(self._fp, "closed", False) │
│ 565 │ │ │
│ ❱ 566 │ │ with self._error_catcher(): │
│ 567 │ │ │ data = self._fp_read(amt) if not fp_closed else b"" │
│ 568 │ │ │ if amt is None: │
│ 569 │ │ │ │ flush_decoder = True │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\contextlib.py:153 in __exit__ │
│ │
│ 150 │ │ │ │ # tell if we get the same exception back │
│ 151 │ │ │ │ value = typ() │
│ 152 │ │ │ try: │
│ ❱ 153 │ │ │ │ self.gen.throw(typ, value, traceback) │
│ 154 │ │ │ except StopIteration as exc: │
│ 155 │ │ │ │ # Suppress StopIteration unless it's the same exception that │
│ 156 │ │ │ │ # was passed to throw(). This prevents a StopIteration │
│ │
│ D:\kohya_ss\venv\lib\site-packages\urllib3\response.py:449 in _error_catcher │
│ │
│ 446 │ │ │ except SocketTimeout: │
│ 447 │ │ │ │ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError bu │
│ 448 │ │ │ │ # there is yet no clean way to get at it from this context. │
│ ❱ 449 │ │ │ │ raise ReadTimeoutError(self._pool, None, "Read timed out.") │
│ 450 │ │ │ │
│ 451 │ │ │ except BaseSSLError as e: │
│ 452 │ │ │ │ # FIXME: Is there a better way to differentiate between SSLErrors? │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ D:\kohya_ss\train_network.py:864 in │
│ │
│ 861 │ args = parser.parse_args() │
│ 862 │ args = train_util.read_config_from_file(args, parser) │
│ 863 │ │
│ ❱ 864 │ train(args) │
│ 865 │
│ │
│ D:\kohya_ss\train_network.py:160 in train │
│ │
│ 157 │ weight_dtype, save_dtype = train_util.prepare_dtype(args) │
│ 158 │ │
│ 159 │ # モデルを読み込む │
│ ❱ 160 │ text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accele │
│ 161 │ │
│ 162 │ # モデルに xformers とか memory efficient attention を組み込む │
│ 163 │ train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers) │
│ │
│ D:\kohya_ss\library\train_util.py:3061 in load_target_model │
│ │
│ 3058 │ │ if pi == accelerator.state.local_process_index: │
│ 3059 │ │ │ print(f"loading model for process {accelerator.state.local_process_index}/{a │
│ 3060 │ │ │ │
│ ❱ 3061 │ │ │ text_encoder, vae, unet, load_stable_diffusion_format = _load_target_model( │
│ 3062 │ │ │ │ args, weight_dtype, accelerator.device if args.lowram else "cpu" │
│ 3063 │ │ │ ) │
│ 3064 │
│ │
│ D:\kohya_ss\library\train_util.py:3027 in _load_target_model │
│ │
│ 3024 │ load_stable_diffusion_format = os.path.isfile(name_or_path) # determine SD or Diffu │
│ 3025 │ if load_stable_diffusion_format: │
│ 3026 │ │ print(f"load StableDiffusion checkpoint: {name_or_path}") │
│ ❱ 3027 │ │ text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoin │
│ 3028 │ else: │
│ 3029 │ │ # Diffusers model is loaded to CPU │
│ 3030 │ │ print(f"load Diffusers pretrained models: {name_or_path}") │
│ │
│ D:\kohya_ss\library\model_util.py:904 in load_models_from_stable_diffusion_checkpoint │
│ │
│ 901 │ │ converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(state_dict) │
│ 902 │ │ │
│ 903 │ │ logging.set_verbosity_error() # don't show annoying warning │
│ ❱ 904 │ │ text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(d │
│ 905 │ │ logging.set_verbosity_warning() │
│ 906 │ │ │
│ 907 │ │ info = text_model.load_state_dict(converted_text_encoder_checkpoint) │
│ │
│ D:\kohya_ss\venv\lib\site-packages\transformers\modeling_utils.py:2222 in from_pretrained │
│ │
│ 2219 │ │ │ │ │ │ else: │
│ 2220 │ │ │ │ │ │ │ # This repo has no safetensors file of any kind, we switch t │
│ 2221 │ │ │ │ │ │ │ filename = WEIGHTS_NAME │
│ ❱ 2222 │ │ │ │ │ │ │ resolved_archive_file = cached_file( │
│ 2223 │ │ │ │ │ │ │ │ pretrained_model_name_or_path, WEIGHTS_NAME, **cached_fi │
│ 2224 │ │ │ │ │ │ │ ) │
│ 2225 │ │ │ │ │ if resolved_archive_file is None and filename == WEIGHTS_NAME: │
│ │
│ D:\kohya_ss\venv\lib\site-packages\transformers\utils\hub.py:409 in cached_file │
│ │
│ 406 │ user_agent = http_user_agent(user_agent) │
│ 407 │ try: │
│ 408 │ │ # Load from URL or cache if already cached │
│ ❱ 409 │ │ resolved_file = hf_hub_download( │
│ 410 │ │ │ path_or_repo_id, │
│ 411 │ │ │ filename, │
│ 412 │ │ │ subfolder=None if len(subfolder) == 0 else subfolder, │
│ │
│ D:\kohya_ss\venv\lib\site-packages\huggingface_hub\utils\_validators.py:120 in _inner_fn │
│ │
│ 117 │ │ if check_use_auth_token: │
│ 118 │ │ │ kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=ha │
│ 119 │ │ │
│ ❱ 120 │ │ return fn(*args, **kwargs) │
│ 121 │ │
│ 122 │ return _inner_fn # type: ignore │
│ 123 │
│ │
│ D:\kohya_ss\venv\lib\site-packages\huggingface_hub\file_download.py:1326 in hf_hub_download │
│ │
│ 1323 │ │ with temp_file_manager() as temp_file: │
│ 1324 │ │ │ logger.info("downloading %s to %s", url, temp_file.name) │
│ 1325 │ │ │ │
│ ❱ 1326 │ │ │ http_get( │
│ 1327 │ │ │ │ url_to_download, │
│ 1328 │ │ │ │ temp_file, │
│ 1329 │ │ │ │ proxies=proxies, │
│ │
│ D:\kohya_ss\venv\lib\site-packages\huggingface_hub\file_download.py:538 in http_get │
│ │
│ 535 │ │ desc=f"Downloading {displayed_name}", │
│ 536 │ │ disable=bool(logger.getEffectiveLevel() == logging.NOTSET), │
│ 537 │ ) │
│ ❱ 538 │ for chunk in r.iter_content(chunk_size=10 * 1024 * 1024): │
│ 539 │ │ if chunk: # filter out keep-alive new chunks │
│ 540 │ │ │ progress.update(len(chunk)) │
│ 541 │ │ │ temp_file.write(chunk) │
│ │
│ D:\kohya_ss\venv\lib\site-packages\requests\models.py:822 in generate │
│ │
│ 819 │ │ │ │ except DecodeError as e: │
│ 820 │ │ │ │ │ raise ContentDecodingError(e) │
│ 821 │ │ │ │ except ReadTimeoutError as e: │
│ ❱ 822 │ │ │ │ │ raise ConnectionError(e) │
│ 823 │ │ │ │ except SSLError as e: │
│ 824 │ │ │ │ │ raise RequestsSSLError(e) │
│ 825 │ │ │ else: │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
ConnectionError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.
Downloading pytorch_model.bin: 1%|▎ | 10.5M/1.71G [00:31<1:25:13, 332kB/s]
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\runpy.py:196 in _run_module_as_main │
│ │
│ 193 │ main_globals = sys.modules["__main__"].__dict__ │
│ 194 │ if alter_argv: │
│ 195 │ │ sys.argv[0] = mod_spec.origin │
│ ❱ 196 │ return _run_code(code, main_globals, None, │
│ 197 │ │ │ │ │ "__main__", mod_spec) │
│ 198 │
│ 199 def run_module(mod_name, init_globals=None, │
│ │
│ C:\Users\qqq99\AppData\Local\Programs\Python\Python310\lib\runpy.py:86 in _run_code │
│ │
│ 83 │ │ │ │ │ loader = loader, │
│ 84 │ │ │ │ │ package = pkg_name, │
│ 85 │ │ │ │ │ spec = mod_spec) │
│ ❱ 86 │ exec(code, run_globals) │
│ 87 │ return run_globals │
│ 88 │
│ 89 def _run_module_code(code, init_globals=None, │
│ │
│ in <module>:7 │
│ │
│ 4 from accelerate.commands.accelerate_cli import main │
│ 5 if __name__ == '__main__': │
│ 6 │ sys.argv[0] = re.sub(r'(-script.pyw|.exe)?$', '', sys.argv[0]) │
│ ❱ 7 │ sys.exit(main()) │
│ 8 │
│ │
│ D:\kohya_ss\venv\lib\site-packages\accelerate\commands\accelerate_cli.py:45 in main │
│ │
│ 42 │ │ exit(1) │
│ 43 │ │
│ 44 │ # Run │
│ ❱ 45 │ args.func(args) │
│ 46 │
│ 47 │
│ 48 if __name__ == "__main__": │
│ │
│ D:\kohya_ss\venv\lib\site-packages\accelerate\commands\launch.py:1104 in launch_command │
│ │
│ 1101 │ elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMA │
│ 1102 │ │ sagemaker_launcher(defaults, args) │
│ 1103 │ else: │
│ ❱ 1104 │ │ simple_launcher(args) │
│ 1105 │
│ 1106 │
│ 1107 def main(): │
│ │
│ D:\kohya_ss\venv\lib\site-packages\accelerate\commands\launch.py:567 in simple_launcher │
│ │
│ 564 │ process = subprocess.Popen(cmd, env=current_env) │
│ 565 │ process.wait() │
│ 566 │ if process.returncode != 0: │
│ ❱ 567 │ │ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) │
│ 568 │
│ 569 │
│ 570 def multi_gpu_launcher(args): │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
CalledProcessError: Command '['D:\kohya_ss\venv\Scripts\python.exe', 'train_network.py',
'--pretrained_model_name_or_path=D:/stable
diffusion/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned.safetensors', '--train_data_dir=D:/stable
diffusion/pre/', '--resolution=512,512', '--output_dir=D:/stable diffusion/pre/OtPut', '--network_alpha=128',
'--save_model_as=safetensors', '--network_module=networks.lora', '--text_encoder_lr=5e-05', '--unet_lr=0.0001',
'--network_dim=128', '--output_name=asian', '--lr_scheduler_num_cycles=1', '--learning_rate=0.0001',
'--lr_scheduler=constant', '--train_batch_size=1', '--max_train_steps=7600', '--save_every_n_epochs=1',
'--mixed_precision=fp16', '--save_precision=fp16', '--seed=1234', '--caption_extension=.txt', '--cache_latents',
'--optimizer_type=AdamW', '--max_data_loader_n_workers=1', '--clip_skip=2', '--bucket_reso_steps=64', '--mem_eff_attn',
'--gradient_checkpointing', '--xformers', '--bucket_no_upscale']' returned non-zero exit status 1.
Beta Was this translation helpful? Give feedback.
All reactions