seawolf2357 committed
Commit 8fd539c • 1 Parent(s): e8d684c

Upload folder using huggingface_hub

febb161d-54ad-40d7-af5a-e0bfbfbdbe1c/metadata.jsonl ADDED
@@ -0,0 +1,3 @@
+ {"file_name": "\uc548\ub1552001.bmp", "prompt": "TOK"}
+ {"file_name": "\uc548\ub1553001.bmp", "prompt": "TOK"}
+ {"file_name": "\uc548\ub155001.bmp", "prompt": "TOK"}
febb161d-54ad-40d7-af5a-e0bfbfbdbe1c/안녕001.bmp ADDED

Git LFS Details

  • SHA256: 58efb18b057074421c112e00e8ede6ed07e60df2f77197713f08e404819d23fc
  • Pointer size: 132 Bytes
  • Size of remote file: 4.18 MB
febb161d-54ad-40d7-af5a-e0bfbfbdbe1c/안녕2001.bmp ADDED

Git LFS Details

  • SHA256: 58efb18b057074421c112e00e8ede6ed07e60df2f77197713f08e404819d23fc
  • Pointer size: 132 Bytes
  • Size of remote file: 4.18 MB
febb161d-54ad-40d7-af5a-e0bfbfbdbe1c/안녕3001.bmp ADDED

Git LFS Details

  • SHA256: 17bc0bc67431ae34ee3f76a78d1252932005ba78b688f56a111de97052f24611
  • Pointer size: 132 Bytes
  • Size of remote file: 4.18 MB
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ peft==0.7.1
+ # huggingface_hub  (unpinned duplicate disabled; the pinned version below is used)
+ torch
+ git+https://github.com/huggingface/diffusers@ba28006f8b2a0f7ec3b6784695790422b4f80a97
+ transformers==4.36.2
+ accelerate==0.25.0
+ safetensors==0.4.1
+ prodigyopt==1.0
+ hf-transfer==0.1.4
+ huggingface_hub==0.20.3
+ git+https://github.com/huggingface/datasets.git@3f149204a2a5948287adcade5e90707aa5207a92
script.py ADDED
@@ -0,0 +1,129 @@
+ import sys
+ import subprocess
+ from safetensors.torch import load_file
+ from diffusers import AutoPipelineForText2Image
+ from datasets import load_dataset
+ from huggingface_hub.repocard import RepoCard
+ from huggingface_hub import HfApi
+ import torch
+ import re
+ import argparse
+ import os
+ import zipfile
+
+ def do_preprocess(class_data_dir):
+     print("Unzipping dataset")
+     zip_file_path = f"{class_data_dir}/class_images.zip"
+     with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+         zip_ref.extractall(class_data_dir)
+     os.remove(zip_file_path)
+
+ def do_train(script_args):
+     # Pass all arguments to trainer.py
+     print("Starting training...")
+     result = subprocess.run(['python', 'trainer.py'] + script_args)
+     if result.returncode != 0:
+         raise Exception("Training failed.")
+
+ def replace_output_dir(text, output_dir, replacement):
+     # Define a pattern that matches the output_dir followed by whitespace, '/', new line, or "'"
+     # Add system name from HF only in the correct spots
+     pattern = rf"{output_dir}(?=[\s/'\n])"
+     return re.sub(pattern, replacement, text)
+
+ def do_inference(dataset_name, output_dir, num_tokens):
+     widget_content = []
+     try:
+         print("Starting inference to generate example images...")
+         dataset = load_dataset(dataset_name)
+         pipe = AutoPipelineForText2Image.from_pretrained(
+             "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+         )
+         pipe = pipe.to("cuda")
+         pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
+
+         prompts = dataset["train"]["prompt"]
+         if num_tokens > 0:
+             tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
+             tokens_list = [f'<s{i}>' for i in range(num_tokens)]
+
+             state_dict = load_file(f"{output_dir}/{output_dir}_emb.safetensors")
+             pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
+             pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list, text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
+
+             prompts = [prompt.replace("TOK", tokens_sequence) for prompt in prompts]
+
+         for i, prompt in enumerate(prompts):
+             image = pipe(prompt, num_inference_steps=25, guidance_scale=7.5).images[0]
+             filename = f"image-{i}.png"
+             image.save(f"{output_dir}/{filename}")
+             card_dict = {
+                 "text": prompt,
+                 "output": {
+                     "url": filename
+                 }
+             }
+             widget_content.append(card_dict)
+     except Exception as e:
+         print("Something went wrong with generating images, specifically: ", e)
+
+     try:
+         api = HfApi()
+         username = api.whoami()["name"]
+         repo_id = api.create_repo(f"{username}/{output_dir}", exist_ok=True, private=True).repo_id
+
+         with open(f'{output_dir}/README.md', 'r') as file:
+             readme_content = file.read()
+
+
+         readme_content = replace_output_dir(readme_content, output_dir, f"{username}/{output_dir}")
+
+         card = RepoCard(readme_content)
+         if widget_content:
+             card.data["widget"] = widget_content
+         card.save(f'{output_dir}/README.md')
+
+         print("Starting upload...")
+         api.upload_folder(
+             folder_path=output_dir,
+             repo_id=f"{username}/{output_dir}",
+             repo_type="model",
+         )
+     except Exception as e:
+         print("Something went wrong with uploading your model, specifically: ", e)
+     else:
+         print("Upload finished!")
+
+ import sys
+ import argparse
+
+ def main():
+     # Capture all arguments except the script name
+     script_args = sys.argv[1:]
+
+     # Create the argument parser
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--dataset_name', required=True)
+     parser.add_argument('--output_dir', required=True)
+     parser.add_argument('--num_new_tokens_per_abstraction', type=int, default=0)
+     parser.add_argument('--train_text_encoder_ti', action='store_true')
+     parser.add_argument('--class_data_dir', help="Name of the class images dataset")
+
+     # Parse known arguments
+     args, _ = parser.parse_known_args(script_args)
+
+     # Set num_tokens to 0 if '--train_text_encoder_ti' is not present
+     if not args.train_text_encoder_ti:
+         args.num_new_tokens_per_abstraction = 0
+
+     # Proceed with training and inference
+     if args.class_data_dir:
+         do_preprocess(args.class_data_dir)
+         print("Pre-processing finished!")
+     do_train(script_args)
+     print("Training finished!")
+     do_inference(args.dataset_name, args.output_dir, args.num_new_tokens_per_abstraction)
+     print("All finished!")
+
+ if __name__ == "__main__":
+     main()
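For orientation, a hedged sketch of how this entry point might be invoked, mirroring the subprocess call in do_train (the dataset repo, output name, and prompt below are placeholders, not taken from this commit; every flag shown is declared either in main() above or in trainer.py's parser):

import subprocess

script_args = [
    "--pretrained_model_name_or_path", "stabilityai/stable-diffusion-xl-base-1.0",
    "--dataset_name", "your-username/your-dreambooth-dataset",  # hypothetical dataset repo
    "--output_dir", "my-sdxl-lora",                              # hypothetical output folder / repo name
    "--instance_prompt", "photo of a TOK dog",
    "--train_text_encoder_ti",
    "--num_new_tokens_per_abstraction", "2",
]
subprocess.run(["python", "script.py"] + script_args, check=True)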
trainer.py ADDED
@@ -0,0 +1,2136 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+
16
+ import argparse
17
+ import gc
18
+ import hashlib
19
+ import itertools
20
+ import logging
21
+ import math
22
+ import os
23
+ import re
24
+ import shutil
25
+ import warnings
26
+ from pathlib import Path
27
+ from typing import List, Optional
28
+
29
+ import numpy as np
30
+ import torch
31
+ import torch.nn.functional as F
32
+
33
+ # imports of the TokenEmbeddingsHandler class
34
+ import torch.utils.checkpoint
35
+ import transformers
36
+ from accelerate import Accelerator
37
+ from accelerate.logging import get_logger
38
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
39
+ from huggingface_hub import create_repo, upload_folder
40
+ from packaging import version
41
+ from peft import LoraConfig, set_peft_model_state_dict
42
+ from peft.utils import get_peft_model_state_dict
43
+ from PIL import Image
44
+ from PIL.ImageOps import exif_transpose
45
+ from safetensors.torch import load_file, save_file
46
+ from torch.utils.data import Dataset
47
+ from torchvision import transforms
48
+ from tqdm.auto import tqdm
49
+ from transformers import AutoTokenizer, PretrainedConfig
50
+
51
+ import diffusers
52
+ from diffusers import (
53
+ AutoencoderKL,
54
+ DDPMScheduler,
55
+ DPMSolverMultistepScheduler,
56
+ StableDiffusionXLPipeline,
57
+ UNet2DConditionModel,
58
+ )
59
+ from diffusers.loaders import LoraLoaderMixin
60
+ from diffusers.optimization import get_scheduler
61
+ from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr
62
+ from diffusers.utils import (
63
+ check_min_version,
64
+ convert_all_state_dict_to_peft,
65
+ convert_state_dict_to_diffusers,
66
+ convert_state_dict_to_kohya,
67
+ convert_unet_state_dict_to_peft,
68
+ is_wandb_available,
69
+ )
70
+ from diffusers.utils.import_utils import is_xformers_available
71
+ from diffusers.utils.torch_utils import is_compiled_module
72
+
73
+
74
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
75
+ check_min_version("0.26.0.dev0")
76
+
77
+ logger = get_logger(__name__)
78
+
79
+
80
+ def save_model_card(
81
+ repo_id: str,
82
+ images=None,
83
+ base_model=str,
84
+ train_text_encoder=False,
85
+ train_text_encoder_ti=False,
86
+ token_abstraction_dict=None,
87
+ instance_prompt=str,
88
+ validation_prompt=str,
89
+ repo_folder=None,
90
+ vae_path=None,
91
+ ):
92
+ img_str = "widget:\n"
93
+ for i, image in enumerate(images):
94
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
95
+ img_str += f"""
96
+ - text: '{validation_prompt if validation_prompt else ' ' }'
97
+ output:
98
+ url:
99
+ "image_{i}.png"
100
+ """
101
+ if not images:
102
+ img_str += f"""
103
+ - text: '{instance_prompt}'
104
+ """
105
+ embeddings_filename = f"{repo_folder}_emb"
106
+ instance_prompt_webui = re.sub(r"<s\d+>", "", re.sub(r"<s\d+>", embeddings_filename, instance_prompt, count=1))
107
+ ti_keys = ", ".join(f'"{match}"' for match in re.findall(r"<s\d+>", instance_prompt))
108
+ if instance_prompt_webui != embeddings_filename:
109
+ instance_prompt_sentence = f"For example, `{instance_prompt_webui}`"
110
+ else:
111
+ instance_prompt_sentence = ""
112
+ trigger_str = f"You should use {instance_prompt} to trigger the image generation."
113
+ diffusers_imports_pivotal = ""
114
+ diffusers_example_pivotal = ""
115
+ webui_example_pivotal = ""
116
+ if train_text_encoder_ti:
117
+ trigger_str = (
118
+ "To trigger image generation of trained concept(or concepts) replace each concept identifier "
119
+ "in you prompt with the new inserted tokens:\n"
120
+ )
121
+ diffusers_imports_pivotal = """from huggingface_hub import hf_hub_download
122
+ from safetensors.torch import load_file
123
+ """
124
+ diffusers_example_pivotal = f"""embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors' repo_type="model")
125
+ state_dict = load_file(embedding_path)
126
+ pipeline.load_textual_inversion(state_dict["clip_l"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
127
+ pipeline.load_textual_inversion(state_dict["clip_g"], token=[{ti_keys}], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2)
128
+ """
129
+ webui_example_pivotal = f"""- *Embeddings*: download **[`{embeddings_filename}.safetensors` here πŸ’Ύ](/{repo_id}/blob/main/{embeddings_filename}.safetensors)**.
130
+ - Place it in your `embeddings` folder
131
+ - Use it by adding `{embeddings_filename}` to your prompt. {instance_prompt_sentence}
132
+ (you need both the LoRA and the embeddings as they were trained together for this LoRA)
133
+ """
134
+ if token_abstraction_dict:
135
+ for key, value in token_abstraction_dict.items():
136
+ tokens = "".join(value)
137
+ trigger_str += f"""
138
+ to trigger concept `{key}` → use `{tokens}` in your prompt \n
139
+ """
140
+
141
+ yaml = f"""---
142
+ tags:
143
+ - stable-diffusion-xl
144
+ - stable-diffusion-xl-diffusers
145
+ - text-to-image
146
+ - diffusers
147
+ - lora
148
+ - template:sd-lora
149
+ {img_str}
150
+ base_model: {base_model}
151
+ instance_prompt: {instance_prompt}
152
+ license: openrail++
153
+ ---
154
+ """
155
+
156
+ model_card = f"""
157
+ # SDXL LoRA DreamBooth - {repo_id}
158
+
159
+ <Gallery />
160
+
161
+ ## Model description
162
+
163
+ ### These are {repo_id} LoRA adaptation weights for {base_model}.
164
+
165
+ ## Download model
166
+
167
+ ### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke
168
+
169
+ - **LoRA**: download **[`{repo_folder}.safetensors` here 💾](/{repo_id}/blob/main/{repo_folder}.safetensors)**.
170
+ - Place it in your `models/Lora` folder.
171
+ - On AUTOMATIC1111, load the LoRA by adding `<lora:{repo_folder}:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).
172
+ {webui_example_pivotal}
173
+
174
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
175
+
176
+ ```py
177
+ from diffusers import AutoPipelineForText2Image
178
+ import torch
179
+ {diffusers_imports_pivotal}
180
+ pipeline = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda')
181
+ pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
182
+ {diffusers_example_pivotal}
183
+ image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]
184
+ ```
185
+
186
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
187
+
188
+ ## Trigger words
189
+
190
+ {trigger_str}
191
+
192
+ ## Details
193
+ All [Files & versions](/{repo_id}/tree/main).
194
+
195
+ The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py).
196
+
197
+ LoRA for the text encoder was enabled: {train_text_encoder}.
198
+
199
+ Pivotal tuning was enabled: {train_text_encoder_ti}.
200
+
201
+ Special VAE used for training: {vae_path}.
202
+
203
+ """
204
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
205
+ f.write(yaml + model_card)
206
+
207
+
208
+ def import_model_class_from_model_name_or_path(
209
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
210
+ ):
211
+ text_encoder_config = PretrainedConfig.from_pretrained(
212
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
213
+ )
214
+ model_class = text_encoder_config.architectures[0]
215
+
216
+ if model_class == "CLIPTextModel":
217
+ from transformers import CLIPTextModel
218
+
219
+ return CLIPTextModel
220
+ elif model_class == "CLIPTextModelWithProjection":
221
+ from transformers import CLIPTextModelWithProjection
222
+
223
+ return CLIPTextModelWithProjection
224
+ else:
225
+ raise ValueError(f"{model_class} is not supported.")
226
+
227
+
228
+ def parse_args(input_args=None):
229
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
230
+ parser.add_argument(
231
+ "--pretrained_model_name_or_path",
232
+ type=str,
233
+ default=None,
234
+ required=True,
235
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
236
+ )
237
+ parser.add_argument(
238
+ "--pretrained_vae_model_name_or_path",
239
+ type=str,
240
+ default=None,
241
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
242
+ )
243
+ parser.add_argument(
244
+ "--revision",
245
+ type=str,
246
+ default=None,
247
+ required=False,
248
+ help="Revision of pretrained model identifier from huggingface.co/models.",
249
+ )
250
+ parser.add_argument(
251
+ "--variant",
252
+ type=str,
253
+ default=None,
254
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
255
+ )
256
+ parser.add_argument(
257
+ "--dataset_name",
258
+ type=str,
259
+ default=None,
260
+ help=(
261
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
262
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
263
+ " or to a folder containing files that πŸ€— Datasets can understand.To load the custom captions, the training set directory needs to follow the structure of a "
264
+ "datasets ImageFolder, containing both the images and the corresponding caption for each image. see: "
265
+ "https://huggingface.co/docs/datasets/image_dataset for more information"
266
+ ),
267
+ )
268
+ parser.add_argument(
269
+ "--dataset_config_name",
270
+ type=str,
271
+ default=None,
272
+ help="The config of the Dataset. In some cases, a dataset may have more than one configuration (for example "
273
+ "if it contains different subsets of data within, and you only wish to load a specific subset - in that case specify the desired configuration using --dataset_config_name. Leave as "
274
+ "None if there's only one config.",
275
+ )
276
+ parser.add_argument(
277
+ "--instance_data_dir",
278
+ type=str,
279
+ default=None,
280
+ help="A path to local folder containing the training data of instance images. Specify this arg instead of "
281
+ "--dataset_name if you wish to train using a local folder without custom captions. If you wish to train with custom captions please specify "
282
+ "--dataset_name instead.",
283
+ )
284
+
285
+ parser.add_argument(
286
+ "--cache_dir",
287
+ type=str,
288
+ default=None,
289
+ help="The directory where the downloaded models and datasets will be stored.",
290
+ )
291
+
292
+ parser.add_argument(
293
+ "--image_column",
294
+ type=str,
295
+ default="image",
296
+ help="The column of the dataset containing the target image. By "
297
+ "default, the standard Image Dataset maps out 'file_name' "
298
+ "to 'image'.",
299
+ )
300
+ parser.add_argument(
301
+ "--caption_column",
302
+ type=str,
303
+ default=None,
304
+ help="The column of the dataset containing the instance prompt for each image",
305
+ )
306
+
307
+ parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
308
+
309
+ parser.add_argument(
310
+ "--class_data_dir",
311
+ type=str,
312
+ default=None,
313
+ required=False,
314
+ help="A folder containing the training data of class images.",
315
+ )
316
+ parser.add_argument(
317
+ "--instance_prompt",
318
+ type=str,
319
+ default=None,
320
+ required=True,
321
+ help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
322
+ )
323
+ parser.add_argument(
324
+ "--token_abstraction",
325
+ type=str,
326
+ default="TOK",
327
+ help="identifier specifying the instance(or instances) as used in instance_prompt, validation prompt, "
328
+ "captions - e.g. TOK. To use multiple identifiers, please specify them in a comma seperated string - e.g. "
329
+ "'TOK,TOK2,TOK3' etc.",
330
+ )
331
+
332
+ parser.add_argument(
333
+ "--num_new_tokens_per_abstraction",
334
+ type=int,
335
+ default=2,
336
+ help="number of new tokens inserted to the tokenizers per token_abstraction identifier when "
337
+ "--train_text_encoder_ti = True. By default, each --token_abstraction (e.g. TOK) is mapped to 2 new "
338
+ "tokens - <si><si+1> ",
339
+ )
340
+
341
+ parser.add_argument(
342
+ "--class_prompt",
343
+ type=str,
344
+ default=None,
345
+ help="The prompt to specify images in the same class as provided instance images.",
346
+ )
347
+ parser.add_argument(
348
+ "--validation_prompt",
349
+ type=str,
350
+ default=None,
351
+ help="A prompt that is used during validation to verify that the model is learning.",
352
+ )
353
+ parser.add_argument(
354
+ "--num_validation_images",
355
+ type=int,
356
+ default=4,
357
+ help="Number of images that should be generated during validation with `validation_prompt`.",
358
+ )
359
+ parser.add_argument(
360
+ "--validation_epochs",
361
+ type=int,
362
+ default=50,
363
+ help=(
364
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
365
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
366
+ ),
367
+ )
368
+ parser.add_argument(
369
+ "--with_prior_preservation",
370
+ default=False,
371
+ action="store_true",
372
+ help="Flag to add prior preservation loss.",
373
+ )
374
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
375
+ parser.add_argument(
376
+ "--num_class_images",
377
+ type=int,
378
+ default=100,
379
+ help=(
380
+ "Minimal class images for prior preservation loss. If there are not enough images already present in"
381
+ " class_data_dir, additional images will be sampled with class_prompt."
382
+ ),
383
+ )
384
+ parser.add_argument(
385
+ "--output_dir",
386
+ type=str,
387
+ default="lora-dreambooth-model",
388
+ help="The output directory where the model predictions and checkpoints will be written.",
389
+ )
390
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
391
+ parser.add_argument(
392
+ "--resolution",
393
+ type=int,
394
+ default=1024,
395
+ help=(
396
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
397
+ " resolution"
398
+ ),
399
+ )
400
+ parser.add_argument(
401
+ "--crops_coords_top_left_h",
402
+ type=int,
403
+ default=0,
404
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
405
+ )
406
+ parser.add_argument(
407
+ "--crops_coords_top_left_w",
408
+ type=int,
409
+ default=0,
410
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
411
+ )
412
+ parser.add_argument(
413
+ "--center_crop",
414
+ default=False,
415
+ action="store_true",
416
+ help=(
417
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
418
+ " cropped. The images will be resized to the resolution first before cropping."
419
+ ),
420
+ )
421
+ parser.add_argument(
422
+ "--train_text_encoder",
423
+ action="store_true",
424
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
425
+ )
426
+ parser.add_argument(
427
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
428
+ )
429
+ parser.add_argument(
430
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
431
+ )
432
+ parser.add_argument("--num_train_epochs", type=int, default=1)
433
+ parser.add_argument(
434
+ "--max_train_steps",
435
+ type=int,
436
+ default=None,
437
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
438
+ )
439
+ parser.add_argument(
440
+ "--checkpointing_steps",
441
+ type=int,
442
+ default=500,
443
+ help=(
444
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
445
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
446
+ " training using `--resume_from_checkpoint`."
447
+ ),
448
+ )
449
+ parser.add_argument(
450
+ "--checkpoints_total_limit",
451
+ type=int,
452
+ default=None,
453
+ help=("Max number of checkpoints to store."),
454
+ )
455
+ parser.add_argument(
456
+ "--resume_from_checkpoint",
457
+ type=str,
458
+ default=None,
459
+ help=(
460
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
461
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
462
+ ),
463
+ )
464
+ parser.add_argument(
465
+ "--gradient_accumulation_steps",
466
+ type=int,
467
+ default=1,
468
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
469
+ )
470
+ parser.add_argument(
471
+ "--gradient_checkpointing",
472
+ action="store_true",
473
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
474
+ )
475
+ parser.add_argument(
476
+ "--learning_rate",
477
+ type=float,
478
+ default=1e-4,
479
+ help="Initial learning rate (after the potential warmup period) to use.",
480
+ )
481
+
482
+ parser.add_argument(
483
+ "--text_encoder_lr",
484
+ type=float,
485
+ default=5e-6,
486
+ help="Text encoder learning rate to use.",
487
+ )
488
+ parser.add_argument(
489
+ "--scale_lr",
490
+ action="store_true",
491
+ default=False,
492
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
493
+ )
494
+ parser.add_argument(
495
+ "--lr_scheduler",
496
+ type=str,
497
+ default="constant",
498
+ help=(
499
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
500
+ ' "constant", "constant_with_warmup"]'
501
+ ),
502
+ )
503
+
504
+ parser.add_argument(
505
+ "--snr_gamma",
506
+ type=float,
507
+ default=None,
508
+ help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
509
+ "More details here: https://arxiv.org/abs/2303.09556.",
510
+ )
511
+ parser.add_argument(
512
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
513
+ )
514
+ parser.add_argument(
515
+ "--lr_num_cycles",
516
+ type=int,
517
+ default=1,
518
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
519
+ )
520
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
521
+ parser.add_argument(
522
+ "--dataloader_num_workers",
523
+ type=int,
524
+ default=0,
525
+ help=(
526
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
527
+ ),
528
+ )
529
+
530
+ parser.add_argument(
531
+ "--train_text_encoder_ti",
532
+ action="store_true",
533
+ help=("Whether to use textual inversion"),
534
+ )
535
+
536
+ parser.add_argument(
537
+ "--train_text_encoder_ti_frac",
538
+ type=float,
539
+ default=0.5,
540
+ help=("The percentage of epochs to perform textual inversion"),
541
+ )
542
+
543
+ parser.add_argument(
544
+ "--train_text_encoder_frac",
545
+ type=float,
546
+ default=1.0,
547
+ help=("The percentage of epochs to perform text encoder tuning"),
548
+ )
549
+
550
+ parser.add_argument(
551
+ "--optimizer",
552
+ type=str,
553
+ default="adamW",
554
+ help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
555
+ )
556
+
557
+ parser.add_argument(
558
+ "--use_8bit_adam",
559
+ action="store_true",
560
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
561
+ )
562
+
563
+ parser.add_argument(
564
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
565
+ )
566
+ parser.add_argument(
567
+ "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
568
+ )
569
+ parser.add_argument(
570
+ "--prodigy_beta3",
571
+ type=float,
572
+ default=None,
573
+ help="coefficients for computing the Prodidy stepsize using running averages. If set to None, "
574
+ "uses the value of square root of beta2. Ignored if optimizer is adamW",
575
+ )
576
+ parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
577
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
578
+ parser.add_argument(
579
+ "--adam_weight_decay_text_encoder", type=float, default=None, help="Weight decay to use for text_encoder"
580
+ )
581
+
582
+ parser.add_argument(
583
+ "--adam_epsilon",
584
+ type=float,
585
+ default=1e-08,
586
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
587
+ )
588
+
589
+ parser.add_argument(
590
+ "--prodigy_use_bias_correction",
591
+ type=bool,
592
+ default=True,
593
+ help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
594
+ )
595
+ parser.add_argument(
596
+ "--prodigy_safeguard_warmup",
597
+ type=bool,
598
+ default=True,
599
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
600
+ "Ignored if optimizer is adamW",
601
+ )
602
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
603
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
604
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
605
+ parser.add_argument(
606
+ "--hub_model_id",
607
+ type=str,
608
+ default=None,
609
+ help="The name of the repository to keep in sync with the local `output_dir`.",
610
+ )
611
+ parser.add_argument(
612
+ "--logging_dir",
613
+ type=str,
614
+ default="logs",
615
+ help=(
616
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
617
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
618
+ ),
619
+ )
620
+ parser.add_argument(
621
+ "--allow_tf32",
622
+ action="store_true",
623
+ help=(
624
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
625
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
626
+ ),
627
+ )
628
+ parser.add_argument(
629
+ "--report_to",
630
+ type=str,
631
+ default="tensorboard",
632
+ help=(
633
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
634
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
635
+ ),
636
+ )
637
+ parser.add_argument(
638
+ "--mixed_precision",
639
+ type=str,
640
+ default=None,
641
+ choices=["no", "fp16", "bf16"],
642
+ help=(
643
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
644
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
645
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
646
+ ),
647
+ )
648
+ parser.add_argument(
649
+ "--prior_generation_precision",
650
+ type=str,
651
+ default=None,
652
+ choices=["no", "fp32", "fp16", "bf16"],
653
+ help=(
654
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
655
+ " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
656
+ ),
657
+ )
658
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
659
+ parser.add_argument(
660
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
661
+ )
662
+ parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
663
+ parser.add_argument(
664
+ "--rank",
665
+ type=int,
666
+ default=4,
667
+ help=("The dimension of the LoRA update matrices."),
668
+ )
669
+ parser.add_argument(
670
+ "--cache_latents",
671
+ action="store_true",
672
+ default=False,
673
+ help="Cache the VAE latents",
674
+ )
675
+
676
+ if input_args is not None:
677
+ args = parser.parse_args(input_args)
678
+ else:
679
+ args = parser.parse_args()
680
+
681
+ if args.dataset_name is None and args.instance_data_dir is None:
682
+ raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")
683
+
684
+ if args.dataset_name is not None and args.instance_data_dir is not None:
685
+ raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")
686
+
687
+ if args.train_text_encoder and args.train_text_encoder_ti:
688
+ raise ValueError(
689
+ "Specify only one of `--train_text_encoder` or `--train_text_encoder_ti. "
690
+ "For full LoRA text encoder training check --train_text_encoder, for textual "
691
+ "inversion training check `--train_text_encoder_ti`"
692
+ )
693
+
694
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
695
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
696
+ args.local_rank = env_local_rank
697
+
698
+ if args.with_prior_preservation:
699
+ if args.class_data_dir is None:
700
+ raise ValueError("You must specify a data directory for class images.")
701
+ if args.class_prompt is None:
702
+ raise ValueError("You must specify prompt for class images.")
703
+ else:
704
+ # logger is not available yet
705
+ if args.class_data_dir is not None:
706
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
707
+ if args.class_prompt is not None:
708
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
709
+
710
+ return args
711
+
712
+
713
+ # Taken from https://github.com/replicate/cog-sdxl/blob/main/dataset_and_utils.py
714
+ class TokenEmbeddingsHandler:
715
+ def __init__(self, text_encoders, tokenizers):
716
+ self.text_encoders = text_encoders
717
+ self.tokenizers = tokenizers
718
+
719
+ self.train_ids: Optional[torch.Tensor] = None
720
+ self.inserting_toks: Optional[List[str]] = None
721
+ self.embeddings_settings = {}
722
+
723
+ def initialize_new_tokens(self, inserting_toks: List[str]):
724
+ idx = 0
725
+ for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
726
+ assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
727
+ assert all(
728
+ isinstance(tok, str) for tok in inserting_toks
729
+ ), "All elements in inserting_toks should be strings."
730
+
731
+ self.inserting_toks = inserting_toks
732
+ special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
733
+ tokenizer.add_special_tokens(special_tokens_dict)
734
+ text_encoder.resize_token_embeddings(len(tokenizer))
735
+
736
+ self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks)
737
+
738
+ # random initialization of new tokens
739
+ std_token_embedding = text_encoder.text_model.embeddings.token_embedding.weight.data.std()
740
+
741
+ print(f"{idx} text encodedr's std_token_embedding: {std_token_embedding}")
742
+
743
+ text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids] = (
744
+ torch.randn(len(self.train_ids), text_encoder.text_model.config.hidden_size)
745
+ .to(device=self.device)
746
+ .to(dtype=self.dtype)
747
+ * std_token_embedding
748
+ )
749
+ self.embeddings_settings[
750
+ f"original_embeddings_{idx}"
751
+ ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
752
+ self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
753
+
754
+ inu = torch.ones((len(tokenizer),), dtype=torch.bool)
755
+ inu[self.train_ids] = False
756
+
757
+ self.embeddings_settings[f"index_no_updates_{idx}"] = inu
758
+
759
+ print(self.embeddings_settings[f"index_no_updates_{idx}"].shape)
760
+
761
+ idx += 1
762
+
763
+ def save_embeddings(self, file_path: str):
764
+ assert self.train_ids is not None, "Initialize new tokens before saving embeddings."
765
+ tensors = {}
766
+ # text_encoder_0 - CLIP ViT-L/14, text_encoder_1 - CLIP ViT-G/14
767
+ idx_to_text_encoder_name = {0: "clip_l", 1: "clip_g"}
768
+ for idx, text_encoder in enumerate(self.text_encoders):
769
+ assert text_encoder.text_model.embeddings.token_embedding.weight.data.shape[0] == len(
770
+ self.tokenizers[0]
771
+ ), "Tokenizers should be the same."
772
+ new_token_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids]
773
+
774
+ # New tokens for each text encoder are saved under "clip_l" (for text_encoder 0), "clip_g" (for
775
+ # text_encoder 1) to keep compatible with the ecosystem.
776
+ # Note: When loading with diffusers, any name can work - simply specify the key at inference time
777
+ tensors[idx_to_text_encoder_name[idx]] = new_token_embeddings
778
+ # tensors[f"text_encoders_{idx}"] = new_token_embeddings
779
+
780
+ save_file(tensors, file_path)
781
+
782
+ @property
783
+ def dtype(self):
784
+ return self.text_encoders[0].dtype
785
+
786
+ @property
787
+ def device(self):
788
+ return self.text_encoders[0].device
789
+
790
+ @torch.no_grad()
791
+ def retract_embeddings(self):
792
+ for idx, text_encoder in enumerate(self.text_encoders):
793
+ index_no_updates = self.embeddings_settings[f"index_no_updates_{idx}"]
794
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_no_updates] = (
795
+ self.embeddings_settings[f"original_embeddings_{idx}"][index_no_updates]
796
+ .to(device=text_encoder.device)
797
+ .to(dtype=text_encoder.dtype)
798
+ )
799
+
800
+ # for the parts that were updated, we need to normalize them
801
+ # to have the same std as before
802
+ std_token_embedding = self.embeddings_settings[f"std_token_embedding_{idx}"]
803
+
804
+ index_updates = ~index_no_updates
805
+ new_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates]
806
+ off_ratio = std_token_embedding / new_embeddings.std()
807
+
808
+ new_embeddings = new_embeddings * (off_ratio**0.1)
809
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates] = new_embeddings
810
+
811
+
812
+ class DreamBoothDataset(Dataset):
813
+ """
814
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
815
+ It pre-processes the images.
816
+ """
817
+
818
+ def __init__(
819
+ self,
820
+ instance_data_root,
821
+ instance_prompt,
822
+ class_prompt,
823
+ dataset_name,
824
+ dataset_config_name,
825
+ cache_dir,
826
+ image_column,
827
+ caption_column,
828
+ train_text_encoder_ti,
829
+ class_data_root=None,
830
+ class_num=None,
831
+ token_abstraction_dict=None, # token mapping for textual inversion
832
+ size=1024,
833
+ repeats=1,
834
+ center_crop=False,
835
+ ):
836
+ self.size = size
837
+ self.center_crop = center_crop
838
+
839
+ self.instance_prompt = instance_prompt
840
+ self.custom_instance_prompts = None
841
+ self.class_prompt = class_prompt
842
+ self.token_abstraction_dict = token_abstraction_dict
843
+ self.train_text_encoder_ti = train_text_encoder_ti
844
+ # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
845
+ # we load the training data using load_dataset
846
+ if dataset_name is not None:
847
+ try:
848
+ from datasets import load_dataset
849
+ except ImportError:
850
+ raise ImportError(
851
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
852
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
853
+ "local folder containing images only, specify --instance_data_dir instead."
854
+ )
855
+ # Downloading and loading a dataset from the hub.
856
+ # See more about loading custom images at
857
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
858
+ dataset = load_dataset(
859
+ dataset_name,
860
+ dataset_config_name,
861
+ cache_dir=cache_dir,
862
+ )
863
+ # Preprocessing the datasets.
864
+ column_names = dataset["train"].column_names
865
+
866
+ # 6. Get the column names for input/target.
867
+ if image_column is None:
868
+ image_column = column_names[0]
869
+ logger.info(f"image column defaulting to {image_column}")
870
+ else:
871
+ if image_column not in column_names:
872
+ raise ValueError(
873
+ f"`--image_column` value '{image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
874
+ )
875
+ instance_images = dataset["train"][image_column]
876
+
877
+ if caption_column is None:
878
+ logger.info(
879
+ "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
880
+ "contains captions/prompts for the images, make sure to specify the "
881
+ "column as --caption_column"
882
+ )
883
+ self.custom_instance_prompts = None
884
+ else:
885
+ if caption_column not in column_names:
886
+ raise ValueError(
887
+ f"`--caption_column` value '{caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
888
+ )
889
+ custom_instance_prompts = dataset["train"][caption_column]
890
+ # create final list of captions according to --repeats
891
+ self.custom_instance_prompts = []
892
+ for caption in custom_instance_prompts:
893
+ self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
894
+ else:
895
+ self.instance_data_root = Path(instance_data_root)
896
+ if not self.instance_data_root.exists():
897
+ raise ValueError("Instance images root doesn't exists.")
898
+
899
+ instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
900
+ self.custom_instance_prompts = None
901
+
902
+ self.instance_images = []
903
+ for img in instance_images:
904
+ self.instance_images.extend(itertools.repeat(img, repeats))
905
+ self.num_instance_images = len(self.instance_images)
906
+ self._length = self.num_instance_images
907
+
908
+ if class_data_root is not None:
909
+ self.class_data_root = Path(class_data_root)
910
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
911
+ self.class_images_path = list(self.class_data_root.iterdir())
912
+ if class_num is not None:
913
+ self.num_class_images = min(len(self.class_images_path), class_num)
914
+ else:
915
+ self.num_class_images = len(self.class_images_path)
916
+ self._length = max(self.num_class_images, self.num_instance_images)
917
+ else:
918
+ self.class_data_root = None
919
+
920
+ self.image_transforms = transforms.Compose(
921
+ [
922
+ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
923
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
924
+ transforms.ToTensor(),
925
+ transforms.Normalize([0.5], [0.5]),
926
+ ]
927
+ )
928
+
929
+ def __len__(self):
930
+ return self._length
931
+
932
+ def __getitem__(self, index):
933
+ example = {}
934
+ instance_image = self.instance_images[index % self.num_instance_images]
935
+ instance_image = exif_transpose(instance_image)
936
+
937
+ if not instance_image.mode == "RGB":
938
+ instance_image = instance_image.convert("RGB")
939
+ example["instance_images"] = self.image_transforms(instance_image)
940
+
941
+ if self.custom_instance_prompts:
942
+ caption = self.custom_instance_prompts[index % self.num_instance_images]
943
+ if caption:
944
+ if self.train_text_encoder_ti:
945
+ # replace instances of --token_abstraction in caption with the new tokens: "<si><si+1>" etc.
946
+ for token_abs, token_replacement in self.token_abstraction_dict.items():
947
+ caption = caption.replace(token_abs, "".join(token_replacement))
948
+ example["instance_prompt"] = caption
949
+ else:
950
+ example["instance_prompt"] = self.instance_prompt
951
+
952
+ else: # custom prompts were provided, but length does not match size of image dataset
953
+ example["instance_prompt"] = self.instance_prompt
954
+
955
+ if self.class_data_root:
956
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
957
+ class_image = exif_transpose(class_image)
958
+
959
+ if not class_image.mode == "RGB":
960
+ class_image = class_image.convert("RGB")
961
+ example["class_images"] = self.image_transforms(class_image)
962
+ example["class_prompt"] = self.class_prompt
963
+
964
+ return example
965
+
966
+
967
+ def collate_fn(examples, with_prior_preservation=False):
968
+ pixel_values = [example["instance_images"] for example in examples]
969
+ prompts = [example["instance_prompt"] for example in examples]
970
+
971
+ # Concat class and instance examples for prior preservation.
972
+ # We do this to avoid doing two forward passes.
973
+ if with_prior_preservation:
974
+ pixel_values += [example["class_images"] for example in examples]
975
+ prompts += [example["class_prompt"] for example in examples]
976
+
977
+ pixel_values = torch.stack(pixel_values)
978
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
979
+
980
+ batch = {"pixel_values": pixel_values, "prompts": prompts}
981
+ return batch
982
+
983
+
984
+ class PromptDataset(Dataset):
985
+ "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
986
+
987
+ def __init__(self, prompt, num_samples):
988
+ self.prompt = prompt
989
+ self.num_samples = num_samples
990
+
991
+ def __len__(self):
992
+ return self.num_samples
993
+
994
+ def __getitem__(self, index):
995
+ example = {}
996
+ example["prompt"] = self.prompt
997
+ example["index"] = index
998
+ return example
999
+
1000
+
1001
+ def tokenize_prompt(tokenizer, prompt, add_special_tokens=False):
1002
+ text_inputs = tokenizer(
1003
+ prompt,
1004
+ padding="max_length",
1005
+ max_length=tokenizer.model_max_length,
1006
+ truncation=True,
1007
+ add_special_tokens=add_special_tokens,
1008
+ return_tensors="pt",
1009
+ )
1010
+ text_input_ids = text_inputs.input_ids
1011
+ return text_input_ids
1012
+
1013
+
1014
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
1015
+ def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
1016
+ prompt_embeds_list = []
1017
+
1018
+ for i, text_encoder in enumerate(text_encoders):
1019
+ if tokenizers is not None:
1020
+ tokenizer = tokenizers[i]
1021
+ text_input_ids = tokenize_prompt(tokenizer, prompt)
1022
+ else:
1023
+ assert text_input_ids_list is not None
1024
+ text_input_ids = text_input_ids_list[i]
1025
+
1026
+ prompt_embeds = text_encoder(
1027
+ text_input_ids.to(text_encoder.device),
1028
+ output_hidden_states=True,
1029
+ )
1030
+
1031
+ # We are only ever interested in the pooled output of the final text encoder
1032
+ pooled_prompt_embeds = prompt_embeds[0]
1033
+ prompt_embeds = prompt_embeds.hidden_states[-2]
1034
+ bs_embed, seq_len, _ = prompt_embeds.shape
1035
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
1036
+ prompt_embeds_list.append(prompt_embeds)
1037
+
1038
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
1039
+ pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
1040
+ return prompt_embeds, pooled_prompt_embeds
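For context on the concatenation above, a shape sketch under the standard SDXL base text-encoder sizes (the 768/1280 dimensions are general SDXL facts, not values read from this file):

# text_encoder 1 (CLIP ViT-L):        hidden_states[-2] -> (batch, 77, 768)
# text_encoder 2 (OpenCLIP ViT-bigG): hidden_states[-2] -> (batch, 77, 1280)
# torch.concat(..., dim=-1)           -> prompt_embeds of shape (batch, 77, 2048)
# pooled output of text_encoder 2     -> pooled_prompt_embeds of shape (batch, 1280)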
1041
+
1042
+
1043
+ def main(args):
1044
+ logging_dir = Path(args.output_dir, args.logging_dir)
1045
+
1046
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
1047
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
1048
+ accelerator = Accelerator(
1049
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
1050
+ mixed_precision=args.mixed_precision,
1051
+ log_with=args.report_to,
1052
+ project_config=accelerator_project_config,
1053
+ kwargs_handlers=[kwargs],
1054
+ )
1055
+
1056
+ if args.report_to == "wandb":
1057
+ if not is_wandb_available():
1058
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
1059
+ import wandb
1060
+
1061
+ # Make one log on every process with the configuration for debugging.
1062
+ logging.basicConfig(
1063
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
1064
+ datefmt="%m/%d/%Y %H:%M:%S",
1065
+ level=logging.INFO,
1066
+ )
1067
+ logger.info(accelerator.state, main_process_only=False)
1068
+ if accelerator.is_local_main_process:
1069
+ transformers.utils.logging.set_verbosity_warning()
1070
+ diffusers.utils.logging.set_verbosity_info()
1071
+ else:
1072
+ transformers.utils.logging.set_verbosity_error()
1073
+ diffusers.utils.logging.set_verbosity_error()
1074
+
1075
+ # If passed along, set the training seed now.
1076
+ if args.seed is not None:
1077
+ set_seed(args.seed)
1078
+
1079
+ # Generate class images if prior preservation is enabled.
1080
+ if args.with_prior_preservation:
1081
+ class_images_dir = Path(args.class_data_dir)
1082
+ if not class_images_dir.exists():
1083
+ class_images_dir.mkdir(parents=True)
1084
+ cur_class_images = len(list(class_images_dir.iterdir()))
1085
+
1086
+ if cur_class_images < args.num_class_images:
1087
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
1088
+ if args.prior_generation_precision == "fp32":
1089
+ torch_dtype = torch.float32
1090
+ elif args.prior_generation_precision == "fp16":
1091
+ torch_dtype = torch.float16
1092
+ elif args.prior_generation_precision == "bf16":
1093
+ torch_dtype = torch.bfloat16
1094
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1095
+ args.pretrained_model_name_or_path,
1096
+ torch_dtype=torch_dtype,
1097
+ revision=args.revision,
1098
+ variant=args.variant,
1099
+ )
1100
+ pipeline.set_progress_bar_config(disable=True)
1101
+
1102
+ num_new_images = args.num_class_images - cur_class_images
1103
+ logger.info(f"Number of class images to sample: {num_new_images}.")
1104
+
1105
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
1106
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
1107
+
1108
+ sample_dataloader = accelerator.prepare(sample_dataloader)
1109
+ pipeline.to(accelerator.device)
1110
+
1111
+ for example in tqdm(
1112
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
1113
+ ):
1114
+ images = pipeline(example["prompt"]).images
1115
+
1116
+ for i, image in enumerate(images):
1117
+ hash_image = hashlib.sha1(image.tobytes()).hexdigest()
1118
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
1119
+ image.save(image_filename)
1120
+
1121
+ del pipeline
1122
+ if torch.cuda.is_available():
1123
+ torch.cuda.empty_cache()
1124
+
1125
+ # Handle the repository creation
1126
+ if accelerator.is_main_process:
1127
+ if args.output_dir is not None:
1128
+ os.makedirs(args.output_dir, exist_ok=True)
1129
+
1130
+ model_id = args.hub_model_id or Path(args.output_dir).name
1131
+ repo_id = None
1132
+ if args.push_to_hub:
1133
+ repo_id = create_repo(repo_id=model_id, exist_ok=True, token=args.hub_token).repo_id
1134
+
1135
+ # Load the tokenizers
1136
+ tokenizer_one = AutoTokenizer.from_pretrained(
1137
+ args.pretrained_model_name_or_path,
1138
+ subfolder="tokenizer",
1139
+ revision=args.revision,
1140
+ variant=args.variant,
1141
+ use_fast=False,
1142
+ )
1143
+ tokenizer_two = AutoTokenizer.from_pretrained(
1144
+ args.pretrained_model_name_or_path,
1145
+ subfolder="tokenizer_2",
1146
+ revision=args.revision,
1147
+ variant=args.variant,
1148
+ use_fast=False,
1149
+ )
1150
+
1151
+ # import correct text encoder classes
1152
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(
1153
+ args.pretrained_model_name_or_path, args.revision
1154
+ )
1155
+ text_encoder_cls_two = import_model_class_from_model_name_or_path(
1156
+ args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
1157
+ )
1158
+
1159
+ # Load scheduler and models
1160
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
1161
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1162
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
1163
+ )
1164
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
1165
+ args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
1166
+ )
1167
+ vae_path = (
1168
+ args.pretrained_model_name_or_path
1169
+ if args.pretrained_vae_model_name_or_path is None
1170
+ else args.pretrained_vae_model_name_or_path
1171
+ )
1172
+ vae = AutoencoderKL.from_pretrained(
1173
+ vae_path,
1174
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
1175
+ revision=args.revision,
1176
+ variant=args.variant,
1177
+ )
1178
+ vae_scaling_factor = vae.config.scaling_factor
1179
+ unet = UNet2DConditionModel.from_pretrained(
1180
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
1181
+ )
1182
+
1183
+ if args.train_text_encoder_ti:
1184
+ # we parse the provided token identifier (or identifiers) into a list. s.t. - "TOK" -> ["TOK"], "TOK,
1185
+ # TOK2" -> ["TOK", "TOK2"] etc.
1186
+ token_abstraction_list = "".join(args.token_abstraction.split()).split(",")
1187
+ logger.info(f"list of token identifiers: {token_abstraction_list}")
1188
+
1189
+ token_abstraction_dict = {}
1190
+ token_idx = 0
1191
+ for i, token in enumerate(token_abstraction_list):
1192
+ token_abstraction_dict[token] = [
1193
+ f"<s{token_idx + i + j}>" for j in range(args.num_new_tokens_per_abstraction)
1194
+ ]
1195
+ token_idx += args.num_new_tokens_per_abstraction - 1
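+ # e.g. token_abstraction="TOK" with num_new_tokens_per_abstraction=2 yields {"TOK": ["<s0>", "<s1>"]},
+ # so a prompt like "a photo of TOK" becomes "a photo of <s0><s1>" in the replacement step below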
1196
+
1197
+ # replace instances of --token_abstraction in --instance_prompt with the new tokens: "<si><si+1>" etc.
1198
+ for token_abs, token_replacement in token_abstraction_dict.items():
1199
+ args.instance_prompt = args.instance_prompt.replace(token_abs, "".join(token_replacement))
1200
+ if args.with_prior_preservation:
1201
+ args.class_prompt = args.class_prompt.replace(token_abs, "".join(token_replacement))
1202
+
1203
+ # initialize the new tokens for textual inversion
1204
+ embedding_handler = TokenEmbeddingsHandler(
1205
+ [text_encoder_one, text_encoder_two], [tokenizer_one, tokenizer_two]
1206
+ )
1207
+ inserting_toks = []
1208
+ for new_tok in token_abstraction_dict.values():
1209
+ inserting_toks.extend(new_tok)
1210
+ embedding_handler.initialize_new_tokens(inserting_toks=inserting_toks)
1211
+
1212
+ # We only train the additional adapter LoRA layers
1213
+ vae.requires_grad_(False)
1214
+ text_encoder_one.requires_grad_(False)
1215
+ text_encoder_two.requires_grad_(False)
1216
+ unet.requires_grad_(False)
1217
+
1218
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
1219
+ # as these weights are only used for inference, keeping weights in full precision is not required.
1220
+ weight_dtype = torch.float32
1221
+ if accelerator.mixed_precision == "fp16":
1222
+ weight_dtype = torch.float16
1223
+ elif accelerator.mixed_precision == "bf16":
1224
+ weight_dtype = torch.bfloat16
1225
+
1226
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
1227
+ unet.to(accelerator.device, dtype=weight_dtype)
1228
+
1229
+ # The VAE is always in float32 to avoid NaN losses.
1230
+ vae.to(accelerator.device, dtype=torch.float32)
1231
+
1232
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
1233
+ text_encoder_two.to(accelerator.device, dtype=weight_dtype)
1234
+
1235
+ if args.enable_xformers_memory_efficient_attention:
1236
+ if is_xformers_available():
1237
+ import xformers
1238
+
1239
+ xformers_version = version.parse(xformers.__version__)
1240
+ if xformers_version == version.parse("0.0.16"):
1241
+ logger.warn(
1242
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
1243
+ "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
1244
+ )
1245
+ unet.enable_xformers_memory_efficient_attention()
1246
+ else:
1247
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
1248
+
1249
+ if args.gradient_checkpointing:
1250
+ unet.enable_gradient_checkpointing()
1251
+ if args.train_text_encoder:
1252
+ text_encoder_one.gradient_checkpointing_enable()
1253
+ text_encoder_two.gradient_checkpointing_enable()
1254
+
1255
+ # now we will add new LoRA weights to the attention layers
1256
+ unet_lora_config = LoraConfig(
1257
+ r=args.rank,
1258
+ lora_alpha=args.rank,
1259
+ init_lora_weights="gaussian",
1260
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
1261
+ )
1262
+ unet.add_adapter(unet_lora_config)
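+ # note: with lora_alpha set equal to the rank, the LoRA scaling factor (lora_alpha / r) is 1.0,
+ # so the adapter output is added to the frozen attention projections unscaled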
1263
+
1264
+ # The text encoders come from 🤗 transformers; rather than modifying their base weights directly,
1265
+ # we attach LoRA adapters to their attention projection layers via PEFT.
1266
+ if args.train_text_encoder:
1267
+ text_lora_config = LoraConfig(
1268
+ r=args.rank,
1269
+ lora_alpha=args.rank,
1270
+ init_lora_weights="gaussian",
1271
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
1272
+ )
1273
+ text_encoder_one.add_adapter(text_lora_config)
1274
+ text_encoder_two.add_adapter(text_lora_config)
1275
+
1276
+ # if we use textual inversion, we freeze all parameters except for the token embeddings
1277
+ # in text encoder
1278
+ elif args.train_text_encoder_ti:
1279
+ text_lora_parameters_one = []
1280
+ for name, param in text_encoder_one.named_parameters():
1281
+ if "token_embedding" in name:
1282
+ # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
1283
+ param.data = param.to(dtype=torch.float32)
1284
+ param.requires_grad = True
1285
+ text_lora_parameters_one.append(param)
1286
+ else:
1287
+ param.requires_grad = False
1288
+ text_lora_parameters_two = []
1289
+ for name, param in text_encoder_two.named_parameters():
1290
+ if "token_embedding" in name:
1291
+ # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
1292
+ param.data = param.to(dtype=torch.float32)
1293
+ param.requires_grad = True
1294
+ text_lora_parameters_two.append(param)
1295
+ else:
1296
+ param.requires_grad = False
1297
+
1298
+ def unwrap_model(model):
1299
+ model = accelerator.unwrap_model(model)
1300
+ model = model._orig_mod if is_compiled_module(model) else model
1301
+ return model
1302
+
1303
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1304
+ def save_model_hook(models, weights, output_dir):
1305
+ if accelerator.is_main_process:
1306
+ # there are only two options here: either just the unet attention processor layers,
1307
+ # or the unet plus the text encoder attention layers
1308
+ unet_lora_layers_to_save = None
1309
+ text_encoder_one_lora_layers_to_save = None
1310
+ text_encoder_two_lora_layers_to_save = None
1311
+
1312
+ for model in models:
1313
+ if isinstance(model, type(unwrap_model(unet))):
1314
+ unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
1315
+ elif isinstance(model, type(unwrap_model(text_encoder_one))):
1316
+ if args.train_text_encoder:
1317
+ text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers(
1318
+ get_peft_model_state_dict(model)
1319
+ )
1320
+ elif isinstance(model, type(unwrap_model(text_encoder_two))):
1321
+ if args.train_text_encoder:
1322
+ text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers(
1323
+ get_peft_model_state_dict(model)
1324
+ )
1325
+ else:
1326
+ raise ValueError(f"unexpected save model: {model.__class__}")
1327
+
1328
+ # make sure to pop the weight so that the corresponding model is not saved again
1329
+ weights.pop()
1330
+
1331
+ StableDiffusionXLPipeline.save_lora_weights(
1332
+ output_dir,
1333
+ unet_lora_layers=unet_lora_layers_to_save,
1334
+ text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
1335
+ text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
1336
+ )
1337
+ if args.train_text_encoder_ti:
1338
+ embedding_handler.save_embeddings(f"{output_dir}/{args.output_dir}_emb.safetensors")
1339
+
1340
+ def load_model_hook(models, input_dir):
1341
+ unet_ = None
1342
+ text_encoder_one_ = None
1343
+ text_encoder_two_ = None
1344
+
1345
+ while len(models) > 0:
1346
+ model = models.pop()
1347
+
1348
+ if isinstance(model, type(unwrap_model(unet))):
1349
+ unet_ = model
1350
+ elif isinstance(model, type(unwrap_model(text_encoder_one))):
1351
+ text_encoder_one_ = model
1352
+ elif isinstance(model, type(unwrap_model(text_encoder_two))):
1353
+ text_encoder_two_ = model
1354
+ else:
1355
+ raise ValueError(f"unexpected save model: {model.__class__}")
1356
+
1357
+ lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
1358
+
1359
+ unet_state_dict = {f'{k.replace("unet.", "")}': v for k, v in lora_state_dict.items() if k.startswith("unet.")}
1360
+ unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict)
1361
+ incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default")
1362
+ if incompatible_keys is not None:
1363
+ # check only for unexpected keys
1364
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
1365
+ if unexpected_keys:
1366
+ logger.warning(
1367
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
1368
+ f" {unexpected_keys}. "
1369
+ )
1370
+
1371
+ if args.train_text_encoder:
1372
+ _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_)
1373
+
1374
+ _set_state_dict_into_text_encoder(
1375
+ lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_two_
1376
+ )
1377
+
1378
+ # Make sure the trainable params are in float32. This is again needed since the base models
1379
+ # are in `weight_dtype`. More details:
1380
+ # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
1381
+ if args.mixed_precision == "fp16":
1382
+ models = [unet_]
1383
+ if args.train_text_encoder:
1384
+ models.extend([text_encoder_one_, text_encoder_two_])
1385
+ cast_training_params(models)
1386
+
1387
+ accelerator.register_save_state_pre_hook(save_model_hook)
1388
+ accelerator.register_load_state_pre_hook(load_model_hook)
1389
+
1390
+ # Enable TF32 for faster training on Ampere GPUs,
1391
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1392
+ if args.allow_tf32:
1393
+ torch.backends.cuda.matmul.allow_tf32 = True
1394
+
1395
+ if args.scale_lr:
1396
+ args.learning_rate = (
1397
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1398
+ )
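+ # e.g. learning_rate=1e-4 with gradient_accumulation_steps=2, train_batch_size=4 and 2 processes
+ # scales to an effective learning rate of 1.6e-3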
1399
+
1400
+ # Make sure the trainable params are in float32.
1401
+ if args.mixed_precision == "fp16":
1402
+ models = [unet]
1403
+ if args.train_text_encoder:
1404
+ models.extend([text_encoder_one, text_encoder_two])
1405
+ cast_training_params(models, dtype=torch.float32)
1406
+
1407
+ unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters()))
1408
+
1409
+ if args.train_text_encoder:
1410
+ text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
1411
+ text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
1412
+
1413
+ # If neither --train_text_encoder nor --train_text_encoder_ti, text_encoders remain frozen during training
1414
+ freeze_text_encoder = not (args.train_text_encoder or args.train_text_encoder_ti)
1415
+
1416
+ # Optimization parameters
1417
+ unet_lora_parameters_with_lr = {"params": unet_lora_parameters, "lr": args.learning_rate}
1418
+ if not freeze_text_encoder:
1419
+ # different learning rate for text encoder and unet
1420
+ text_lora_parameters_one_with_lr = {
1421
+ "params": text_lora_parameters_one,
1422
+ "weight_decay": args.adam_weight_decay_text_encoder
1423
+ if args.adam_weight_decay_text_encoder
1424
+ else args.adam_weight_decay,
1425
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1426
+ }
1427
+ text_lora_parameters_two_with_lr = {
1428
+ "params": text_lora_parameters_two,
1429
+ "weight_decay": args.adam_weight_decay_text_encoder
1430
+ if args.adam_weight_decay_text_encoder
1431
+ else args.adam_weight_decay,
1432
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1433
+ }
1434
+ params_to_optimize = [
1435
+ unet_lora_parameters_with_lr,
1436
+ text_lora_parameters_one_with_lr,
1437
+ text_lora_parameters_two_with_lr,
1438
+ ]
1439
+ else:
1440
+ params_to_optimize = [unet_lora_parameters_with_lr]
1441
+
1442
+ # Optimizer creation
1443
+ if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
1444
+ logger.warn(
1445
+ f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
1446
+ "Defaulting to adamW"
1447
+ )
1448
+ args.optimizer = "adamw"
1449
+
1450
+ if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
1451
+ logger.warn(
1452
+ f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
1453
+ f"set to {args.optimizer.lower()}"
1454
+ )
1455
+
1456
+ if args.optimizer.lower() == "adamw":
1457
+ if args.use_8bit_adam:
1458
+ try:
1459
+ import bitsandbytes as bnb
1460
+ except ImportError:
1461
+ raise ImportError(
1462
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
1463
+ )
1464
+
1465
+ optimizer_class = bnb.optim.AdamW8bit
1466
+ else:
1467
+ optimizer_class = torch.optim.AdamW
1468
+
1469
+ optimizer = optimizer_class(
1470
+ params_to_optimize,
1471
+ betas=(args.adam_beta1, args.adam_beta2),
1472
+ weight_decay=args.adam_weight_decay,
1473
+ eps=args.adam_epsilon,
1474
+ )
1475
+
1476
+ if args.optimizer.lower() == "prodigy":
1477
+ try:
1478
+ import prodigyopt
1479
+ except ImportError:
1480
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
1481
+
1482
+ optimizer_class = prodigyopt.Prodigy
1483
+
1484
+ if args.learning_rate <= 0.1:
1485
+ logger.warn(
1486
+ "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
1487
+ )
1488
+ if args.train_text_encoder and args.text_encoder_lr:
1489
+ logger.warn(
1490
+ f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
1491
+ f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
1492
+ f"When using prodigy only learning_rate is used as the initial learning rate."
1493
+ )
1494
+ # changes the learning rate of text_encoder_parameters_one and text_encoder_parameters_two to be
1495
+ # --learning_rate
1496
+ params_to_optimize[1]["lr"] = args.learning_rate
1497
+ params_to_optimize[2]["lr"] = args.learning_rate
1498
+
1499
+ optimizer = optimizer_class(
1500
+ params_to_optimize,
1501
+ lr=args.learning_rate,
1502
+ betas=(args.adam_beta1, args.adam_beta2),
1503
+ beta3=args.prodigy_beta3,
1504
+ weight_decay=args.adam_weight_decay,
1505
+ eps=args.adam_epsilon,
1506
+ decouple=args.prodigy_decouple,
1507
+ use_bias_correction=args.prodigy_use_bias_correction,
1508
+ safeguard_warmup=args.prodigy_safeguard_warmup,
1509
+ )
1510
+
1511
+ # Dataset and DataLoaders creation:
1512
+ train_dataset = DreamBoothDataset(
1513
+ instance_data_root=args.instance_data_dir,
1514
+ instance_prompt=args.instance_prompt,
1515
+ class_prompt=args.class_prompt,
1516
+ dataset_name=args.dataset_name,
1517
+ dataset_config_name=args.dataset_config_name,
1518
+ cache_dir=args.cache_dir,
1519
+ image_column=args.image_column,
1520
+ train_text_encoder_ti=args.train_text_encoder_ti,
1521
+ caption_column=args.caption_column,
1522
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1523
+ token_abstraction_dict=token_abstraction_dict if args.train_text_encoder_ti else None,
1524
+ class_num=args.num_class_images,
1525
+ size=args.resolution,
1526
+ repeats=args.repeats,
1527
+ center_crop=args.center_crop,
1528
+ )
1529
+
1530
+ train_dataloader = torch.utils.data.DataLoader(
1531
+ train_dataset,
1532
+ batch_size=args.train_batch_size,
1533
+ shuffle=True,
1534
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1535
+ num_workers=args.dataloader_num_workers,
1536
+ )
1537
+
1538
+ # Computes additional embeddings/ids required by the SDXL UNet.
1539
+ # regular text embeddings (when `train_text_encoder` is not True)
1540
+ # pooled text embeddings
1541
+ # time ids
1542
+
1543
+ def compute_time_ids():
1544
+ # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
1545
+ original_size = (args.resolution, args.resolution)
1546
+ target_size = (args.resolution, args.resolution)
1547
+ crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
1548
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1549
+ add_time_ids = torch.tensor([add_time_ids])
1550
+ add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
1551
+ return add_time_ids
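+ # e.g. at resolution 1024 with crops_coords_top_left=(0, 0) this yields the SDXL micro-conditioning
+ # vector [1024, 1024, 0, 0, 1024, 1024] (original size, crop offsets, target size)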
1552
+
1553
+ if not args.train_text_encoder:
1554
+ tokenizers = [tokenizer_one, tokenizer_two]
1555
+ text_encoders = [text_encoder_one, text_encoder_two]
1556
+
1557
+ def compute_text_embeddings(prompt, text_encoders, tokenizers):
1558
+ with torch.no_grad():
1559
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
1560
+ prompt_embeds = prompt_embeds.to(accelerator.device)
1561
+ pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
1562
+ return prompt_embeds, pooled_prompt_embeds
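+ # prompt_embeds feed the UNet cross-attention, while pooled_prompt_embeds become the "text_embeds"
+ # added condition that is passed to the UNet together with the time ids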
1563
+
1564
+ # Handle instance prompt.
1565
+ instance_time_ids = compute_time_ids()
1566
+
1567
+ # If no type of tuning is done on the text_encoder and custom instance prompts are NOT
1568
+ # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid
1569
+ # the redundant encoding.
1570
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1571
+ instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings(
1572
+ args.instance_prompt, text_encoders, tokenizers
1573
+ )
1574
+
1575
+ # Handle class prompt for prior-preservation.
1576
+ if args.with_prior_preservation:
1577
+ class_time_ids = compute_time_ids()
1578
+ if freeze_text_encoder:
1579
+ class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings(
1580
+ args.class_prompt, text_encoders, tokenizers
1581
+ )
1582
+
1583
+ # Clear the memory here
1584
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1585
+ del tokenizers, text_encoders
1586
+ gc.collect()
1587
+ torch.cuda.empty_cache()
1588
+
1589
+ # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images),
1590
+ # pack the statically computed variables appropriately here. This is so that we don't
1591
+ # have to pass them to the dataloader.
1592
+ add_time_ids = instance_time_ids
1593
+ if args.with_prior_preservation:
1594
+ add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0)
1595
+
1596
+ # if --train_text_encoder_ti we need add_special_tokens to be True for textual inversion
1597
+ add_special_tokens = True if args.train_text_encoder_ti else False
1598
+
1599
+ if not train_dataset.custom_instance_prompts:
1600
+ if freeze_text_encoder:
1601
+ prompt_embeds = instance_prompt_hidden_states
1602
+ unet_add_text_embeds = instance_pooled_prompt_embeds
1603
+ if args.with_prior_preservation:
1604
+ prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
1605
+ unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
1606
+ # if we're optimizing the text encoder (whether a shared instance prompt or custom per-image prompts are used) we need to tokenize and encode the
1607
+ # batch prompts on all training steps
1608
+ else:
1609
+ tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt, add_special_tokens)
1610
+ tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt, add_special_tokens)
1611
+ if args.with_prior_preservation:
1612
+ class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt, add_special_tokens)
1613
+ class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt, add_special_tokens)
1614
+ tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
1615
+ tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
1616
+
1617
+ if args.train_text_encoder_ti and args.validation_prompt:
1618
+ # replace instances of --token_abstraction in validation prompt with the new tokens: "<si><si+1>" etc.
1619
+ for token_abs, token_replacement in train_dataset.token_abstraction_dict.items():
1620
+ args.validation_prompt = args.validation_prompt.replace(token_abs, "".join(token_replacement))
1621
+ print("validation prompt:", args.validation_prompt)
1622
+
1623
+ if args.cache_latents:
1624
+ latents_cache = []
1625
+ for batch in tqdm(train_dataloader, desc="Caching latents"):
1626
+ with torch.no_grad():
1627
+ batch["pixel_values"] = batch["pixel_values"].to(
1628
+ accelerator.device, non_blocking=True, dtype=torch.float32
1629
+ )
1630
+ latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
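+ # caching the latent distributions lets the training loop call latents_cache[step].sample()
+ # instead of re-running the VAE encoder on every step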
1631
+
1632
+ if args.validation_prompt is None:
1633
+ del vae
1634
+ if torch.cuda.is_available():
1635
+ torch.cuda.empty_cache()
1636
+
1637
+ # Scheduler and math around the number of training steps.
1638
+ overrode_max_train_steps = False
1639
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1640
+ if args.max_train_steps is None:
1641
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1642
+ overrode_max_train_steps = True
1643
+
1644
+ lr_scheduler = get_scheduler(
1645
+ args.lr_scheduler,
1646
+ optimizer=optimizer,
1647
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1648
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1649
+ num_cycles=args.lr_num_cycles,
1650
+ power=args.lr_power,
1651
+ )
1652
+
1653
+ # Prepare everything with our `accelerator`.
1654
+ if not freeze_text_encoder:
1655
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1656
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
1657
+ )
1658
+ else:
1659
+ unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1660
+ unet, optimizer, train_dataloader, lr_scheduler
1661
+ )
1662
+
1663
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1664
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1665
+ if overrode_max_train_steps:
1666
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1667
+ # Afterwards we recalculate our number of training epochs
1668
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1669
+
1670
+ # We need to initialize the trackers we use, and also store our configuration.
1671
+ # The trackers initialize automatically on the main process.
1672
+ if accelerator.is_main_process:
1673
+ accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args))
1674
+
1675
+ # Train!
1676
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1677
+
1678
+ logger.info("***** Running training *****")
1679
+ logger.info(f" Num examples = {len(train_dataset)}")
1680
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1681
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1682
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1683
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1684
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1685
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1686
+ global_step = 0
1687
+ first_epoch = 0
1688
+
1689
+ # Potentially load in the weights and states from a previous save
1690
+ if args.resume_from_checkpoint:
1691
+ if args.resume_from_checkpoint != "latest":
1692
+ path = os.path.basename(args.resume_from_checkpoint)
1693
+ else:
1694
+ # Get the most recent checkpoint
1695
+ dirs = os.listdir(args.output_dir)
1696
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1697
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1698
+ path = dirs[-1] if len(dirs) > 0 else None
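+ # checkpoint directories are named "checkpoint-<global_step>", so sorting on the numeric suffix picks the latest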
1699
+
1700
+ if path is None:
1701
+ accelerator.print(
1702
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1703
+ )
1704
+ args.resume_from_checkpoint = None
1705
+ initial_global_step = 0
1706
+ else:
1707
+ accelerator.print(f"Resuming from checkpoint {path}")
1708
+ accelerator.load_state(os.path.join(args.output_dir, path))
1709
+ global_step = int(path.split("-")[1])
1710
+
1711
+ initial_global_step = global_step
1712
+ first_epoch = global_step // num_update_steps_per_epoch
1713
+
1714
+ else:
1715
+ initial_global_step = 0
1716
+
1717
+ progress_bar = tqdm(
1718
+ range(0, args.max_train_steps),
1719
+ initial=initial_global_step,
1720
+ desc="Steps",
1721
+ # Only show the progress bar once on each machine.
1722
+ disable=not accelerator.is_local_main_process,
1723
+ )
1724
+
1725
+ if args.train_text_encoder:
1726
+ num_train_epochs_text_encoder = int(args.train_text_encoder_frac * args.num_train_epochs)
1727
+ elif args.train_text_encoder_ti:
1728
+ num_train_epochs_text_encoder = int(args.train_text_encoder_ti_frac * args.num_train_epochs)
1729
+ # flag used for textual inversion
1730
+ pivoted = False
1731
+ for epoch in range(first_epoch, args.num_train_epochs):
1732
+ # if performing any kind of optimization of text_encoder params
1733
+ if args.train_text_encoder or args.train_text_encoder_ti:
1734
+ if epoch == num_train_epochs_text_encoder:
1735
+ print("PIVOT HALFWAY", epoch)
1736
+ # stopping optimization of text_encoder params
1737
+ # this flag is used to reset the optimizer to optimize only on unet params
1738
+ pivoted = True
1739
+
1740
+ else:
1741
+ # still optimizing the text encoder
1742
+ text_encoder_one.train()
1743
+ text_encoder_two.train()
1744
+ # set requires_grad = True on the embeddings so that gradient checkpointing works
1745
+ if args.train_text_encoder:
1746
+ text_encoder_one.text_model.embeddings.requires_grad_(True)
1747
+ text_encoder_two.text_model.embeddings.requires_grad_(True)
1748
+
1749
+ unet.train()
1750
+ for step, batch in enumerate(train_dataloader):
1751
+ if pivoted:
1752
+ # stopping optimization of text_encoder params
1753
+ # resetting the optimizer to optimize only the unet params
1754
+ optimizer.param_groups[1]["lr"] = 0.0
1755
+ optimizer.param_groups[2]["lr"] = 0.0
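+ # param_groups[1] and [2] hold the two text encoders' parameters, so zeroing their learning rates
+ # effectively freezes them while the unet LoRA parameters (group 0) keep training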
1756
+
1757
+ with accelerator.accumulate(unet):
1758
+ prompts = batch["prompts"]
1759
+ # encode batch prompts when custom prompts are provided for each image -
1760
+ if train_dataset.custom_instance_prompts:
1761
+ if freeze_text_encoder:
1762
+ prompt_embeds, unet_add_text_embeds = compute_text_embeddings(
1763
+ prompts, text_encoders, tokenizers
1764
+ )
1765
+
1766
+ else:
1767
+ tokens_one = tokenize_prompt(tokenizer_one, prompts, add_special_tokens)
1768
+ tokens_two = tokenize_prompt(tokenizer_two, prompts, add_special_tokens)
1769
+
1770
+ if args.cache_latents:
1771
+ model_input = latents_cache[step].sample()
1772
+ else:
1773
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
1774
+ model_input = vae.encode(pixel_values).latent_dist.sample()
1775
+
1776
+ model_input = model_input * vae_scaling_factor
1777
+ if args.pretrained_vae_model_name_or_path is None:
1778
+ model_input = model_input.to(weight_dtype)
1779
+
1780
+ # Sample noise that we'll add to the latents
1781
+ noise = torch.randn_like(model_input)
1782
+ if args.noise_offset:
1783
+ # https://www.crosslabs.org//blog/diffusion-with-offset-noise
1784
+ noise += args.noise_offset * torch.randn(
1785
+ (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device
1786
+ )
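+ # the offset adds a small per-sample, per-channel constant to the noise, which is intended to help
+ # the model learn overall brightness/darkness shifts (see the linked blog post)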
1787
+
1788
+ bsz = model_input.shape[0]
1789
+ # Sample a random timestep for each image
1790
+ timesteps = torch.randint(
1791
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1792
+ )
1793
+ timesteps = timesteps.long()
1794
+
1795
+ # Add noise to the model input according to the noise magnitude at each timestep
1796
+ # (this is the forward diffusion process)
1797
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1798
+
1799
+ # Calculate the elements to repeat depending on the use of prior-preservation and custom captions.
1800
+ if not train_dataset.custom_instance_prompts:
1801
+ elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz
1802
+ elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
1803
+
1804
+ else:
1805
+ elems_to_repeat_text_embeds = 1
1806
+ elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
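+ # add_time_ids holds an [instance, class] pair when prior preservation is used, so it is repeated
+ # bsz // 2 times to match the concatenated batch of instance and class images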
1807
+
1808
+ # Predict the noise residual
1809
+ if freeze_text_encoder:
1810
+ unet_added_conditions = {
1811
+ "time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1),
1812
+ "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat_text_embeds, 1),
1813
+ }
1814
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1815
+ model_pred = unet(
1816
+ noisy_model_input,
1817
+ timesteps,
1818
+ prompt_embeds_input,
1819
+ added_cond_kwargs=unet_added_conditions,
1820
+ ).sample
1821
+ else:
1822
+ unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1)}
1823
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(
1824
+ text_encoders=[text_encoder_one, text_encoder_two],
1825
+ tokenizers=None,
1826
+ prompt=None,
1827
+ text_input_ids_list=[tokens_one, tokens_two],
1828
+ )
1829
+ unet_added_conditions.update(
1830
+ {"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat_text_embeds, 1)}
1831
+ )
1832
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1833
+ model_pred = unet(
1834
+ noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions
1835
+ ).sample
1836
+
1837
+ # Get the target for loss depending on the prediction type
1838
+ if noise_scheduler.config.prediction_type == "epsilon":
1839
+ target = noise
1840
+ elif noise_scheduler.config.prediction_type == "v_prediction":
1841
+ target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1842
+ else:
1843
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1844
+
1845
+ if args.with_prior_preservation:
1846
+ # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1847
+ model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1848
+ target, target_prior = torch.chunk(target, 2, dim=0)
1849
+
1850
+ # Compute prior loss
1851
+ prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
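+ # the batch is built as [instance examples, class examples] by the collate_fn, so chunking in half
+ # separates the instance predictions from the prior-preservation (class) predictions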
1852
+
1853
+ if args.snr_gamma is None:
1854
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1855
+ else:
1856
+ # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
1857
+ # Since we predict the noise instead of x_0, the original formulation is slightly changed.
1858
+ # This is discussed in Section 4.2 of the same paper.
1859
+
1860
+ if args.with_prior_preservation:
1861
+ # if we're using prior preservation, we calc snr for instance loss only -
1862
+ # and hence only need timesteps corresponding to instance images
1863
+ snr_timesteps, _ = torch.chunk(timesteps, 2, dim=0)
1864
+ else:
1865
+ snr_timesteps = timesteps
1866
+
1867
+ snr = compute_snr(noise_scheduler, snr_timesteps)
1868
+ base_weight = (
1869
+ torch.stack([snr, args.snr_gamma * torch.ones_like(snr_timesteps)], dim=1).min(dim=1)[0] / snr
1870
+ )
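+ # base_weight implements min(SNR, snr_gamma) / SNR from the Min-SNR weighting paper; e.g. with
+ # snr_gamma=5, a timestep with SNR=20 is down-weighted to 0.25 while timesteps with SNR <= 5 keep weight 1.0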
1871
+
1872
+ if noise_scheduler.config.prediction_type == "v_prediction":
1873
+ # Velocity objective needs to be floored to an SNR weight of one.
1874
+ mse_loss_weights = base_weight + 1
1875
+ else:
1876
+ # Epsilon and sample both use the same loss weights.
1877
+ mse_loss_weights = base_weight
1878
+
1879
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
1880
+ loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
1881
+ loss = loss.mean()
1882
+
1883
+ if args.with_prior_preservation:
1884
+ # Add the prior loss to the instance loss.
1885
+ loss = loss + args.prior_loss_weight * prior_loss
1886
+
1887
+ accelerator.backward(loss)
1888
+ if accelerator.sync_gradients:
1889
+ params_to_clip = (
1890
+ itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
1891
+ if (args.train_text_encoder or args.train_text_encoder_ti)
1892
+ else unet_lora_parameters
1893
+ )
1894
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1895
+ optimizer.step()
1896
+ lr_scheduler.step()
1897
+ optimizer.zero_grad()
1898
+
1899
+ # after every step, reset all token embeddings except the newly inserted ones to their original values
1900
+ if args.train_text_encoder_ti:
1901
+ embedding_handler.retract_embeddings()
1902
+
1903
+ # Checks if the accelerator has performed an optimization step behind the scenes
1904
+ if accelerator.sync_gradients:
1905
+ progress_bar.update(1)
1906
+ global_step += 1
1907
+
1908
+ if accelerator.is_main_process:
1909
+ if global_step % args.checkpointing_steps == 0:
1910
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1911
+ if args.checkpoints_total_limit is not None:
1912
+ checkpoints = os.listdir(args.output_dir)
1913
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1914
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1915
+
1916
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1917
+ if len(checkpoints) >= args.checkpoints_total_limit:
1918
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1919
+ removing_checkpoints = checkpoints[0:num_to_remove]
1920
+
1921
+ logger.info(
1922
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1923
+ )
1924
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1925
+
1926
+ for removing_checkpoint in removing_checkpoints:
1927
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1928
+ shutil.rmtree(removing_checkpoint)
1929
+
1930
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1931
+ accelerator.save_state(save_path)
1932
+ logger.info(f"Saved state to {save_path}")
1933
+
1934
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1935
+ progress_bar.set_postfix(**logs)
1936
+ accelerator.log(logs, step=global_step)
1937
+
1938
+ if global_step >= args.max_train_steps:
1939
+ break
1940
+
1941
+ if accelerator.is_main_process:
1942
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1943
+ logger.info(
1944
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1945
+ f" {args.validation_prompt}."
1946
+ )
1947
+ # create pipeline
1948
+ if freeze_text_encoder:
1949
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1950
+ args.pretrained_model_name_or_path,
1951
+ subfolder="text_encoder",
1952
+ revision=args.revision,
1953
+ variant=args.variant,
1954
+ )
1955
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
1956
+ args.pretrained_model_name_or_path,
1957
+ subfolder="text_encoder_2",
1958
+ revision=args.revision,
1959
+ variant=args.variant,
1960
+ )
1961
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1962
+ args.pretrained_model_name_or_path,
1963
+ vae=vae,
1964
+ text_encoder=accelerator.unwrap_model(text_encoder_one),
1965
+ text_encoder_2=accelerator.unwrap_model(text_encoder_two),
1966
+ unet=accelerator.unwrap_model(unet),
1967
+ revision=args.revision,
1968
+ variant=args.variant,
1969
+ torch_dtype=weight_dtype,
1970
+ )
1971
+
1972
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1973
+ scheduler_args = {}
1974
+
1975
+ if "variance_type" in pipeline.scheduler.config:
1976
+ variance_type = pipeline.scheduler.config.variance_type
1977
+
1978
+ if variance_type in ["learned", "learned_range"]:
1979
+ variance_type = "fixed_small"
1980
+
1981
+ scheduler_args["variance_type"] = variance_type
1982
+
1983
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
1984
+ pipeline.scheduler.config, **scheduler_args
1985
+ )
1986
+
1987
+ pipeline = pipeline.to(accelerator.device)
1988
+ pipeline.set_progress_bar_config(disable=True)
1989
+
1990
+ # run inference
1991
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
1992
+ pipeline_args = {"prompt": args.validation_prompt}
1993
+
1994
+ with torch.cuda.amp.autocast():
1995
+ images = [
1996
+ pipeline(**pipeline_args, generator=generator).images[0]
1997
+ for _ in range(args.num_validation_images)
1998
+ ]
1999
+
2000
+ for tracker in accelerator.trackers:
2001
+ if tracker.name == "tensorboard":
2002
+ np_images = np.stack([np.asarray(img) for img in images])
2003
+ tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
2004
+ if tracker.name == "wandb":
2005
+ tracker.log(
2006
+ {
2007
+ "validation": [
2008
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
2009
+ for i, image in enumerate(images)
2010
+ ]
2011
+ }
2012
+ )
2013
+
2014
+ del pipeline
2015
+ torch.cuda.empty_cache()
2016
+
2017
+ # Save the lora layers
2018
+ accelerator.wait_for_everyone()
2019
+ if accelerator.is_main_process:
2020
+ unet = accelerator.unwrap_model(unet)
2021
+ unet = unet.to(torch.float32)
2022
+ unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
2023
+
2024
+ if args.train_text_encoder:
2025
+ text_encoder_one = accelerator.unwrap_model(text_encoder_one)
2026
+ text_encoder_lora_layers = convert_state_dict_to_diffusers(
2027
+ get_peft_model_state_dict(text_encoder_one.to(torch.float32))
2028
+ )
2029
+ text_encoder_two = accelerator.unwrap_model(text_encoder_two)
2030
+ text_encoder_2_lora_layers = convert_state_dict_to_diffusers(
2031
+ get_peft_model_state_dict(text_encoder_two.to(torch.float32))
2032
+ )
2033
+ else:
2034
+ text_encoder_lora_layers = None
2035
+ text_encoder_2_lora_layers = None
2036
+
2037
+ StableDiffusionXLPipeline.save_lora_weights(
2038
+ save_directory=args.output_dir,
2039
+ unet_lora_layers=unet_lora_layers,
2040
+ text_encoder_lora_layers=text_encoder_lora_layers,
2041
+ text_encoder_2_lora_layers=text_encoder_2_lora_layers,
2042
+ )
2043
+ images = []
2044
+ if args.validation_prompt and args.num_validation_images > 0:
2045
+ # Final inference
2046
+ # Load previous pipeline
2047
+ vae = AutoencoderKL.from_pretrained(
2048
+ vae_path,
2049
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
2050
+ revision=args.revision,
2051
+ variant=args.variant,
2052
+ torch_dtype=weight_dtype,
2053
+ )
2054
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
2055
+ args.pretrained_model_name_or_path,
2056
+ vae=vae,
2057
+ revision=args.revision,
2058
+ variant=args.variant,
2059
+ torch_dtype=weight_dtype,
2060
+ )
2061
+
2062
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
2063
+ scheduler_args = {}
2064
+
2065
+ if "variance_type" in pipeline.scheduler.config:
2066
+ variance_type = pipeline.scheduler.config.variance_type
2067
+
2068
+ if variance_type in ["learned", "learned_range"]:
2069
+ variance_type = "fixed_small"
2070
+
2071
+ scheduler_args["variance_type"] = variance_type
2072
+
2073
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
2074
+
2075
+ # load attention processors
2076
+ pipeline.load_lora_weights(args.output_dir)
2077
+
2078
+ # run inference
2079
+ pipeline = pipeline.to(accelerator.device)
2080
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
2081
+ images = [
2082
+ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
2083
+ for _ in range(args.num_validation_images)
2084
+ ]
2085
+
2086
+ for tracker in accelerator.trackers:
2087
+ if tracker.name == "tensorboard":
2088
+ np_images = np.stack([np.asarray(img) for img in images])
2089
+ tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
2090
+ if tracker.name == "wandb":
2091
+ tracker.log(
2092
+ {
2093
+ "test": [
2094
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
2095
+ for i, image in enumerate(images)
2096
+ ]
2097
+ }
2098
+ )
2099
+
2100
+ if args.train_text_encoder_ti:
2101
+ embedding_handler.save_embeddings(
2102
+ f"{args.output_dir}/{args.output_dir}_emb.safetensors",
2103
+ )
2104
+
2105
+ # Convert to WebUI format
2106
+ lora_state_dict = load_file(f"{args.output_dir}/pytorch_lora_weights.safetensors")
2107
+ peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
2108
+ kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
2109
+ save_file(kohya_state_dict, f"{args.output_dir}/{args.output_dir}.safetensors")
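+ # the Kohya-format .safetensors file is the one WebUI-style frontends typically expect in their LoRA folder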
2110
+
2111
+ save_model_card(
2112
+ model_id if not args.push_to_hub else repo_id,
2113
+ images=images,
2114
+ base_model=args.pretrained_model_name_or_path,
2115
+ train_text_encoder=args.train_text_encoder,
2116
+ train_text_encoder_ti=args.train_text_encoder_ti,
2117
+ token_abstraction_dict=train_dataset.token_abstraction_dict,
2118
+ instance_prompt=args.instance_prompt,
2119
+ validation_prompt=args.validation_prompt,
2120
+ repo_folder=args.output_dir,
2121
+ vae_path=args.pretrained_vae_model_name_or_path,
2122
+ )
2123
+ if args.push_to_hub:
2124
+ upload_folder(
2125
+ repo_id=repo_id,
2126
+ folder_path=args.output_dir,
2127
+ commit_message="End of training",
2128
+ ignore_patterns=["step_*", "epoch_*"],
2129
+ )
2130
+
2131
+ accelerator.end_training()
2132
+
2133
+
2134
+ if __name__ == "__main__":
2135
+ args = parse_args()
2136
+ main(args)