jiangzeyinzi committed
Commit 28608a6
1 Parent(s): 261aa3e

Upload folder using huggingface_hub

0_SwiftLoRA/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": null,
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": false,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "lora_dtype": null,
+   "lorap_emb_lr": 1e-06,
+   "lorap_lr_ratio": 16.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "model_key_mapping": null,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "swift_type": "LORA",
+   "target_modules": "(cond_stage_model.*(q_proj|k_proj|v_proj|out_proj|mlp.fc1|mlp.fc2))|(model.*(to_q|to_k|to_v|to_out.0|net.0.proj|net.2))$",
+   "task_type": null,
+   "use_dora": false,
+   "use_merged_linear": false,
+   "use_qa_lora": false,
+   "use_rslora": false
+ }
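For reference, here is a minimal sketch (Python standard library only) of reading the adapter config above and checking which parameter names its `target_modules` regex would select. The example module names are hypothetical illustrations, not taken from the checkpoint.

```python
import json
import re

# Read the Swift/PEFT-style LoRA config shown in the diff above.
with open("0_SwiftLoRA/adapter_config.json") as f:
    cfg = json.load(f)

print("rank:", cfg["r"], "alpha:", cfg["lora_alpha"])  # 64 / 64 in this config

# target_modules is a regex over module names; the hypothetical names below
# only illustrate which kinds of layers it would pick up.
pattern = re.compile(cfg["target_modules"])
examples = [
    "cond_stage_model.transformer.text_model.encoder.layers.0.self_attn.q_proj",
    "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q",
    "model.diffusion_model.input_blocks.1.0.in_layers.2",
]
for name in examples:
    print(name, "->", bool(pattern.search(name)))
```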
0_SwiftLoRA/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8f7627401d1372d0ed3d183d498fd97fedcc9d2672ff244e1642591ea3c7dc8
+ size 138393741
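The entry above is a Git LFS pointer rather than the binary weights. Once the actual file has been fetched (for example with `git lfs pull`), an integrity check against the pointer's `oid` and `size` might look like this minimal, hedged sketch:

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_SHA256 = "e8f7627401d1372d0ed3d183d498fd97fedcc9d2672ff244e1642591ea3c7dc8"
EXPECTED_SIZE = 138393741  # bytes

path = Path("0_SwiftLoRA/adapter_model.bin")
assert path.stat().st_size == EXPECTED_SIZE, "size does not match the LFS pointer"

digest = hashlib.sha256(path.read_bytes()).hexdigest()
assert digest == EXPECTED_SHA256, "sha256 does not match the LFS pointer"
print("adapter_model.bin matches the LFS pointer")
```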
README.md ADDED
@@ -0,0 +1,168 @@
+ ---
+ frameworks:
+ - Pytorch
+ license: apache-2.0
+ tasks:
+ - efficient-diffusion-tuning
+ ---
+
+ <p align="center">
+
+ <h2 align="center">SD15-TEXT_LORA-3DStyle-20240523-test</h2>
+ <p align="center">
+ <br>
+ <a href="https://github.com/modelscope/scepter/"><img src="https://img.shields.io/badge/powered by-scepter-6FEBB9.svg"></a>
+ <br>
+ </p>
+
+ ## Model Introduction
+ test123
+
+ ## Model Parameters
+ <table>
+   <thead>
+     <tr>
+       <th rowspan="2">Base Model</th>
+       <th rowspan="2">Tuner Type</th>
+       <th colspan="4">Training Parameters</th>
+     </tr>
+     <tr>
+       <th>Batch Size</th>
+       <th>Epochs</th>
+       <th>Learning Rate</th>
+       <th>Resolution</th>
+     </tr>
+   </thead>
+   <tbody align="center">
+     <tr>
+       <td>SD1.5</td>
+       <td>TEXT_LORA</td>
+       <td>4</td>
+       <td>50</td>
+       <td>0.0001</td>
+       <td>[512, 512]</td>
+     </tr>
+   </tbody>
+ </table>
+
+
+ <table>
+   <thead>
+     <tr>
+       <th>Data Type</th>
+       <th>Data Space</th>
+       <th>Data Name</th>
+       <th>Data Subset</th>
+     </tr>
+   </thead>
+   <tbody align="center">
+     <tr>
+       <td>Dataset zip</td>
+       <td></td>
+       <td>/home/scepter/cache/scepter_ui/datasets/scepter_txt2img_3D_example</td>
+       <td>default</td>
+     </tr>
+   </tbody>
+ </table>
+
+
+ ## Model Performance
+ Given the input "a boy wearing a jacket," the following image may be generated:
+
+ ![image](./image.jpg)
+
+ ## Model Usage
+ ### Command Line Execution
+ * Run with Scepter's SDK, making sure to choose the configuration file that matches your base model and tuner type, following the correspondences in the table below.
+ <table>
+   <thead>
+     <tr>
+       <th>Base Model</th>
+       <th>LORA</th>
+       <th>SCE</th>
+       <th>TEXT_LORA</th>
+       <th>TEXT_SCE</th>
+     </tr>
+   </thead>
+   <tbody align="center">
+     <tr>
+       <td>SD1.5</td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_1.5_512_lora.yaml">lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/sd15_512_sce_t2i_swift.yaml">sce_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_1.5_512_text_lora.yaml">text_lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/stable_diffusion_1.5_512_text_sce.yaml">text_sce_cfg</a></td>
+     </tr>
+   </tbody>
+   <tbody align="center">
+     <tr>
+       <td>SD2.1</td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_2.1_768_lora.yaml">lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/sd21_768_sce_t2i_swift.yaml">sce_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_2.1_768_text_lora.yaml">text_lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/sd21_768_text_sce_t2i_swift.yaml">text_sce_cfg</a></td>
+     </tr>
+   </tbody>
+   <tbody align="center">
+     <tr>
+       <td>SDXL</td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_xl_1024_lora.yaml">lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/sdxl_1024_sce_t2i_swift.yaml">sce_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/examples/generation/stable_diffusion_xl_1024_text_lora.yaml">text_lora_cfg</a></td>
+       <td><a href="https://github.com/modelscope/scepter/blob/main/scepter/methods/scedit/t2i/sdxl_1024_text_sce_t2i_swift.yaml">text_sce_cfg</a></td>
+     </tr>
+   </tbody>
+ </table>
+
+ * Running from Source Code
+
+ ```shell
+ git clone https://github.com/modelscope/scepter.git
+ cd scepter
+ pip install -r requirements/recommended.txt
+ PYTHONPATH=. python scepter/tools/run_inference.py \
+     --pretrained_model {this model folder} \
+     --cfg {lora_cfg} or {sce_cfg} or {text_lora_cfg} or {text_sce_cfg} \
+     --prompt 'a boy wearing a jacket' \
+     --save_folder 'inference'
+ ```
+
+ * Running after Installing Scepter (Recommended)
+ ```shell
+ pip install scepter
+ python -m scepter.tools.run_inference \
+     --pretrained_model {this model folder} \
+     --cfg {lora_cfg} or {sce_cfg} or {text_lora_cfg} or {text_sce_cfg} \
+     --prompt 'a boy wearing a jacket' \
+     --save_folder 'inference'
+ ```
+ ### Running with Scepter Studio
+
+ ```shell
+ pip install scepter
+ # Launch Scepter Studio
+ python -m scepter.tools.webui
+ ```
+
+ * Refer to the following guides for model usage.
+
+ (video url)
+
+ ## Model Reference
+ If you wish to use this model for your own purposes, please cite it as follows.
+ ```bibtex
+ @misc{SD15-TEXT_LORA-3DStyle-20240523-test,
+     title = {SD15-TEXT_LORA-3DStyle-20240523-test, https://huggingface.co/jiangzeyinzi/SD15-TEXT_LORA-3DStyle-20240523-test},
+     author = {jiangzeyinzi},
+     year = {2024}
+ }
+ ```
+ This model was trained using [Scepter Studio](https://github.com/modelscope/scepter); [Scepter](https://github.com/modelscope/scepter)
+ is an algorithm framework and toolbox developed by the Alibaba Tongyi Wanxiang Team. It provides a suite of tools and models for image generation, editing, fine-tuning, data processing, and more. If you find our work beneficial for your research,
+ please cite as follows.
+ ```bibtex
+ @misc{scepter,
+     title = {SCEPTER, https://github.com/modelscope/scepter},
+     author = {SCEPTER},
+     year = {2023}
+ }
+ ```
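The inference commands in the README above expect `--pretrained_model {this model folder}` to point at a local copy of this repository. If it is not already local, one way to fetch it is via `huggingface_hub`; a sketch under the assumption that the repo id matches the citation URL in the README (`snapshot_download` returns the path of the cached copy):

```python
from huggingface_hub import snapshot_download

# Download this adapter repository into the local Hugging Face cache and
# get back the folder path to pass as --pretrained_model.
local_dir = snapshot_download(repo_id="jiangzeyinzi/SD15-TEXT_LORA-3DStyle-20240523-test")
print(local_dir)
```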
configuration.json ADDED
@@ -0,0 +1 @@
+ {}
image.jpg ADDED
params.yaml ADDED
@@ -0,0 +1,34 @@
+ DESCRIPTION: test123
+ PARAMS:
+   base_model: stable_diffusion
+   base_model_revision: SD1.5
+   bucket_no_upscale: false
+   bucket_resolution_steps: 64.0
+   data_type: Dataset zip
+   enable_resolution_bucket: false
+   eval_prompts:
+   - a boy wearing a jacket
+   - a dog running on the lawn
+   learning_rate: 0.0001
+   lora_alpha: 64.0
+   lora_rank: 64.0
+   max_bucket_resolution: 1024.0
+   min_bucket_resolution: 256.0
+   ms_data_name: /home/scepter/cache/scepter_ui/datasets/scepter_txt2img_3D_example
+   ms_data_space: ''
+   ms_data_subname: default
+   ori_data_name: 3D_example
+   prompt_prefix: ''
+   push_to_hub: false
+   replace_keywords: ''
+   resolution_height: 512
+   resolution_width: 512
+   save_interval: 25
+   sce_ratio: 1
+   text_lora_alpha: 256.0
+   text_lora_rank: 256.0
+   train_batch_size: 4
+   train_epoch: 50
+   tuner_name: TEXT_LORA
+   work_dir: ''
+   work_name: ''
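For completeness, the training record above can be read back programmatically; a small sketch assuming PyYAML is installed and the file sits at `params.yaml` in the repository root, as added in this commit:

```python
import yaml

# Load the training parameters written by Scepter Studio.
with open("params.yaml") as f:
    record = yaml.safe_load(f)

print(record["DESCRIPTION"])             # "test123"
print(record["PARAMS"]["tuner_name"])    # "TEXT_LORA"
print(record["PARAMS"]["eval_prompts"])  # prompts used for evaluation renders
```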