sharkMeow committed
Commit 1b4cb70
1 Parent(s): 659e8b0

Training in progress, step 500
Files changed (6)
  1. README.md +56 -0
  2. all_results.json +7 -0
  3. config.json +34 -0
  4. eval_results.json +7 -0
  5. model.safetensors +3 -0
  6. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ base_model: OFA-Sys/chinese-clip-vit-base-patch16
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: aoi_clip_eval
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/shark_meow_team/huggingface/runs/ez175rsf)
+ # aoi_clip_eval
+
+ This model is a fine-tuned version of [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - eval_loss: 6.0552
+ - eval_accuracy: 0.0276
+ - eval_runtime: 40.073
+ - eval_samples_per_second: 242.433
+ - eval_steps_per_second: 5.515
+ - step: 0
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 40
+ - eval_batch_size: 44
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 60.0
+ - mixed_precision_training: Native AMP
+
+ ### Framework versions
+
+ - Transformers 4.42.3
+ - Pytorch 2.3.1+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
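
A quick way to exercise the checkpoint is the sketch below. It assumes the uploaded weights still load through the stock `ChineseCLIPModel` / `ChineseCLIPProcessor` classes and that the repo id is `sharkMeow/aoi_clip_eval` (inferred from the card name); since `config.json` lists a custom `ChineseCLIPImageDualModel` architecture, the actual loading path may require the training repo's custom class instead.

```python
# Minimal zero-shot image-text matching sketch (Transformers 4.42.x, PyTorch 2.3.x).
# Assumptions: the checkpoint is compatible with the standard Chinese-CLIP classes,
# and the repo id below (inferred from the card name) is correct.
import torch
from PIL import Image
from transformers import ChineseCLIPModel, ChineseCLIPProcessor

repo_id = "sharkMeow/aoi_clip_eval"  # assumed repo id
model = ChineseCLIPModel.from_pretrained(repo_id).eval()
processor = ChineseCLIPProcessor.from_pretrained(repo_id)

image = Image.open("example.jpg")        # any local image
texts = ["一只猫", "一只狗", "一辆汽车"]  # candidate Chinese captions

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image has shape (num_images, num_texts); softmax gives match probabilities.
probs = outputs.logits_per_image.softmax(dim=-1)
print(dict(zip(texts, probs[0].tolist())))
```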
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "eval_accuracy": 0.027586206896551724,
+     "eval_loss": 6.055156707763672,
+     "eval_runtime": 40.073,
+     "eval_samples_per_second": 242.433,
+     "eval_steps_per_second": 5.515
+ }
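
Both metrics files (`all_results.json` here and `eval_results.json` below) are plain JSON written by the `Trainer`, so they can be read back directly; a minimal sketch:

```python
# Read the aggregated evaluation metrics back from disk.
import json

with open("all_results.json") as f:
    metrics = json.load(f)

print(f"eval_loss: {metrics['eval_loss']:.4f}")          # 6.0552
print(f"eval_accuracy: {metrics['eval_accuracy']:.2%}")  # about 2.76%
```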
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "OFA-Sys/chinese-clip-vit-base-patch16",
+   "architectures": [
+     "ChineseCLIPImageDualModel"
+   ],
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "chinese_clip",
+   "projection_dim": 512,
+   "text_config": {
+     "architectures": [
+       "ChineseCLIPTextModel"
+     ],
+     "bos_token_id": 0,
+     "directionality": "bidi",
+     "eos_token_id": 2,
+     "model_type": "chinese_clip_text_model",
+     "output_past": true,
+     "pooler_fc_size": 768,
+     "pooler_num_attention_heads": 12,
+     "pooler_num_fc_layers": 3,
+     "pooler_size_per_head": 128,
+     "pooler_type": "first_token_transform",
+     "vocab_size": 21128
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.3",
+   "vision_config": {
+     "dropout": 0.0,
+     "model_type": "chinese_clip_vision_model",
+     "patch_size": 16
+   }
+ }
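
The configuration keeps the base model's Chinese-CLIP layout (512-d projection, a 21128-token text vocabulary, 16x16 vision patches); only the `architectures` entry points to a custom `ChineseCLIPImageDualModel`. Below is a sketch for inspecting it with the stock config class, assuming the standard `chinese_clip` schema still applies, as the `model_type` field suggests.

```python
# Load the saved config with the stock Chinese-CLIP config class and inspect it.
# Assumption: the custom ChineseCLIPImageDualModel architecture still uses the
# standard "chinese_clip" configuration schema.
from transformers import ChineseCLIPConfig

config = ChineseCLIPConfig.from_json_file("config.json")
print(config.projection_dim)            # 512
print(config.text_config.vocab_size)    # 21128
print(config.vision_config.patch_size)  # 16
```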
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "eval_accuracy": 0.027586206896551724,
+     "eval_loss": 6.055156707763672,
+     "eval_runtime": 40.073,
+     "eval_samples_per_second": 242.433,
+     "eval_steps_per_second": 5.515
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da0e0977953ec0db42fcdf7a3052b6734f9cd55bb79135a173d0f03109ee8e26
+ size 1162455388
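
As usual on the Hub, the diff only shows the git-LFS pointer for the weights (sha256 plus size, roughly 1.16 GB); the actual file is fetched on download. Once present locally, the tensors can be inspected without instantiating the model; a minimal sketch using the `safetensors` library:

```python
# Peek at the stored tensors directly from the safetensors file.
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt") as f:
    names = list(f.keys())
    print(f"{len(names)} tensors")
    first = f.get_tensor(names[0])
    print(names[0], tuple(first.shape), first.dtype)
```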
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fb5509f1d27e6f0c1d6b38ca900eec28b929c1afb4e61f3836d33043b34fc63
+ size 5112
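
`training_args.bin` is likewise just an LFS pointer here (about 5 KB of payload). In standard `Trainer` runs it holds a pickled `TrainingArguments` object, so the hyperparameters listed in the card can be cross-checked with a sketch like this:

```python
# Recover the serialized TrainingArguments (a pickled object, hence weights_only=False).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)                # expected 1e-05 per the card
print(args.per_device_train_batch_size)
print(args.num_train_epochs)             # expected 60.0 per the card
```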