sravanthib committed on
Commit
cf9b9e2
·
verified ·
1 Parent(s): 62b131c

Training completed

Browse files
Files changed (4) hide show
  1. README.md +4 -4
  2. all_results.json +5 -5
  3. train_results.json +5 -5
  4. trainer_state.json +8 -8
README.md CHANGED
@@ -1,9 +1,9 @@
1
  ---
2
  library_name: peft
3
- license: llama3.2
4
- base_model: meta-llama/Llama-3.2-3b-Instruct
5
  tags:
6
- - base_model:adapter:meta-llama/Llama-3.2-3b-Instruct
7
  - lora
8
  - transformers
9
  pipeline_tag: text-generation
@@ -17,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # qwen_model_testing
19
 
20
- This model is a fine-tuned version of [meta-llama/Llama-3.2-3b-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3b-Instruct) on an unknown dataset.
21
 
22
  ## Model description
23
 
 
1
  ---
2
  library_name: peft
3
+ license: mit
4
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
5
  tags:
6
+ - base_model:adapter:deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
7
  - lora
8
  - transformers
9
  pipeline_tag: text-generation
 
17
 
18
  # qwen_model_testing
19
 
20
+ This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) on an unknown dataset.
21
 
22
  ## Model description
23
 
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 0.0182648401826484,
3
- "total_flos": 5.565784553370419e+16,
4
- "train_loss": 4.055104446411133,
5
- "train_runtime": 129.2068,
6
- "train_samples_per_second": 12.383,
7
- "train_steps_per_second": 0.077
8
  }
 
1
  {
2
  "epoch": 0.0182648401826484,
3
+ "total_flos": 1.394108846267433e+17,
4
+ "train_loss": 8.501841735839843,
5
+ "train_runtime": 190.4124,
6
+ "train_samples_per_second": 8.403,
7
+ "train_steps_per_second": 0.053
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 0.0182648401826484,
3
- "total_flos": 5.565784553370419e+16,
4
- "train_loss": 4.055104446411133,
5
- "train_runtime": 129.2068,
6
- "train_samples_per_second": 12.383,
7
- "train_steps_per_second": 0.077
8
  }
 
1
  {
2
  "epoch": 0.0182648401826484,
3
+ "total_flos": 1.394108846267433e+17,
4
+ "train_loss": 8.501841735839843,
5
+ "train_runtime": 190.4124,
6
+ "train_samples_per_second": 8.403,
7
+ "train_steps_per_second": 0.053
8
  }
trainer_state.json CHANGED
@@ -11,19 +11,19 @@
11
  "log_history": [
12
  {
13
  "epoch": 0.0182648401826484,
14
- "grad_norm": 0.5136012434959412,
15
  "learning_rate": 0.0001,
16
- "loss": 4.0551,
17
  "step": 10
18
  },
19
  {
20
  "epoch": 0.0182648401826484,
21
  "step": 10,
22
- "total_flos": 5.565784553370419e+16,
23
- "train_loss": 4.055104446411133,
24
- "train_runtime": 129.2068,
25
- "train_samples_per_second": 12.383,
26
- "train_steps_per_second": 0.077
27
  }
28
  ],
29
  "logging_steps": 10,
@@ -43,7 +43,7 @@
43
  "attributes": {}
44
  }
45
  },
46
- "total_flos": 5.565784553370419e+16,
47
  "train_batch_size": 2,
48
  "trial_name": null,
49
  "trial_params": null
 
11
  "log_history": [
12
  {
13
  "epoch": 0.0182648401826484,
14
+ "grad_norm": 10.595149993896484,
15
  "learning_rate": 0.0001,
16
+ "loss": 8.5018,
17
  "step": 10
18
  },
19
  {
20
  "epoch": 0.0182648401826484,
21
  "step": 10,
22
+ "total_flos": 1.394108846267433e+17,
23
+ "train_loss": 8.501841735839843,
24
+ "train_runtime": 190.4124,
25
+ "train_samples_per_second": 8.403,
26
+ "train_steps_per_second": 0.053
27
  }
28
  ],
29
  "logging_steps": 10,
 
43
  "attributes": {}
44
  }
45
  },
46
+ "total_flos": 1.394108846267433e+17,
47
  "train_batch_size": 2,
48
  "trial_name": null,
49
  "trial_params": null