Upload folder using huggingface_hub
- 0000200_adapters.safetensors +3 -0
- 0001000_adapters.safetensors +3 -0
- 0002000_adapters.safetensors +3 -0
- 0003000_adapters.safetensors +3 -0
- 0004000_adapters.safetensors +3 -0
- 0005000_adapters.safetensors +3 -0
- 0006000_adapters.safetensors +3 -0
- 0007000_adapters.safetensors +3 -0
- 0008000_adapters.safetensors +3 -0
- 0009000_adapters.safetensors +3 -0
- 0010000_adapters.safetensors +3 -0
- 0011000_adapters.safetensors +3 -0
- 0012000_adapters.safetensors +3 -0
- 0013000_adapters.safetensors +3 -0
- README.md +105 -0
- adapter_config.json +40 -0
- adapters.safetensors +3 -0
0000200_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f68952dbf99cecc7c0cc24258fc998bf23ef4fb6746ef92d8e5ebe74fd79f703
+size 46161566

0001000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67e49ce0b374ec733963c5d51bb17889d09bfce8c2e05d306fcd0ddcda90e753
+size 46161566

0002000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bad43d6371739f695e4809370abe021c5b522d218b7a0f448a912029ad18336
+size 46161566

0003000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:275afda0b9ed2c7dc2e7914a91f9401cc3c2c68c7b437e4e284dd86682d8a057
+size 46161566

0004000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49200915365aed902de8d7cf24d9b492ea750a9fecdf99a6856519c5560dbdb4
+size 46161566

0005000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca9bbd859325ee8a1f5275affe10baed1d881338e500c512750bcc69f138a68f
+size 46161566

0006000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:624d2f0c8e6dc4094f733221206351b2c729d0c0868d0704c4a40d801298ab56
+size 46161566

0007000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f9dde74c92b19a67423b067f6d34d32a7bd5a2bb167de1c9212ff84d32f5e76
+size 46161566

0008000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54e1c662accc18553caa17c386f348e0998bd4931cc8408087fb8ba7662d0980
+size 46161566

0009000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:363334571ff35c7db3c173951e8233db6cbcbe5fbe9cbed2d29237ed228adfb5
+size 46161566

0010000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:815d95301ff19024362a2242372bb33f38d87102f018b02813b63b169f8b8059
+size 46161566

0011000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:516c4094365230219973945a8a5c50e24c9fbc003e50d776c11b028c77423508
+size 46161566

0012000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b68dce2ff7071ed2d14cd3de698fad8f9f0cd82fafbd42425f26a26700fb81f3
+size 46161566

0013000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b68dce2ff7071ed2d14cd3de698fad8f9f0cd82fafbd42425f26a26700fb81f3
+size 46161566

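Each of the checkpoint files above is stored as a Git LFS pointer, so the diff records only the file's SHA-256 digest (`oid`) and byte size rather than the weights themselves. As a minimal sketch of checking a download against its pointer, assuming the repo id `quocvibui/rhino-coder-7b-lora` from the README below and `huggingface_hub` installed:

```python
# Sketch: verify a downloaded checkpoint against the oid/size recorded
# in its Git LFS pointer. Repo id and filename are taken from this upload.
import hashlib

from huggingface_hub import hf_hub_download

# Values copied from the 0005000_adapters.safetensors pointer above.
EXPECTED_SHA256 = "ca9bbd859325ee8a1f5275affe10baed1d881338e500c512750bcc69f138a68f"
EXPECTED_SIZE = 46161566

path = hf_hub_download(
    repo_id="quocvibui/rhino-coder-7b-lora",
    filename="0005000_adapters.safetensors",
)

digest = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert digest.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("checkpoint matches its LFS pointer")
```
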
README.md ADDED
@@ -0,0 +1,105 @@
+---
+license: apache-2.0
+base_model: Qwen/Qwen2.5-Coder-7B-Instruct
+library_name: mlx
+tags:
+- mlx
+- lora
+- code
+- rhino3d
+- rhinoscriptsyntax
+- rhinocommon
+- 3d-modeling
+- cad
+- python
+datasets:
+- custom
+language:
+- en
+pipeline_tag: text-generation
+model-index:
+- name: rhino-coder-7b-lora
+  results: []
+---
+
+# Rhino Coder 7B — LoRA Adapter
+
+A LoRA adapter for [Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct), fine-tuned for **Rhino3D Python scripting** — generating correct `rhinoscriptsyntax` and `RhinoCommon` code from natural language instructions.
+
+This is the **standalone LoRA adapter** (~660 MB). For the full fused model, see [rhino-coder-7b](https://huggingface.co/quocvibui/rhino-coder-7b).
+
+## Usage
+
+### With MLX (Apple Silicon)
+
+```bash
+pip install mlx-lm
+```
+
+```python
+from mlx_lm import load, generate
+
+model, tokenizer = load(
+    "Qwen/Qwen2.5-Coder-7B-Instruct",
+    adapter_path="quocvibui/rhino-coder-7b-lora"
+)
+
+messages = [
+    {"role": "system", "content": "You are an expert Rhino3D Python programmer. Write clean, working scripts using rhinoscriptsyntax and RhinoCommon. Include all necessary imports. Only output code, no explanations unless asked."},
+    {"role": "user", "content": "Create a 10x10 grid of spheres with radius 0.5"},
+]
+
+prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+output = generate(model, tokenizer, prompt=prompt, max_tokens=1024)
+print(output)
+```
+
+### As an OpenAI-compatible server
+
+```bash
+mlx_lm server \
+  --model Qwen/Qwen2.5-Coder-7B-Instruct \
+  --adapter-path quocvibui/rhino-coder-7b-lora \
+  --port 8080
+```
+
+## Adapter Details
+
+| Parameter | Value |
+|-----------|-------|
+| LoRA rank | 8 |
+| LoRA scale | 20.0 |
+| LoRA dropout | 0.0 |
+| LoRA layers | 16 / 28 |
+| Adapter size | ~660 MB |
+
+## Training Details
+
+| Parameter | Value |
+|-----------|-------|
+| Base model | Qwen2.5-Coder-7B-Instruct (4-bit) |
+| Batch size | 1 |
+| Learning rate | 1e-5 |
+| Optimizer | Adam |
+| Max sequence length | 2,048 |
+| Iterations | 9,108 (2 epochs) |
+| Validation loss | 0.184 |
+| Training time | ~1.2 hours on M2 Max |
+
+### Dataset
+
+5,060 instruction-code pairs for Rhino3D Python scripting:
+
+| Source | Count |
+|--------|-------|
+| RhinoCommon API docs | 1,355 |
+| RhinoScriptSyntax source | 926 |
+| Official samples | 93 |
+| Synthetic generation | 187 |
+| Backlabeled GitHub | 1 |
+
+## Links
+
+- [GitHub: rhino3d-SLM](https://github.com/quocvibui/rhino3d-SLM)
+- [Fused model: rhino-coder-7b](https://huggingface.co/quocvibui/rhino-coder-7b)
+- [Base model: Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct)

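The README's server section starts an OpenAI-compatible endpoint on port 8080. A minimal client sketch, assuming the server exposes the standard `/v1/chat/completions` route (as mlx_lm's server does) and that the `requests` package is installed:

```python
# Sketch: query the OpenAI-compatible server started in the README (port 8080).
import requests

payload = {
    "messages": [
        {"role": "system", "content": "You are an expert Rhino3D Python programmer."},
        {"role": "user", "content": "Create a 10x10 grid of spheres with radius 0.5"},
    ],
    "max_tokens": 1024,
    "temperature": 0.2,
}

resp = requests.post(
    "http://localhost:8080/v1/chat/completions", json=payload, timeout=120
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```
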
adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "adapter_path": "training/adapters/rhino-lora",
+  "batch_size": 1,
+  "config": null,
+  "data": "training/data",
+  "fine_tune_type": "lora",
+  "grad_accumulation_steps": 1,
+  "grad_checkpoint": false,
+  "iters": 13662,
+  "learning_rate": 2e-05,
+  "lora_parameters": {
+    "rank": 8,
+    "dropout": 0.0,
+    "scale": 20.0
+  },
+  "lr_schedule": null,
+  "mask_prompt": false,
+  "max_seq_length": 2048,
+  "model": "training/models/codeqwen-7b-4bit",
+  "num_layers": 16,
+  "optimizer": "adam",
+  "optimizer_config": {
+    "adam": {},
+    "adamw": {},
+    "muon": {},
+    "sgd": {},
+    "adafactor": {}
+  },
+  "project_name": null,
+  "report_to": null,
+  "resume_adapter_file": null,
+  "save_every": 1000,
+  "seed": 0,
+  "steps_per_eval": 500,
+  "steps_per_report": 100,
+  "test": false,
+  "test_batches": 500,
+  "train": true,
+  "val_batches": 10
+}

adapters.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:363334571ff35c7db3c173951e8233db6cbcbe5fbe9cbed2d29237ed228adfb5
+size 46161566
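`adapters.safetensors` is the final adapter described by `adapter_config.json`; the numbered files are intermediate checkpoints (cf. `"save_every": 1000` in the config). A hedged sketch for trying one of those checkpoints instead of the final weights, assuming mlx_lm's loader resolves `adapter_path` to a directory containing `adapter_config.json` plus a file named `adapters.safetensors`:

```python
# Sketch: stage an intermediate checkpoint next to adapter_config.json under the
# expected filename, then load it the same way the README loads the final adapter.
import shutil
from pathlib import Path

from huggingface_hub import snapshot_download
from mlx_lm import load, generate

repo_dir = Path(snapshot_download("quocvibui/rhino-coder-7b-lora"))

staging = Path("rhino-lora-ckpt-5000")
staging.mkdir(exist_ok=True)
shutil.copy(repo_dir / "adapter_config.json", staging / "adapter_config.json")
shutil.copy(repo_dir / "0005000_adapters.safetensors", staging / "adapters.safetensors")

model, tokenizer = load("Qwen/Qwen2.5-Coder-7B-Instruct", adapter_path=str(staging))

messages = [{"role": "user", "content": "Create a circle of radius 5 at the origin"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(generate(model, tokenizer, prompt=prompt, max_tokens=256))
```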