mconcat committed
Commit 1e10d6a · verified · 1 Parent(s): ed2c4b9

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,126 @@
+ ---
+ language:
+ - en
+ - zh
+ license: apache-2.0
+ base_model: Jackrong/Qwopus3.5-27B-v3
+ tags:
+ - qwen3.5
+ - reasoning
+ - quantized
+ - fp8
+ - fp8-dynamic
+ - compressed-tensors
+ - deltanet
+ - chain-of-thought
+ - mtp
+ pipeline_tag: text-generation
+ library_name: transformers
+ model_name: Qwopus3.5-27B-v3-FP8-Dynamic
+ quantized_by: mconcat
+ ---
+
+ # Qwopus3.5-27B-v3-FP8-Dynamic
+
+ FP8-Dynamic quantized version of [Jackrong/Qwopus3.5-27B-v3](https://huggingface.co/Jackrong/Qwopus3.5-27B-v3).
+
+ This checkpoint preserves the hybrid Qwen3.5 DeltaNet + softmax attention architecture and the MTP (Multi-Token Prediction) head from the BF16 source. Most linear layers are quantized to FP8 W8A8, while the most quantization-sensitive projections and the sidecar components are kept in BF16.
+
+ ## Verified Inference
+
+ The local export and a sanity-check evaluation were run on **2026-04-07** on a single **NVIDIA RTX PRO 6000 Blackwell Workstation Edition (96 GB)** with:
+
+ - `transformers==5.3.0`
+ - `llm-compressor==0.14.1.dev24`
+ - `vllm==0.17.1`
+
+ What was verified:
+
+ - FP8 export completed successfully via llm-compressor
+ - MTP weights are included in the main safetensors file
+ - The checkpoint loads in vLLM and generates coherent output
+ - Quick perplexity sanity check: **7.67** (FineWeb-Edu, 50 samples)
+
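+ For reference, a minimal sketch of how a comparable perplexity check can be run. The exact evaluation script was not published; the dataset slice, the 2048-token window, and the mean-NLL aggregation below are assumptions:
+
+ ```python
+ import torch
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, Qwen3_5ForConditionalGeneration
+
+ model_id = "mconcat/Qwopus3.5-27B-v3-FP8-Dynamic"
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ model = Qwen3_5ForConditionalGeneration.from_pretrained(
+     model_id, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
+ )
+
+ # Stream 50 documents from FineWeb-Edu and average the per-token NLL
+ ds = load_dataset("HuggingFaceFW/fineweb-edu", split="train", streaming=True)
+ nlls = []
+ for _, row in zip(range(50), ds):
+     ids = tokenizer(row["text"], return_tensors="pt",
+                     truncation=True, max_length=2048).input_ids.to(model.device)
+     with torch.no_grad():
+         # labels=input_ids yields the mean token NLL over the sequence
+         nlls.append(model(input_ids=ids, labels=ids).loss.float())
+ print("perplexity:", torch.exp(torch.stack(nlls).mean()).item())
+ ```
+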
+ ## Quantization Strategy
+
+ Uniform FP8_DYNAMIC quantization using [llm-compressor](https://github.com/vllm-project/llm-compressor):
+
+ | Precision | Layers |
+ |-----------|--------|
+ | **FP8 W8A8** | most `Linear` layers (per-channel static weight scales, per-token dynamic input scales) |
+ | **BF16** | `lm_head`, `embed_tokens`, `self_attn.o_proj`, DeltaNet `linear_attn.out_proj`, DeltaNet `in_proj_a`/`in_proj_b`, visual encoder, MTP sidecar |
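+
+ This corresponds to a data-free llm-compressor one-shot run. A minimal sketch (FP8_DYNAMIC needs no calibration set; the full ignore list lives in `recipe.yaml` in this repo and is abbreviated here):
+
+ ```python
+ import torch
+ from llmcompressor import oneshot
+ from llmcompressor.modifiers.quantization import QuantizationModifier
+ from transformers import Qwen3_5ForConditionalGeneration
+
+ model = Qwen3_5ForConditionalGeneration.from_pretrained(
+     "Jackrong/Qwopus3.5-27B-v3", torch_dtype=torch.bfloat16, device_map="auto"
+ )
+
+ recipe = QuantizationModifier(
+     targets="Linear",
+     scheme="FP8_DYNAMIC",  # per-channel FP8 weights, per-token dynamic FP8 activations
+     ignore=[
+         "lm_head",
+         "re:model\\.visual.*",
+         "re:.*\\.self_attn\\.o_proj$",
+         "re:.*\\.linear_attn\\.out_proj$",
+         "re:.*\\.linear_attn\\.in_proj_a$",
+         "re:.*\\.linear_attn\\.in_proj_b$",
+         # ...remaining patterns as in recipe.yaml
+     ],
+ )
+
+ oneshot(model=model, recipe=recipe)  # data-free one-shot pass
+ model.save_pretrained("Qwopus3.5-27B-v3-FP8-Dynamic", save_compressed=True)
+ ```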
+
+ Architecture match with the BF16 source (checkable against `config.json`, as sketched below):
+
+ - `model_type=qwen3_5`
+ - `64` text layers (hybrid DeltaNet + softmax, `full_attention_interval=4`)
+ - `mtp_num_hidden_layers=1`
+ - `max_position_embeddings=262144`
+ - `hidden_size=5120`, `intermediate_size=17408`
+ - `vocab_size=248320`
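+
+ A quick way to confirm those values on the downloaded checkpoint (a sketch; the attribute names follow `config.json` in this repo):
+
+ ```python
+ from transformers import AutoConfig
+
+ cfg = AutoConfig.from_pretrained(
+     "mconcat/Qwopus3.5-27B-v3-FP8-Dynamic", trust_remote_code=True
+ )
+ text = cfg.text_config
+ assert cfg.model_type == "qwen3_5"
+ assert text.num_hidden_layers == 64
+ assert text.full_attention_interval == 4
+ assert text.mtp_num_hidden_layers == 1
+ assert text.max_position_embeddings == 262144
+ assert (text.hidden_size, text.intermediate_size) == (5120, 17408)
+ assert text.vocab_size == 248320
+ ```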
+
+ ## Usage
+
+ ### vLLM
+
+ ```bash
+ pip install -U "vllm>=0.17.0" "transformers>=5.3.0"
+ ```
+
+ Standard serving:
+
+ ```bash
+ vllm serve mconcat/Qwopus3.5-27B-v3-FP8-Dynamic \
+   --max-model-len 32768 \
+   --gpu-memory-utilization 0.85 \
+   --max-num-seqs 1 \
+   --skip-mm-profiling \
+   --reasoning-parser qwen3
+ ```
+
+ With MTP speculative decoding:
+
+ ```bash
+ vllm serve mconcat/Qwopus3.5-27B-v3-FP8-Dynamic \
+   --max-model-len 32768 \
+   --gpu-memory-utilization 0.85 \
+   --max-num-seqs 1 \
+   --skip-mm-profiling \
+   --reasoning-parser qwen3 \
+   --speculative-config '{"method":"mtp","num_speculative_tokens":1}'
+ ```
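+
+ Either command exposes vLLM's OpenAI-compatible API (default port 8000). A minimal client sketch; with `--reasoning-parser qwen3`, vLLM returns the reasoning trace separately as `reasoning_content`:
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
+ resp = client.chat.completions.create(
+     model="mconcat/Qwopus3.5-27B-v3-FP8-Dynamic",
+     messages=[{"role": "user", "content": "Summarize FP8 W8A8 quantization in two sentences."}],
+     max_tokens=1024,
+ )
+ print(resp.choices[0].message.reasoning_content)  # chain-of-thought (vLLM extension field)
+ print(resp.choices[0].message.content)            # final answer
+ ```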
+
+ ### Transformers
+
+ ```python
+ from transformers import AutoTokenizer, Qwen3_5ForConditionalGeneration
+ import torch
+
+ model = Qwen3_5ForConditionalGeneration.from_pretrained(
+     "mconcat/Qwopus3.5-27B-v3-FP8-Dynamic",
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     trust_remote_code=True,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     "mconcat/Qwopus3.5-27B-v3-FP8-Dynamic",
+     trust_remote_code=True,
+ )
+ ```
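+
+ Generation then follows the usual chat-template flow. Note that the chat template ends the generation prompt with an opened `<think>` block, so the model emits its reasoning before the answer:
+
+ ```python
+ messages = [{"role": "user", "content": "Briefly explain FP8 dynamic quantization."}]
+ inputs = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ out = model.generate(inputs, max_new_tokens=1024)
+ # Decode only the newly generated tokens
+ print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
+ ```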
+
+ ## Compatibility
+
+ | Framework | Supported | Notes |
+ |-----------|-----------|-------|
+ | vLLM >= 0.17.0 | Yes | Verified with `vllm==0.17.1` on Blackwell; MTP works |
+ | transformers >= 5.3.0 | Yes | Direct loading with `device_map="auto"` |
+ | SGLang | Unknown | Not verified |
+
+ ## Notes
+
+ - This export keeps `self_attn.o_proj` and DeltaNet `linear_attn.out_proj` in BF16 to preserve output-projection fidelity.
+ - MTP weights are embedded in the main `model.safetensors` file (no separate `model.mtp.safetensors`).
+ - The model includes a vision encoder, which is loaded but unused for text-only inference. Pass `--skip-mm-profiling` to vLLM to skip vision-encoder memory profiling.
+ - **Blackwell (SM120) note:** if you encounter TMA-related crashes, apply the one-line vLLM patch that disables TMA on Blackwell: change `>= 9` to `9 <= x < 12` in `vllm/model_executor/layers/fla/ops/utils.py`.
+ - **KV cache:** do not use `--kv-cache-dtype fp8_e4m3` with this model family. The checkpoint lacks calibrated KV scales, so FP8 KV cache will produce degraded output; use the default BF16 KV cache.
chat_template.jinja ADDED
@@ -0,0 +1,88 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant
+ <think>
+ ' }}
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,467 @@
+ {
+   "architectures": [
+     "Qwen3_5ForConditionalGeneration"
+   ],
+   "bos_token_id": null,
+   "dtype": "bfloat16",
+   "eos_token_id": 248046,
+   "image_token_id": 248056,
+   "model_name": "unsloth/Qwen3.5-27B",
+   "model_type": "qwen3_5",
+   "pad_token_id": 248055,
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "format": "float-quantized",
+         "input_activations": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": true,
+           "group_size": null,
+           "num_bits": 8,
+           "observer": null,
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "token",
+           "symmetric": true,
+           "type": "float",
+           "zp_dtype": null
+         },
+         "output_activations": null,
+         "targets": [
+           "Linear"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": false,
+           "group_size": null,
+           "num_bits": 8,
+           "observer": "memoryless_minmax",
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "channel",
+           "symmetric": true,
+           "type": "float",
+           "zp_dtype": null
+         }
+       }
+     },
+     "format": "float-quantized",
+     "global_compression_ratio": null,
+     "ignore": [
+       "model.visual.blocks.0.attn.qkv",
+       "model.visual.blocks.0.attn.proj",
+       "model.visual.blocks.0.mlp.linear_fc1",
+       "model.visual.blocks.0.mlp.linear_fc2",
+       "model.visual.blocks.1.attn.qkv",
+       "model.visual.blocks.1.attn.proj",
+       "model.visual.blocks.1.mlp.linear_fc1",
+       "model.visual.blocks.1.mlp.linear_fc2",
+       "model.visual.blocks.2.attn.qkv",
+       "model.visual.blocks.2.attn.proj",
+       "model.visual.blocks.2.mlp.linear_fc1",
+       "model.visual.blocks.2.mlp.linear_fc2",
+       "model.visual.blocks.3.attn.qkv",
+       "model.visual.blocks.3.attn.proj",
+       "model.visual.blocks.3.mlp.linear_fc1",
+       "model.visual.blocks.3.mlp.linear_fc2",
+       "model.visual.blocks.4.attn.qkv",
+       "model.visual.blocks.4.attn.proj",
+       "model.visual.blocks.4.mlp.linear_fc1",
+       "model.visual.blocks.4.mlp.linear_fc2",
+       "model.visual.blocks.5.attn.qkv",
+       "model.visual.blocks.5.attn.proj",
+       "model.visual.blocks.5.mlp.linear_fc1",
+       "model.visual.blocks.5.mlp.linear_fc2",
+       "model.visual.blocks.6.attn.qkv",
+       "model.visual.blocks.6.attn.proj",
+       "model.visual.blocks.6.mlp.linear_fc1",
+       "model.visual.blocks.6.mlp.linear_fc2",
+       "model.visual.blocks.7.attn.qkv",
+       "model.visual.blocks.7.attn.proj",
+       "model.visual.blocks.7.mlp.linear_fc1",
+       "model.visual.blocks.7.mlp.linear_fc2",
+       "model.visual.blocks.8.attn.qkv",
+       "model.visual.blocks.8.attn.proj",
+       "model.visual.blocks.8.mlp.linear_fc1",
+       "model.visual.blocks.8.mlp.linear_fc2",
+       "model.visual.blocks.9.attn.qkv",
+       "model.visual.blocks.9.attn.proj",
+       "model.visual.blocks.9.mlp.linear_fc1",
+       "model.visual.blocks.9.mlp.linear_fc2",
+       "model.visual.blocks.10.attn.qkv",
+       "model.visual.blocks.10.attn.proj",
+       "model.visual.blocks.10.mlp.linear_fc1",
+       "model.visual.blocks.10.mlp.linear_fc2",
+       "model.visual.blocks.11.attn.qkv",
+       "model.visual.blocks.11.attn.proj",
+       "model.visual.blocks.11.mlp.linear_fc1",
+       "model.visual.blocks.11.mlp.linear_fc2",
+       "model.visual.blocks.12.attn.qkv",
+       "model.visual.blocks.12.attn.proj",
+       "model.visual.blocks.12.mlp.linear_fc1",
+       "model.visual.blocks.12.mlp.linear_fc2",
+       "model.visual.blocks.13.attn.qkv",
+       "model.visual.blocks.13.attn.proj",
+       "model.visual.blocks.13.mlp.linear_fc1",
+       "model.visual.blocks.13.mlp.linear_fc2",
+       "model.visual.blocks.14.attn.qkv",
+       "model.visual.blocks.14.attn.proj",
+       "model.visual.blocks.14.mlp.linear_fc1",
+       "model.visual.blocks.14.mlp.linear_fc2",
+       "model.visual.blocks.15.attn.qkv",
+       "model.visual.blocks.15.attn.proj",
+       "model.visual.blocks.15.mlp.linear_fc1",
+       "model.visual.blocks.15.mlp.linear_fc2",
+       "model.visual.blocks.16.attn.qkv",
+       "model.visual.blocks.16.attn.proj",
+       "model.visual.blocks.16.mlp.linear_fc1",
+       "model.visual.blocks.16.mlp.linear_fc2",
+       "model.visual.blocks.17.attn.qkv",
+       "model.visual.blocks.17.attn.proj",
+       "model.visual.blocks.17.mlp.linear_fc1",
+       "model.visual.blocks.17.mlp.linear_fc2",
+       "model.visual.blocks.18.attn.qkv",
+       "model.visual.blocks.18.attn.proj",
+       "model.visual.blocks.18.mlp.linear_fc1",
+       "model.visual.blocks.18.mlp.linear_fc2",
+       "model.visual.blocks.19.attn.qkv",
+       "model.visual.blocks.19.attn.proj",
+       "model.visual.blocks.19.mlp.linear_fc1",
+       "model.visual.blocks.19.mlp.linear_fc2",
+       "model.visual.blocks.20.attn.qkv",
+       "model.visual.blocks.20.attn.proj",
+       "model.visual.blocks.20.mlp.linear_fc1",
+       "model.visual.blocks.20.mlp.linear_fc2",
+       "model.visual.blocks.21.attn.qkv",
+       "model.visual.blocks.21.attn.proj",
+       "model.visual.blocks.21.mlp.linear_fc1",
+       "model.visual.blocks.21.mlp.linear_fc2",
+       "model.visual.blocks.22.attn.qkv",
+       "model.visual.blocks.22.attn.proj",
+       "model.visual.blocks.22.mlp.linear_fc1",
+       "model.visual.blocks.22.mlp.linear_fc2",
+       "model.visual.blocks.23.attn.qkv",
+       "model.visual.blocks.23.attn.proj",
+       "model.visual.blocks.23.mlp.linear_fc1",
+       "model.visual.blocks.23.mlp.linear_fc2",
+       "model.visual.blocks.24.attn.qkv",
+       "model.visual.blocks.24.attn.proj",
+       "model.visual.blocks.24.mlp.linear_fc1",
+       "model.visual.blocks.24.mlp.linear_fc2",
+       "model.visual.blocks.25.attn.qkv",
+       "model.visual.blocks.25.attn.proj",
+       "model.visual.blocks.25.mlp.linear_fc1",
+       "model.visual.blocks.25.mlp.linear_fc2",
+       "model.visual.blocks.26.attn.qkv",
+       "model.visual.blocks.26.attn.proj",
+       "model.visual.blocks.26.mlp.linear_fc1",
+       "model.visual.blocks.26.mlp.linear_fc2",
+       "model.visual.merger.linear_fc1",
+       "model.visual.merger.linear_fc2",
+       "model.language_model.layers.0.linear_attn.out_proj",
+       "model.language_model.layers.0.linear_attn.in_proj_b",
+       "model.language_model.layers.0.linear_attn.in_proj_a",
+       "model.language_model.layers.1.linear_attn.out_proj",
+       "model.language_model.layers.1.linear_attn.in_proj_b",
+       "model.language_model.layers.1.linear_attn.in_proj_a",
+       "model.language_model.layers.2.linear_attn.out_proj",
+       "model.language_model.layers.2.linear_attn.in_proj_b",
+       "model.language_model.layers.2.linear_attn.in_proj_a",
+       "model.language_model.layers.3.self_attn.o_proj",
+       "model.language_model.layers.4.linear_attn.out_proj",
+       "model.language_model.layers.4.linear_attn.in_proj_b",
+       "model.language_model.layers.4.linear_attn.in_proj_a",
+       "model.language_model.layers.5.linear_attn.out_proj",
+       "model.language_model.layers.5.linear_attn.in_proj_b",
+       "model.language_model.layers.5.linear_attn.in_proj_a",
+       "model.language_model.layers.6.linear_attn.out_proj",
+       "model.language_model.layers.6.linear_attn.in_proj_b",
+       "model.language_model.layers.6.linear_attn.in_proj_a",
+       "model.language_model.layers.7.self_attn.o_proj",
+       "model.language_model.layers.8.linear_attn.out_proj",
+       "model.language_model.layers.8.linear_attn.in_proj_b",
+       "model.language_model.layers.8.linear_attn.in_proj_a",
+       "model.language_model.layers.9.linear_attn.out_proj",
+       "model.language_model.layers.9.linear_attn.in_proj_b",
+       "model.language_model.layers.9.linear_attn.in_proj_a",
+       "model.language_model.layers.10.linear_attn.out_proj",
+       "model.language_model.layers.10.linear_attn.in_proj_b",
+       "model.language_model.layers.10.linear_attn.in_proj_a",
+       "model.language_model.layers.11.self_attn.o_proj",
+       "model.language_model.layers.12.linear_attn.out_proj",
+       "model.language_model.layers.12.linear_attn.in_proj_b",
+       "model.language_model.layers.12.linear_attn.in_proj_a",
+       "model.language_model.layers.13.linear_attn.out_proj",
+       "model.language_model.layers.13.linear_attn.in_proj_b",
+       "model.language_model.layers.13.linear_attn.in_proj_a",
+       "model.language_model.layers.14.linear_attn.out_proj",
+       "model.language_model.layers.14.linear_attn.in_proj_b",
+       "model.language_model.layers.14.linear_attn.in_proj_a",
+       "model.language_model.layers.15.self_attn.o_proj",
+       "model.language_model.layers.16.linear_attn.out_proj",
+       "model.language_model.layers.16.linear_attn.in_proj_b",
+       "model.language_model.layers.16.linear_attn.in_proj_a",
+       "model.language_model.layers.17.linear_attn.out_proj",
+       "model.language_model.layers.17.linear_attn.in_proj_b",
+       "model.language_model.layers.17.linear_attn.in_proj_a",
+       "model.language_model.layers.18.linear_attn.out_proj",
+       "model.language_model.layers.18.linear_attn.in_proj_b",
+       "model.language_model.layers.18.linear_attn.in_proj_a",
+       "model.language_model.layers.19.self_attn.o_proj",
+       "model.language_model.layers.20.linear_attn.out_proj",
+       "model.language_model.layers.20.linear_attn.in_proj_b",
+       "model.language_model.layers.20.linear_attn.in_proj_a",
+       "model.language_model.layers.21.linear_attn.out_proj",
+       "model.language_model.layers.21.linear_attn.in_proj_b",
+       "model.language_model.layers.21.linear_attn.in_proj_a",
+       "model.language_model.layers.22.linear_attn.out_proj",
+       "model.language_model.layers.22.linear_attn.in_proj_b",
+       "model.language_model.layers.22.linear_attn.in_proj_a",
+       "model.language_model.layers.23.self_attn.o_proj",
+       "model.language_model.layers.24.linear_attn.out_proj",
+       "model.language_model.layers.24.linear_attn.in_proj_b",
+       "model.language_model.layers.24.linear_attn.in_proj_a",
+       "model.language_model.layers.25.linear_attn.out_proj",
+       "model.language_model.layers.25.linear_attn.in_proj_b",
+       "model.language_model.layers.25.linear_attn.in_proj_a",
+       "model.language_model.layers.26.linear_attn.out_proj",
+       "model.language_model.layers.26.linear_attn.in_proj_b",
+       "model.language_model.layers.26.linear_attn.in_proj_a",
+       "model.language_model.layers.27.self_attn.o_proj",
+       "model.language_model.layers.28.linear_attn.out_proj",
+       "model.language_model.layers.28.linear_attn.in_proj_b",
+       "model.language_model.layers.28.linear_attn.in_proj_a",
+       "model.language_model.layers.29.linear_attn.out_proj",
+       "model.language_model.layers.29.linear_attn.in_proj_b",
+       "model.language_model.layers.29.linear_attn.in_proj_a",
+       "model.language_model.layers.30.linear_attn.out_proj",
+       "model.language_model.layers.30.linear_attn.in_proj_b",
+       "model.language_model.layers.30.linear_attn.in_proj_a",
+       "model.language_model.layers.31.self_attn.o_proj",
+       "model.language_model.layers.32.linear_attn.out_proj",
+       "model.language_model.layers.32.linear_attn.in_proj_b",
+       "model.language_model.layers.32.linear_attn.in_proj_a",
+       "model.language_model.layers.33.linear_attn.out_proj",
+       "model.language_model.layers.33.linear_attn.in_proj_b",
+       "model.language_model.layers.33.linear_attn.in_proj_a",
+       "model.language_model.layers.34.linear_attn.out_proj",
+       "model.language_model.layers.34.linear_attn.in_proj_b",
+       "model.language_model.layers.34.linear_attn.in_proj_a",
+       "model.language_model.layers.35.self_attn.o_proj",
+       "model.language_model.layers.36.linear_attn.out_proj",
+       "model.language_model.layers.36.linear_attn.in_proj_b",
+       "model.language_model.layers.36.linear_attn.in_proj_a",
+       "model.language_model.layers.37.linear_attn.out_proj",
+       "model.language_model.layers.37.linear_attn.in_proj_b",
+       "model.language_model.layers.37.linear_attn.in_proj_a",
+       "model.language_model.layers.38.linear_attn.out_proj",
+       "model.language_model.layers.38.linear_attn.in_proj_b",
+       "model.language_model.layers.38.linear_attn.in_proj_a",
+       "model.language_model.layers.39.self_attn.o_proj",
+       "model.language_model.layers.40.linear_attn.out_proj",
+       "model.language_model.layers.40.linear_attn.in_proj_b",
+       "model.language_model.layers.40.linear_attn.in_proj_a",
+       "model.language_model.layers.41.linear_attn.out_proj",
+       "model.language_model.layers.41.linear_attn.in_proj_b",
+       "model.language_model.layers.41.linear_attn.in_proj_a",
+       "model.language_model.layers.42.linear_attn.out_proj",
+       "model.language_model.layers.42.linear_attn.in_proj_b",
+       "model.language_model.layers.42.linear_attn.in_proj_a",
+       "model.language_model.layers.43.self_attn.o_proj",
+       "model.language_model.layers.44.linear_attn.out_proj",
+       "model.language_model.layers.44.linear_attn.in_proj_b",
+       "model.language_model.layers.44.linear_attn.in_proj_a",
+       "model.language_model.layers.45.linear_attn.out_proj",
+       "model.language_model.layers.45.linear_attn.in_proj_b",
+       "model.language_model.layers.45.linear_attn.in_proj_a",
+       "model.language_model.layers.46.linear_attn.out_proj",
+       "model.language_model.layers.46.linear_attn.in_proj_b",
+       "model.language_model.layers.46.linear_attn.in_proj_a",
+       "model.language_model.layers.47.self_attn.o_proj",
+       "model.language_model.layers.48.linear_attn.out_proj",
+       "model.language_model.layers.48.linear_attn.in_proj_b",
+       "model.language_model.layers.48.linear_attn.in_proj_a",
+       "model.language_model.layers.49.linear_attn.out_proj",
+       "model.language_model.layers.49.linear_attn.in_proj_b",
+       "model.language_model.layers.49.linear_attn.in_proj_a",
+       "model.language_model.layers.50.linear_attn.out_proj",
+       "model.language_model.layers.50.linear_attn.in_proj_b",
+       "model.language_model.layers.50.linear_attn.in_proj_a",
+       "model.language_model.layers.51.self_attn.o_proj",
+       "model.language_model.layers.52.linear_attn.out_proj",
+       "model.language_model.layers.52.linear_attn.in_proj_b",
+       "model.language_model.layers.52.linear_attn.in_proj_a",
+       "model.language_model.layers.53.linear_attn.out_proj",
+       "model.language_model.layers.53.linear_attn.in_proj_b",
+       "model.language_model.layers.53.linear_attn.in_proj_a",
+       "model.language_model.layers.54.linear_attn.out_proj",
+       "model.language_model.layers.54.linear_attn.in_proj_b",
+       "model.language_model.layers.54.linear_attn.in_proj_a",
+       "model.language_model.layers.55.self_attn.o_proj",
+       "model.language_model.layers.56.linear_attn.out_proj",
+       "model.language_model.layers.56.linear_attn.in_proj_b",
+       "model.language_model.layers.56.linear_attn.in_proj_a",
+       "model.language_model.layers.57.linear_attn.out_proj",
+       "model.language_model.layers.57.linear_attn.in_proj_b",
+       "model.language_model.layers.57.linear_attn.in_proj_a",
+       "model.language_model.layers.58.linear_attn.out_proj",
+       "model.language_model.layers.58.linear_attn.in_proj_b",
+       "model.language_model.layers.58.linear_attn.in_proj_a",
+       "model.language_model.layers.59.self_attn.o_proj",
+       "model.language_model.layers.60.linear_attn.out_proj",
+       "model.language_model.layers.60.linear_attn.in_proj_b",
+       "model.language_model.layers.60.linear_attn.in_proj_a",
+       "model.language_model.layers.61.linear_attn.out_proj",
+       "model.language_model.layers.61.linear_attn.in_proj_b",
+       "model.language_model.layers.61.linear_attn.in_proj_a",
+       "model.language_model.layers.62.linear_attn.out_proj",
+       "model.language_model.layers.62.linear_attn.in_proj_b",
+       "model.language_model.layers.62.linear_attn.in_proj_a",
+       "model.language_model.layers.63.self_attn.o_proj",
+       "lm_head"
+     ],
+     "kv_cache_scheme": null,
+     "quant_method": "compressed-tensors",
+     "quantization_status": "compressed",
+     "sparsity_config": {},
+     "transform_config": {},
+     "version": "0.14.1.dev24+g68f1a7d"
+   },
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attn_output_gate": true,
+     "bos_token_id": null,
+     "dtype": "bfloat16",
+     "eos_token_id": 248044,
+     "full_attention_interval": 4,
+     "head_dim": 256,
+     "hidden_act": "silu",
+     "hidden_size": 5120,
+     "initializer_range": 0.02,
+     "intermediate_size": 17408,
+     "layer_types": [
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention",
+       "linear_attention",
+       "linear_attention",
+       "linear_attention",
+       "full_attention"
+     ],
+     "linear_conv_kernel_dim": 4,
+     "linear_key_head_dim": 128,
+     "linear_num_key_heads": 16,
+     "linear_num_value_heads": 48,
+     "linear_value_head_dim": 128,
+     "mamba_ssm_dtype": "float32",
+     "max_position_embeddings": 262144,
+     "mlp_only_layers": [],
+     "model_type": "qwen3_5_text",
+     "mtp_num_hidden_layers": 1,
+     "mtp_use_dedicated_embeddings": false,
+     "num_attention_heads": 24,
+     "num_hidden_layers": 64,
+     "num_key_value_heads": 4,
+     "pad_token_id": null,
+     "partial_rotary_factor": 0.25,
+     "rms_norm_eps": 1e-06,
+     "rope_parameters": {
+       "mrope_interleaved": true,
+       "mrope_section": [
+         11,
+         11,
+         10
+       ],
+       "partial_rotary_factor": 0.25,
+       "rope_theta": 10000000,
+       "rope_type": "default"
+     },
+     "tie_word_embeddings": false,
+     "use_cache": true,
+     "vocab_size": 248320
+   },
+   "tie_word_embeddings": false,
+   "transformers_version": "5.3.0",
+   "unsloth_fixed": true,
+   "unsloth_version": "2026.3.18",
+   "video_token_id": 248057,
+   "vision_config": {
+     "deepstack_visual_indexes": [],
+     "depth": 27,
+     "dtype": "bfloat16",
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "in_channels": 3,
+     "initializer_range": 0.02,
+     "intermediate_size": 4304,
+     "model_type": "qwen3_5",
+     "num_heads": 16,
+     "num_position_embeddings": 2304,
+     "out_hidden_size": 5120,
+     "patch_size": 16,
+     "spatial_merge_size": 2,
+     "temporal_patch_size": 2
+   },
+   "vision_end_token_id": 248054,
+   "vision_start_token_id": 248053
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 248046,
+   "pad_token_id": 248055,
+   "transformers_version": "5.3.0",
+   "use_cache": false
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3cf4c9741f2c29b3580933119c3353bb3c8ab0611273da8bfdc2d4f721bf59b
+ size 32407100312
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "data_format": "channels_first",
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Qwen2VLImageProcessorFast",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "merge_size": 2,
+   "patch_size": 16,
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "longest_edge": 16777216,
+     "shortest_edge": 65536
+   },
+   "temporal_patch_size": 2
+ }
processor_config.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "image_processor": {
+     "data_format": "channels_first",
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+       0.5,
+       0.5,
+       0.5
+     ],
+     "image_processor_type": "Qwen2VLImageProcessorFast",
+     "image_std": [
+       0.5,
+       0.5,
+       0.5
+     ],
+     "merge_size": 2,
+     "patch_size": 16,
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+       "longest_edge": 16777216,
+       "shortest_edge": 65536
+     },
+     "temporal_patch_size": 2
+   },
+   "processor_class": "Qwen3VLProcessor",
+   "video_processor": {
+     "data_format": "channels_first",
+     "default_to_square": true,
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "do_sample_frames": true,
+     "fps": 2,
+     "image_mean": [
+       0.5,
+       0.5,
+       0.5
+     ],
+     "image_std": [
+       0.5,
+       0.5,
+       0.5
+     ],
+     "max_frames": 768,
+     "merge_size": 2,
+     "min_frames": 4,
+     "patch_size": 16,
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "return_metadata": false,
+     "size": {
+       "longest_edge": 25165824,
+       "shortest_edge": 4096
+     },
+     "temporal_patch_size": 2,
+     "video_processor_type": "Qwen3VLVideoProcessor"
+   }
+ }
recipe.yaml ADDED
@@ -0,0 +1,39 @@
+ default_stage:
+   default_modifiers:
+     QuantizationModifier:
+       config_groups:
+         group_fp8:
+           targets: [Linear]
+           weights:
+             num_bits: 8
+             type: float
+             symmetric: true
+             group_size: null
+             strategy: channel
+             block_structure: null
+             dynamic: false
+             actorder: null
+             scale_dtype: null
+             zp_dtype: null
+             observer: memoryless_minmax
+             observer_kwargs: {}
+           input_activations:
+             num_bits: 8
+             type: float
+             symmetric: true
+             group_size: null
+             strategy: token
+             block_structure: null
+             dynamic: true
+             actorder: null
+             scale_dtype: null
+             zp_dtype: null
+             observer: null
+             observer_kwargs: {}
+           output_activations: null
+           format: null
+       targets: [Linear]
+       ignore: [lm_head, 're:model\.embed_tokens$', 're:visual.*', 're:model\.visual.*', 're:.*\.self_attn\.o_proj$',
+         're:.*\.linear_attn\.out_proj$', 're:.*\.mlp\.gate$', 're:.*\.mlp\.shared_expert_gate$',
+         're:.*\.linear_attn\.in_proj_b$', 're:.*\.linear_attn\.in_proj_a$']
+       bypass_divisibility_checks: false
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87a7830d63fcf43bf241c3c5242e96e62dd3fdc29224ca26fed8ea333db72de4
+ size 19989343
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "audio_bos_token": "<|audio_start|>",
+   "audio_eos_token": "<|audio_end|>",
+   "audio_token": "<|audio_pad|>",
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "image_token": "<|image_pad|>",
+   "is_local": true,
+   "model_max_length": 262144,
+   "model_specific_special_tokens": {
+     "audio_bos_token": "<|audio_start|>",
+     "audio_eos_token": "<|audio_end|>",
+     "audio_token": "<|audio_pad|>",
+     "image_token": "<|image_pad|>",
+     "video_token": "<|video_pad|>",
+     "vision_bos_token": "<|vision_start|>",
+     "vision_eos_token": "<|vision_end|>"
+   },
+   "pad_token": "<|vision_pad|>",
+   "padding_side": "right",
+   "pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+   "processor_class": "Qwen3VLProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": null,
+   "video_token": "<|video_pad|>",
+   "vision_bos_token": "<|vision_start|>",
+   "vision_eos_token": "<|vision_end|>"
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "do_sample_frames": true,
+   "fps": 2,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "max_frames": 768,
+   "merge_size": 2,
+   "min_frames": 4,
+   "patch_size": 16,
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_metadata": false,
+   "size": {
+     "longest_edge": 25165824,
+     "shortest_edge": 4096
+   },
+   "temporal_patch_size": 2,
+   "video_processor_type": "Qwen3VLVideoProcessor"
+ }