---
# Quantization recipe (llmcompressor / compressed-tensors style).
# FP8 scheme: 8-bit float weights quantized statically per-channel,
# 8-bit float input activations quantized dynamically per-token.
# NOTE(review): original file was collapsed onto a single line (invalid
# YAML); nesting reconstructed from the QuantizationModifier schema.
default_stage:
  default_modifiers:
    QuantizationModifier:
      config_groups:
        group_fp8:
          # Modules inside this group that receive the scheme below.
          targets: [Linear]
          # Weight quantization: static, symmetric, one scale per output
          # channel, calibrated with a memoryless min/max observer.
          weights:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: channel
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: memoryless_minmax
            observer_kwargs: {}
          # Input-activation quantization: dynamic (scales computed at
          # runtime per token), so no calibration observer is needed.
          input_activations:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: token
            block_structure: null
            dynamic: true
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: null
            observer_kwargs: {}
          # Output activations are left in full precision.
          output_activations: null
          format: null
      # Module types the modifier applies to overall.
      targets: [Linear]
      # Modules excluded from quantization: the LM head, embeddings,
      # vision-tower modules, attention output projections, linear-attention
      # in/out projections, and MoE gate layers ('re:' entries are regexes).
      ignore:
        - lm_head
        - 're:model\.embed_tokens$'
        - 're:visual.*'
        - 're:model\.visual.*'
        - 're:.*\.self_attn\.o_proj$'
        - 're:.*\.linear_attn\.out_proj$'
        - 're:.*\.mlp\.gate$'
        - 're:.*\.mlp\.shared_expert_gate$'
        - 're:.*\.linear_attn\.in_proj_b$'
        - 're:.*\.linear_attn\.in_proj_a$'
      bypass_divisibility_checks: false