import copy
import dataclasses
import importlib.metadata
import json
import os
from dataclasses import dataclass, is_dataclass
from enum import Enum
from inspect import Parameter, signature
from typing import Any, Optional, Union

from packaging import version

from ..utils import (
    is_auto_awq_available,
    is_auto_gptq_available,
    is_compressed_tensors_available,
    is_gptqmodel_available,
    is_hqq_available,
    is_quark_available,
    is_torch_available,
    is_torchao_available,
    logging,
)


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class QuantizationMethod(str, Enum):
    BITS_AND_BYTES = "bitsandbytes"
    GPTQ = "gptq"
    AWQ = "awq"
    AQLM = "aqlm"
    VPTQ = "vptq"
    QUANTO = "quanto"
    EETQ = "eetq"
    HIGGS = "higgs"
    HQQ = "hqq"
    COMPRESSED_TENSORS = "compressed-tensors"
    FBGEMM_FP8 = "fbgemm_fp8"
    TORCHAO = "torchao"
    BITNET = "bitnet"
    SPQR = "spqr"
    FP8 = "fp8"
    QUARK = "quark"
    FP_QUANT = "fp_quant"
    AUTOROUND = "auto-round"
    MXFP4 = "mxfp4"


class AWQLinearVersion(str, Enum):
    GEMM = "gemm"
    GEMV = "gemv"
    EXLLAMA = "exllama"
    IPEX = "ipex"

    @staticmethod
    def from_str(version: str) -> "AWQLinearVersion":
        version = version.lower()
        if version == "gemm":
            return AWQLinearVersion.GEMM
        elif version == "gemv":
            return AWQLinearVersion.GEMV
        elif version == "exllama":
            return AWQLinearVersion.EXLLAMA
        elif version == "ipex":
            return AWQLinearVersion.IPEX
        else:
            raise ValueError(f"Unknown AWQLinearVersion {version}")


class AwqBackendPackingMethod(str, Enum):
    AUTOAWQ = "autoawq"
    LLMAWQ = "llm-awq"


@dataclass
class QuantizationConfigMixin:
    """
    Mixin class for quantization config.
    """

    quant_method: QuantizationMethod

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """
        Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.

        Args:
            config_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the configuration object.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
                `PreTrainedModel`.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the configuration object.

        Returns:
            [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
        """
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this configuration instance's parameters will be saved.
            use_diff (`bool`, *optional*, defaults to `True`):
                If set to `True`, only the difference between the config instance and the default
                `QuantizationConfig()` is serialized to JSON file.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        return copy.deepcopy(self.__dict__)

    def __iter__(self):
        """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
        for attr, value in copy.deepcopy(self.__dict__).items():
            yield attr, value

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        """
        Serializes this instance to a JSON string.

        Args:
            use_diff (`bool`, *optional*, defaults to `True`):
                If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
                is serialized to JSON string.

        Returns:
            `str`: String containing all the attributes that make up this configuration instance in JSON format.
        """
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def update(self, **kwargs):
        """
        Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
        returning all the unused kwargs.

        Args:
            kwargs (`dict[str, Any]`):
                Dictionary of attributes to tentatively update this class.

        Returns:
            `dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
        """
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
                to_remove.append(key)

        unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
        return unused_kwargs
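
# Illustrative usage (not part of the original module): any QuantizationConfigMixin subclass can be
# round-tripped through the serialization helpers above. A minimal sketch, assuming this module is
# importable through the `transformers` package:
#
#     from transformers import BitsAndBytesConfig
#
#     config = BitsAndBytesConfig(load_in_8bit=True)
#     config.to_json_file("quantization_config.json")   # writes the full to_dict() payload as JSON
#     restored, unused = BitsAndBytesConfig.from_dict(config.to_dict(), return_unused_kwargs=True)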

@dataclass
class AutoRoundConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using AutoRound quantization.

    Args:
        bits (`int`, *optional*, defaults to 4):
            The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
        group_size (`int`, *optional*, defaults to 128):
            Group-size value.
        sym (`bool`, *optional*, defaults to `True`):
            Symmetric quantization or not.
        backend (`str`, *optional*, defaults to `"auto"`):
            The kernel to use, e.g., ipex, marlin, exllamav2, triton, etc. Ref.
            https://github.com/intel/auto-round?tab=readme-ov-file#specify-backend
    """

    def __init__(
        self,
        bits: int = 4,
        group_size: int = 128,
        sym: bool = True,
        backend: str = "auto",
        **kwargs,
    ):
        self.bits = bits
        self.group_size = group_size
        self.sym = sym
        self.backend = backend
        self.packing_format = "auto_round:gptq"
        if kwargs is not None:
            for key, value in kwargs.items():
                setattr(self, key, value)
        self.quant_method = QuantizationMethod.AUTOROUND
        self.post_init()

    def post_init(self):
        r"""Safety checker that arguments are correct."""
        if self.bits not in [2, 3, 4, 8]:
            raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}")
        if self.group_size != -1 and self.group_size <= 0:
            raise ValueError("group_size must be greater than 0 or equal to -1")

    def get_loading_attributes(self):
        loading_attributes_dict = {"backend": self.backend}
        return loading_attributes_dict

    def to_dict(self):
        config_dict = super().to_dict()
        return config_dict

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        quant_method = config_dict["quant_method"]
        if "auto-round" not in quant_method and quant_method not in ["gptq", "awq"]:
            raise NotImplementedError(
                "Failed to convert to auto_round format. Only `gptqv1`, `awq`, and `auto-round` formats are supported."
            )
        if "gptq" in quant_method and "meta" in config_dict:
            raise NotImplementedError("Failed to convert gptq format to auto_round format. Only supports `gptqv1`")
        if "awq" in quant_method and config_dict.get("version", "gemm") != "gemm":
            raise NotImplementedError(
                "Failed to convert awq format to auto_round format. Only supports awq format with gemm version"
            )
        if "auto-round" not in quant_method:
            config_dict["packing_format"] = f"auto_round:{quant_method}"
        return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
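
# Illustrative usage (hedged sketch, the model id is a placeholder): an AutoRound checkpoint can be loaded
# by passing an AutoRoundConfig with the desired kernel backend to `from_pretrained`.
#
#     from transformers import AutoModelForCausalLM, AutoRoundConfig
#
#     quantization_config = AutoRoundConfig(backend="auto")
#     model = AutoModelForCausalLM.from_pretrained(
#         "<auto-round-quantized-model-id>", quantization_config=quantization_config
#     )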

@dataclass
class HqqConfig(QuantizationConfigMixin):
    """
    This is wrapper around hqq's BaseQuantizeConfig.

    Args:
        nbits (`int`, *optional*, defaults to 4):
            Number of bits. Supported values are (8, 4, 3, 2, 1).
        group_size (`int`, *optional*, defaults to 64):
            Group-size value. Supported values are any value that is divisible by weight.shape[axis].
        view_as_float (`bool`, *optional*, defaults to `False`):
            View the quantized weight as float (used in distributed training) if set to `True`.
        axis (`Optional[int]`, *optional*):
            Axis along which grouping is performed. Supported values are 0 or 1.
        dynamic_config (dict, *optional*):
            Parameters for dynamic configuration. The key is the name tag of the layer and the value is a quantization
            config. If set, each layer specified by its id will use its dedicated quantization configuration.
        skip_modules (`list[str]`, *optional*, defaults to `['lm_head']`):
            List of `nn.Linear` layers to skip.
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
    """

    def __init__(
        self,
        nbits: int = 4,
        group_size: int = 64,
        view_as_float: bool = False,
        axis: Optional[int] = None,
        dynamic_config: Optional[dict] = None,
        skip_modules: list[str] = ["lm_head"],
        **kwargs,
    ):
        if is_hqq_available():
            from hqq.core.quantize import BaseQuantizeConfig as HQQBaseQuantizeConfig
        else:
            raise ImportError(
                "A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: "
                "`https://github.com/mobiusml/hqq/`."
            )

        for deprecated_key in ["quant_zero", "quant_scale", "offload_meta"]:
            if deprecated_key in kwargs:
                logger.info(
                    deprecated_key + " is deprecated. This parameter will be ignored in quantization settings."
                )

        if axis is None:
            axis = 1
            logger.info("Setting axis=1 as faster backends such as TorchAO or BitBlas are only compatible with it.")

        if axis not in [0, 1]:
            raise ValueError("Invalid axis value. Only 0 and 1 are allowed.")

        if dynamic_config is not None:
            self.quant_config = {}
            for key in dynamic_config:
                self.quant_config[key] = HQQBaseQuantizeConfig(**dynamic_config[key])
        else:
            self.quant_config = HQQBaseQuantizeConfig(
                **{
                    "nbits": nbits,
                    "group_size": group_size,
                    "view_as_float": view_as_float,
                    "axis": axis,
                }
            )

        self.quant_method = QuantizationMethod.HQQ
        self.skip_modules = skip_modules

        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
        """
        pass

    @classmethod
    def from_dict(cls, config: dict[str, Any]):
        """
        Override from_dict, used in AutoQuantizationConfig.from_dict in quantizers/auto.py
        """
        instance = cls()
        instance.quant_config = config["quant_config"]
        instance.skip_modules = config["skip_modules"]
        return instance

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        return {
            "quant_config": self.quant_config,
            "quant_method": self.quant_method,
            "skip_modules": self.skip_modules,
        }

    def __repr__(self):
        config_dict = self.to_dict()
        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}"

    def to_diff_dict(self) -> dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability,
        while serializing to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = HqqConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
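
# Illustrative usage (hedged sketch, the model id is a placeholder): HqqConfig quantizes a model on the fly
# at load time; `dynamic_config` may provide per-layer settings keyed by the layer name tag.
#
#     from transformers import AutoModelForCausalLM, HqqConfig
#
#     quant_config = HqqConfig(nbits=4, group_size=64)
#     model = AutoModelForCausalLM.from_pretrained(
#         "<model-id>", device_map="cuda", quantization_config=quant_config
#     )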

@dataclass
class BitsAndBytesConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `bitsandbytes`.

    This replaces `load_in_8bit` or `load_in_4bit`, therefore both options are mutually exclusive.

    Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`,
    then more arguments will be added to this class.

    Args:
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 8-bit quantization with LLM.int8().
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from
            `bitsandbytes`.
        llm_int8_threshold (`float`, *optional*, defaults to 6.0):
            This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix
            Multiplication for Transformers at Scale` paper: https://huggingface.co/papers/2208.07339. Any hidden
            states value that is above this threshold will be considered an outlier and the operation on those values
            will be done in fp16. A good default threshold is 6, but a lower threshold might be needed for more
            unstable models (small models, fine-tuning).
        llm_int8_skip_modules (`list[str]`, *optional*):
            An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as
            Jukebox that has several heads in different places and not necessarily at the last position. For example
            for `CausalLM` models, the last `lm_head` is kept in its original `dtype`.
        llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`):
            This flag is used for advanced use cases and users that are aware of this feature. If you want to split
            your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use
            this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8
            operations will not be run on CPU.
        llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`):
            This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not
            have to be converted back and forth for the backward pass.
        bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`):
            This sets the computational type which might be different than the input type. For example, inputs might
            be fp32, but computation can be set to bf16 for speedups.
        bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`):
            This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types
            which are specified by `fp4` or `nf4`.
        bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`):
            This flag is used for nested quantization where the quantization constants from the first quantization are
            quantized again.
        bnb_4bit_quant_storage (`torch.dtype` or str, *optional*, defaults to `torch.uint8`):
            This sets the storage type to pack the quantized 4-bit params.
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        bnb_4bit_quant_storage=None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.BITS_AND_BYTES

        if load_in_4bit and load_in_8bit:
            raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")

        self._load_in_8bit = load_in_8bit
        self._load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        if bnb_4bit_quant_storage is None:
            self.bnb_4bit_quant_storage = torch.uint8
        elif isinstance(bnb_4bit_quant_storage, str):
            if bnb_4bit_quant_storage not in ["float16", "float32", "int8", "uint8", "float64", "bfloat16"]:
                raise ValueError(
                    "`bnb_4bit_quant_storage` must be a valid string (one of 'float16', 'float32', 'int8', 'uint8', "
                    "'float64', 'bfloat16') "
                )
            self.bnb_4bit_quant_storage = getattr(torch, bnb_4bit_quant_storage)
        elif isinstance(bnb_4bit_quant_storage, torch.dtype):
            self.bnb_4bit_quant_storage = bnb_4bit_quant_storage
        else:
            raise ValueError("bnb_4bit_quant_storage must be a string or a torch.dtype")

        if kwargs:
            logger.info(f"Unused kwargs: {list(kwargs.keys())}. These kwargs are not used in {self.__class__}.")

        self.post_init()

    @property
    def load_in_4bit(self):
        return self._load_in_4bit

    @load_in_4bit.setter
    def load_in_4bit(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("load_in_4bit must be a boolean")

        if self.load_in_8bit and value:
            raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
        self._load_in_4bit = value

    @property
    def load_in_8bit(self):
        return self._load_in_8bit

    @load_in_8bit.setter
    def load_in_8bit(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("load_in_8bit must be a boolean")

        if self.load_in_4bit and value:
            raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
        self._load_in_8bit = value

    def post_init(self):
        r"""
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
        """
        if not isinstance(self.load_in_4bit, bool):
            raise TypeError("load_in_4bit must be a boolean")

        if not isinstance(self.load_in_8bit, bool):
            raise TypeError("load_in_8bit must be a boolean")

        if not isinstance(self.llm_int8_threshold, float):
            raise TypeError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise TypeError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise TypeError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise TypeError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise TypeError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise TypeError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        r"""
        Returns `True` if the model is quantizable, `False` otherwise.
        """
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        r"""
        This method returns the quantization method used for the model. If the model is not quantizable, it returns
        `None`.
        """
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1]
        output["load_in_4bit"] = self.load_in_4bit
        output["load_in_8bit"] = self.load_in_8bit

        return output

    def __repr__(self):
        config_dict = self.to_dict()
        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}"

    def to_diff_dict(self) -> dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability,
        while serializing to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
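
# Illustrative usage (hedged sketch, the model id is a placeholder): a typical NF4 setup with nested
# quantization and bf16 compute, passed to `from_pretrained`.
#
#     import torch
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     bnb_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#     model = AutoModelForCausalLM.from_pretrained("<model-id>", quantization_config=bnb_config)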

class ExllamaVersion(int, Enum):
    ONE = 1
    TWO = 2


@dataclass
class GPTQConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `optimum` api for GPTQ quantization relying on the auto_gptq or gptqmodel backend.

    Args:
        bits (`int`):
            The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
        tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*):
            The tokenizer used to process the dataset. You can pass either a custom tokenizer object, the *model id*
            of a predefined tokenizer hosted on huggingface.co, or a path to a *directory* containing vocabulary files
            required by the tokenizer.
        dataset (`Union[list[str]]`, *optional*):
            The dataset used for quantization. You can provide your own dataset in a list of string or just use the
            original datasets used in GPTQ paper ['wikitext2','c4','c4-new'].
        group_size (`int`, *optional*, defaults to 128):
            The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
        damp_percent (`float`, *optional*, defaults to 0.1):
            The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1.
        desc_act (`bool`, *optional*, defaults to `False`):
            Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly
            speed up inference but the perplexity may become slightly worse. Also known as act-order.
        sym (`bool`, *optional*, defaults to `True`):
            Whether to use symmetric quantization.
        true_sequential (`bool`, *optional*, defaults to `True`):
            Whether to perform sequential quantization even within a single Transformer block, so that each layer is
            quantized using inputs that have passed through the previously quantized layers.
        checkpoint_format (`str`, *optional*, defaults to `"gptq"`):
            GPTQ weight format. `gptq` (v1) is supported by both gptqmodel and auto-gptq. `gptq_v2` is gptqmodel only.
        meta (`dict[str, any]`, *optional*):
            Properties, such as tooling:version, that do not directly contribute to quantization or quant inference,
            e.g. `meta.quantizer`: ["optimum:_version_", "gptqmodel:_version_"].
        backend (`str`, *optional*):
            Controls which gptq kernel to be used. Valid values for gptqmodel are `auto`, `auto_trainable` and more.
            For auto-gptq, only valid value is None and `auto_trainable`. Ref gptqmodel backends:
            https://github.com/ModelCloud/GPTQModel/blob/main/gptqmodel/utils/backend.py
        use_cuda_fp16 (`bool`, *optional*, defaults to `False`):
            Whether or not to use optimized cuda kernel for fp16 model. Need to have model in fp16. Auto-gptq only.
        model_seqlen (`int`, *optional*):
            The maximum sequence length that the model can take.
        block_name_to_quantize (`str`, *optional*):
            The transformers block name to quantize. If None, we will infer the block name using common patterns
            (e.g. model.layers).
        module_name_preceding_first_block (`list[str]`, *optional*):
            The layers that are preceding the first Transformer block.
        batch_size (`int`, *optional*, defaults to 1):
            The batch size used when processing the dataset.
        pad_token_id (`int`, *optional*):
            The pad token id. Needed to prepare the dataset when `batch_size` > 1.
        use_exllama (`bool`, *optional*):
            Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4.
        max_input_length (`int`, *optional*):
            The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input
            length. It is specific to the exllama backend with act-order.
        exllama_config (`dict[str, Any]`, *optional*):
            The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults
            to `{"version": 1}` if unset.
        cache_block_outputs (`bool`, *optional*, defaults to `True`):
            Whether to cache block outputs to reuse as inputs for the succeeding block.
        modules_in_block_to_quantize (`list[list[str]]`, *optional*):
            List of list of module names to quantize in the specified block. This argument is useful to exclude
            certain linear modules from being quantized. Each list is quantized sequentially. For example,
            `[["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"], ["self_attn.o_proj"]]` first quantizes the
            q, k, v layers simultaneously (they are independent), then quantizes `self_attn.o_proj` with the q, k, v
            layers already quantized, so it sees the real inputs it will get when the model is quantized.
    """

    def __init__(
        self,
        bits: int,
        tokenizer: Any = None,
        dataset: Optional[Union[list[str], str]] = None,
        group_size: int = 128,
        damp_percent: float = 0.1,
        desc_act: bool = False,
        sym: bool = True,
        true_sequential: bool = True,
        checkpoint_format: str = "gptq",
        meta: Optional[dict[str, Any]] = None,
        backend: Optional[str] = None,
        use_cuda_fp16: bool = False,
        model_seqlen: Optional[int] = None,
        block_name_to_quantize: Optional[str] = None,
        module_name_preceding_first_block: Optional[list[str]] = None,
        batch_size: int = 1,
        pad_token_id: Optional[int] = None,
        use_exllama: Optional[bool] = None,
        max_input_length: Optional[int] = None,
        exllama_config: Optional[dict[str, Any]] = None,
        cache_block_outputs: bool = True,
        modules_in_block_to_quantize: Optional[list[list[str]]] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.GPTQ
        self.bits = bits
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.group_size = group_size
        self.damp_percent = damp_percent
        self.desc_act = desc_act
        self.sym = sym
        self.true_sequential = true_sequential
        self.checkpoint_format = checkpoint_format.lower()
        self.meta = meta
        self.backend = backend.lower() if isinstance(backend, str) else backend
        self.use_cuda_fp16 = use_cuda_fp16
        self.model_seqlen = model_seqlen
        self.block_name_to_quantize = block_name_to_quantize
        self.module_name_preceding_first_block = module_name_preceding_first_block
        self.batch_size = batch_size
        self.pad_token_id = pad_token_id
        self.use_exllama = use_exllama
        self.max_input_length = max_input_length
        self.exllama_config = exllama_config
        self.cache_block_outputs = cache_block_outputs
        self.modules_in_block_to_quantize = modules_in_block_to_quantize
        self.post_init()

    def get_loading_attributes(self):
        attributes_dict = copy.deepcopy(self.__dict__)
        loading_attributes = ["use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length", "backend"]
        loading_attributes_dict = {i: j for i, j in attributes_dict.items() if i in loading_attributes}
        return loading_attributes_dict

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        if self.bits not in [2, 3, 4, 8]:
            raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}")
        if self.group_size != -1 and self.group_size <= 0:
            raise ValueError("group_size must be greater than 0 or equal to -1")
        if not (0 < self.damp_percent < 1):
            raise ValueError("damp_percent must between 0 and 1.")

        if self.dataset is not None:
            if isinstance(self.dataset, str):
                if self.dataset in ["ptb", "ptb-new"]:
                    raise ValueError(
                        f"{self.dataset} dataset was deprecated. You can only choose between "
                        "['wikitext2','c4','c4-new']"
                    )
                if self.dataset not in ["wikitext2", "c4", "c4-new"]:
                    raise ValueError(
                        "You have entered a string value for dataset. You can only choose between "
                        f"['wikitext2','c4','c4-new'], but we found {self.dataset}"
                    )
            elif not isinstance(self.dataset, list):
                raise ValueError(
                    "dataset needs to be either a list of string or a value in "
                    f"['wikitext2','c4','c4-new'], but we found {self.dataset}"
                )

        # keep `backend` and `use_exllama` compatible between gptqmodel (full support) and auto-gptq (partial support)
        if is_gptqmodel_available():
            if self.backend is None:
                self.backend = "auto_trainable" if self.use_exllama is not None and not self.use_exllama else "auto"
        else:
            if self.backend == "auto_trainable":
                self.use_exllama = False

        # auto-gptq specific kernel control logic
        if self.use_exllama is None:
            # New default behaviour
            self.use_exllama = True

        if self.exllama_config is None:
            self.exllama_config = {"version": ExllamaVersion.ONE}
        else:
            if "version" not in self.exllama_config:
                raise ValueError("`exllama_config` needs to have a `version` key.")
            elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]:
                exllama_version = self.exllama_config["version"]
                raise ValueError(
                    "Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized "
                    f"version {exllama_version}"
                )

        if self.bits == 4 and self.use_exllama:
            if self.exllama_config["version"] == ExllamaVersion.ONE:
                logger.info(
                    "You have activated exllama backend. Note that you can get better inference "
                    "speed using exllamav2 kernel by setting `exllama_config`."
                )
            elif self.exllama_config["version"] == ExllamaVersion.TWO:
                if is_auto_gptq_available():
                    optimum_version = version.parse(importlib.metadata.version("optimum"))
                    autogptq_version = version.parse(importlib.metadata.version("auto_gptq"))
                    if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"):
                        raise ValueError(
                            "You need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version "
                            f"installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}"
                        )

        if self.modules_in_block_to_quantize is not None:
            optimum_version = version.parse(importlib.metadata.version("optimum"))
            if optimum_version < version.parse("1.15.0"):
                raise ValueError(
                    "You current version of `optimum` does not support `modules_in_block_to_quantize` quantization "
                    "argument, please upgrade `optimum` package to a version superior than 1.15.0 ."
                )

    def to_dict(self) -> dict[str, Any]:
        config_dict = super().to_dict()
        config_dict.pop("disable_exllama", None)
        return config_dict

    def to_dict_optimum(self):
        """
        Get compatible dict for optimum gptq config
        """
        quant_dict = self.to_dict()
        # make it compatible with optimum config
        quant_dict["disable_exllama"] = not self.use_exllama
        return quant_dict

    @classmethod
    def from_dict_optimum(cls, config_dict):
        """
        Get compatible class with optimum gptq config dict
        """
        if "disable_exllama" in config_dict:
            config_dict["use_exllama"] = not config_dict["disable_exllama"]
            # switch to None to not trigger the warning
            config_dict.pop("disable_exllama")

        config = cls(**config_dict)
        return config
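
# Illustrative usage (hedged sketch, the model id is a placeholder): quantizing a model with GPTQ needs a
# tokenizer and a calibration dataset; loading an already-quantized checkpoint only needs the config stored
# in its config.json.
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
#
#     tokenizer = AutoTokenizer.from_pretrained("<model-id>")
#     gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
#     quantized = AutoModelForCausalLM.from_pretrained("<model-id>", quantization_config=gptq_config)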

@dataclass
class AwqConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using the `auto-awq` library awq quantization relying on the auto_awq backend.

    Args:
        bits (`int`, *optional*, defaults to 4):
            The number of bits to quantize to.
        group_size (`int`, *optional*, defaults to 128):
            The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
        zero_point (`bool`, *optional*, defaults to `True`):
            Whether to use zero point quantization.
        version (`AWQLinearVersion`, *optional*, defaults to `AWQLinearVersion.GEMM`):
            The version of the quantization algorithm to use. GEMM is better for big batch_size (e.g. >= 8), otherwise
            GEMV is better (e.g. < 8). GEMM models are compatible with Exllama kernels.
        backend (`AwqBackendPackingMethod`, *optional*, defaults to `AwqBackendPackingMethod.AUTOAWQ`):
            The quantization backend. Some models might be quantized using `llm-awq` backend. This is useful for users
            that quantize their own models using `llm-awq` library.
        do_fuse (`bool`, *optional*, defaults to `False`):
            Whether to fuse attention and mlp layers together for faster inference.
        fuse_max_seq_len (`int`, *optional*):
            The maximum sequence length to generate when using fusing.
        modules_to_fuse (`dict`, *optional*, default to `None`):
            Overwrite the natively supported fusing scheme with the one specified by the users.
        modules_to_not_convert (`list`, *optional*, default to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). Note
            you cannot quantize directly with transformers, please refer to `AutoAWQ` documentation for quantizing HF
            models.
        exllama_config (`dict[str, Any]`, *optional*):
            You can specify the version of the exllama kernel through the `version` key, the maximum sequence length
            through the `max_input_len` key, and the maximum batch size through the `max_batch_size` key. Defaults to
            `{"version": 2, "max_input_len": 2048, "max_batch_size": 8}` if unset.
    """

    def __init__(
        self,
        bits: int = 4,
        group_size: int = 128,
        zero_point: bool = True,
        version: AWQLinearVersion = AWQLinearVersion.GEMM,
        backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ,
        do_fuse: Optional[bool] = None,
        fuse_max_seq_len: Optional[int] = None,
        modules_to_fuse: Optional[dict] = None,
        modules_to_not_convert: Optional[list] = None,
        exllama_config: Optional[dict[str, int]] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.AWQ

        self.bits = bits
        self.group_size = group_size
        self.zero_point = zero_point
        self.version = version
        self.backend = backend
        self.fuse_max_seq_len = fuse_max_seq_len
        self.modules_to_not_convert = modules_to_not_convert
        self.exllama_config = exllama_config

        self.modules_to_fuse = modules_to_fuse
        if do_fuse is None:
            self.do_fuse = modules_to_fuse is not None and len(modules_to_fuse) > 0
        else:
            self.do_fuse = do_fuse

        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]:
            raise ValueError(
                f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and "
                f"{AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}"
            )

        self.version = AWQLinearVersion.from_str(self.version)
        if self.version not in [
            AWQLinearVersion.GEMM,
            AWQLinearVersion.GEMV,
            AWQLinearVersion.EXLLAMA,
            AWQLinearVersion.IPEX,
        ]:
            raise ValueError(
                "Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, "
                f"AWQLinearVersion.EXLLAMA, AWQLinearVersion.IPEX] - not recognized version {self.version}"
            )

        if self.backend == AwqBackendPackingMethod.LLMAWQ:
            # Only cuda and xpu devices can run this backend
            if not (torch.cuda.is_available() or torch.xpu.is_available()):
                raise ValueError("LLM-AWQ backend is only supported on CUDA and XPU")
            if torch.cuda.is_available():
                compute_capability = torch.cuda.get_device_capability()
                major, minor = compute_capability
                if major < 8:
                    raise ValueError("LLM-AWQ backend is only supported on CUDA GPUs with compute capability >= 8.0")

        if self.do_fuse and self.fuse_max_seq_len is None:
            raise ValueError(
                "You cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid "
                "`fuse_max_seq_len` for your usecase"
            )

        if self.do_fuse:
            awq_version_supports_fusing = False
            MIN_AWQ_VERSION = "0.1.7"
            if is_auto_awq_available():
                awq_version_supports_fusing = version.parse(importlib.metadata.version("autoawq")) >= version.parse(
                    MIN_AWQ_VERSION
                )
            if not awq_version_supports_fusing:
                raise ValueError(
                    "You current version of `autoawq` does not support module fusing, please upgrade `autoawq` "
                    f"package to at least {MIN_AWQ_VERSION}."
                )

        if self.modules_to_not_convert is not None:
            awq_version_supports_non_conversion = False
            MIN_AWQ_VERSION = "0.1.8"
            if is_auto_awq_available():
                awq_version_supports_non_conversion = version.parse(
                    importlib.metadata.version("autoawq")
                ) >= version.parse(MIN_AWQ_VERSION)
            if not awq_version_supports_non_conversion:
                raise ValueError(
                    "You current version of `autoawq` does not support module quantization skipping, please upgrade "
                    f"`autoawq` package to at least {MIN_AWQ_VERSION}."
                )

        if self.do_fuse and self.modules_to_fuse is not None:
            required_keys = [
                "hidden_size",
                "num_attention_heads",
                "num_key_value_heads",
                "mlp",
                "attention",
                "layernorm",
                "use_alibi",
            ]
            if not all(key in self.modules_to_fuse for key in required_keys):
                raise ValueError(
                    f"Required fields are missing in the fusing mapping, required fields are {required_keys}"
                )

        if self.version == AWQLinearVersion.EXLLAMA:
            awq_version_supports_exllama = False
            MIN_AWQ_VERSION = "0.2.0"
            if is_auto_awq_available():
                awq_version_supports_exllama = version.parse(importlib.metadata.version("autoawq")) >= version.parse(
                    MIN_AWQ_VERSION
                )
            if not awq_version_supports_exllama:
                raise ValueError(
                    "You current version of `autoawq` does not support exllama backend, please upgrade `autoawq` "
                    f"package to at least {MIN_AWQ_VERSION}."
                )

            if self.exllama_config is None:
                self.exllama_config = {"version": ExllamaVersion.TWO, "max_input_len": 2048, "max_batch_size": 8}
            else:
                if "version" not in self.exllama_config:
                    raise ValueError("`exllama_config` needs to have a `version` key.")
                elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]:
                    exllama_version = self.exllama_config["version"]
                    raise ValueError(
                        "Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized "
                        f"version {exllama_version}"
                    )

    def get_loading_attributes(self):
        attributes_dict = copy.deepcopy(self.__dict__)
        loading_attributes = ["version", "do_fuse", "modules_to_fuse", "fuse_max_seq_len", "exllama_config"]
        loading_attributes_dict = {i: j for i, j in attributes_dict.items() if i in loading_attributes}
        return loading_attributes_dict
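
# Illustrative usage (hedged sketch, the model id is a placeholder): loading an AWQ checkpoint with fused
# attention/MLP modules enabled requires `fuse_max_seq_len` to size the fused buffers.
#
#     from transformers import AutoModelForCausalLM, AwqConfig
#
#     awq_config = AwqConfig(do_fuse=True, fuse_max_seq_len=512)
#     model = AutoModelForCausalLM.from_pretrained("<awq-quantized-model-id>", quantization_config=awq_config)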

@dataclass
class VptqLayerConfig(QuantizationConfigMixin):
    """
    This is used to explain vptq config params for each layer

    Args:
        enable_norm (`bool`, *optional*, defaults to `True`): to control if we have scale/bias for fp-weight
        enable_perm (`bool`, *optional*, defaults to `True`): to perm input_channel or not
        group_num (`int`, *optional*, defaults to `1`): how many single groups for vector-quantization
        group_size (`int`, *optional*, defaults to `-1`): depends on out-features
        indices_as_float (`bool`, *optional*, defaults to `False`): for Finetuning
        is_indice_packed (`bool`, *optional*, defaults to `True`): should always be True
        num_centroids (`list`, *optional*, defaults to `[-1, -1]`): centroid numbers of clusters
        num_res_centroids (`list`, *optional*, defaults to `[-1, -1]`): ditto for residual
        outlier_size (`int`, *optional*, defaults to `1`): outliers
        vector_lens (`list`, *optional*, defaults to `[-1, -1]`): centroid vector length in quantization
    """

    def __init__(
        self,
        enable_norm: bool = True,
        enable_perm: bool = True,
        group_num: int = 1,
        group_size: int = -1,
        in_features: int = -1,
        indices_as_float: bool = False,
        is_indice_packed: bool = True,
        num_centroids: tuple = [-1, -1],
        num_res_centroids: tuple = [-1, -1],
        out_features: int = -1,
        outlier_size: int = 1,
        vector_lens: tuple = [-1, -1],
        **kwargs,
    ):
        self.enable_norm = enable_norm
        self.enable_perm = enable_perm
        self.group_num = group_num
        self.group_size = group_size
        self.in_features = in_features
        self.indices_as_float = indices_as_float
        self.is_indice_packed = is_indice_packed
        self.num_centroids = num_centroids
        self.num_res_centroids = num_res_centroids
        self.out_features = out_features
        self.outlier_size = outlier_size
        self.vector_lens = vector_lens
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        if self.is_indice_packed is False:
            raise ValueError("is_indice_packed should always be True")


@dataclass
class VptqConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about `vptq` parameters.

    Args:
        enable_proxy_error (`bool`, *optional*, defaults to `False`): calculate proxy error for each layer
        config_for_layers (`Dict`, *optional*, defaults to `{}`): quantization params for each layer
        shared_layer_config (`Dict`, *optional*, defaults to `{}`): shared quantization params among layers
        modules_to_not_convert (`list`, *optional*, default to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
    """

    def __init__(
        self,
        enable_proxy_error: bool = False,
        config_for_layers: dict[str, Any] = {},
        shared_layer_config: dict[str, Any] = {},
        modules_to_not_convert: Optional[list] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.VPTQ
        self.enable_proxy_error = enable_proxy_error
        self.config_for_layers = config_for_layers
        self.shared_layer_config = shared_layer_config
        self.modules_to_not_convert = modules_to_not_convert
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        for layer_param in self.config_for_layers.values():
            VptqLayerConfig(**layer_param)
        if self.enable_proxy_error is True:
            raise ValueError("enable_proxy_error should always be False until we support training")


@dataclass
class QuantoConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `quanto`.

    Args:
        weights (`str`, *optional*, defaults to `"int8"`):
            The target dtype for the weights after quantization. Supported values are ("float8","int8","int4","int2")
        activations (`str`, *optional*):
            The target dtype for the activations after quantization. Supported values are (None,"int8","float8")
        modules_to_not_convert (`list`, *optional*, default to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
    """

    def __init__(
        self,
        weights: str = "int8",
        activations: Optional[str] = None,
        modules_to_not_convert: Optional[list] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.QUANTO
        self.weights = weights
        self.activations = activations
        self.modules_to_not_convert = modules_to_not_convert
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        accepted_weights = ["float8", "int8", "int4", "int2"]
        accepted_activations = [None, "int8", "float8"]
        if self.weights not in accepted_weights:
            raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
        if self.activations not in accepted_activations:
            raise ValueError(f"Only support weights in {accepted_activations} but found {self.activations}")


@dataclass
class EetqConfig(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using `eetq`.

    Args:
        weights (`str`, *optional*, defaults to `"int8"`):
            The target dtype for the weights. Supported value is only "int8"
        modules_to_not_convert (`list`, *optional*, default to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision.
    """

    def __init__(
        self,
        weights: str = "int8",
        modules_to_not_convert: Optional[list] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.EETQ
        self.weights = weights
        self.modules_to_not_convert = modules_to_not_convert
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        accepted_weights = ["int8"]
        if self.weights not in accepted_weights:
            raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
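
# Illustrative usage (hedged sketch, the model id is a placeholder): quanto and eetq both quantize the
# weights on the fly at load time from a simple config.
#
#     from transformers import AutoModelForCausalLM, EetqConfig, QuantoConfig
#
#     model_quanto = AutoModelForCausalLM.from_pretrained(
#         "<model-id>", quantization_config=QuantoConfig(weights="int8")
#     )
#     model_eetq = AutoModelForCausalLM.from_pretrained(
#         "<model-id>", quantization_config=EetqConfig(weights="int8")
#     )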

@dataclass
class CompressedTensorsConfig(QuantizationConfigMixin):
    """
    This is a wrapper class that handles compressed-tensors quantization config options.
    It is a wrapper around `compressed_tensors.QuantizationConfig`

    Args:
        config_groups (`typing.dict[str, typing.Union[ForwardRef('QuantizationScheme'), typing.list[str]]]`, *optional*):
            dictionary mapping group name to a quantization scheme definition
        format (`str`, *optional*, defaults to `"dense"`):
            format the model is represented as. Set `run_compressed` True to execute model as the
            compressed format if not `dense`
        quantization_status (`QuantizationStatus`, *optional*, defaults to `"initialized"`):
            status of model in the quantization lifecycle, ie 'initialized', 'calibration', 'frozen'
        kv_cache_scheme (`typing.Union[QuantizationArgs, NoneType]`, *optional*):
            specifies quantization of the kv cache. If None, kv cache is not quantized.
        global_compression_ratio (`typing.Union[float, NoneType]`, *optional*):
            0-1 float percentage of model compression
        ignore (`typing.Union[typing.list[str], NoneType]`, *optional*):
            layer names or types to not quantize, supports regex prefixed by 're:'
        sparsity_config (`typing.dict[str, typing.Any]`, *optional*):
            configuration for sparsity compression
        quant_method (`str`, *optional*, defaults to `"compressed-tensors"`):
            do not override, should be compressed-tensors
        run_compressed (`bool`, *optional*, defaults to `True`):
            alter submodules (usually linear) in order to emulate compressed model execution if True, otherwise use
            default submodule
    """

    def __init__(
        self,
        config_groups: Optional[dict[str, Union["QuantizationScheme", list[str]]]] = None,
        format: str = "dense",
        quantization_status: "QuantizationStatus" = "initialized",
        kv_cache_scheme: Optional["QuantizationArgs"] = None,
        global_compression_ratio: Optional[float] = None,
        ignore: Optional[list[str]] = None,
        sparsity_config: Optional[dict[str, Any]] = None,
        quant_method: str = "compressed-tensors",
        run_compressed: bool = True,
        **kwargs,
    ):
        if is_compressed_tensors_available():
            from compressed_tensors.config import SparsityCompressionConfig
            from compressed_tensors.quantization import QuantizationConfig
        else:
            raise ImportError(
                "compressed_tensors is not installed and is required for compressed-tensors quantization. "
                "Please install it with `pip install compressed-tensors`."
            )
        self.quantization_config = None
        self.sparsity_config = None

        self.run_compressed = run_compressed

        # parse from dict to load nested QuantizationScheme objects
        if config_groups or kv_cache_scheme:
            self.quantization_config = QuantizationConfig.model_validate(
                {
                    "config_groups": config_groups,
                    "quant_method": quant_method,
                    "format": format,
                    "quantization_status": quantization_status,
                    "kv_cache_scheme": kv_cache_scheme,
                    "global_compression_ratio": global_compression_ratio,
                    "ignore": ignore,
                    "run_compressed": run_compressed,
                    **kwargs,
                }
            )

        if sparsity_config:
            self.sparsity_config = SparsityCompressionConfig.load_from_registry(
                sparsity_config.get("format"), **sparsity_config
            )

        self.quant_method = QuantizationMethod.COMPRESSED_TENSORS

    def post_init(self):
        if self.run_compressed:
            if self.is_sparsification_compressed:
                logger.warning(
                    "`run_compressed` is only supported for quantized_compressed models"
                    " and not for sparsified models. Setting `run_compressed=False`"
                )
                self.run_compressed = False
            elif not self.is_quantization_compressed:
                logger.warning(
                    "`run_compressed` is only supported for compressed models. Setting `run_compressed=False`"
                )
                self.run_compressed = False

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """
        Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters.
        Optionally unwraps any args from the nested quantization_config

        Args:
            config_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the configuration object.
            return_unused_kwargs (`bool`,*optional*, defaults to `False`):
                Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
                `PreTrainedModel`.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the configuration object.

        Returns:
            [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
        """
        if "quantization_config" in config_dict:
            config_dict = dict(
                sparsity_config=config_dict.get("sparsity_config"),
                **config_dict["quantization_config"],
            )
        return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def to_dict(self) -> dict[str, Any]:
        """
        Quantization config to be added to config.json

        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        quantization_config = {}
        if self.quantization_config is not None:
            quantization_config = self.quantization_config.model_dump()
        else:
            quantization_config["quant_method"] = QuantizationMethod.COMPRESSED_TENSORS

        if self.sparsity_config is not None:
            quantization_config["sparsity_config"] = self.sparsity_config.model_dump()
        else:
            quantization_config["sparsity_config"] = {}

        return quantization_config

    def to_diff_dict(self) -> dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability,
        while serializing to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = CompressedTensorsConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if key not in default_config_dict or value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict

    def get_loading_attributes(self):
        return {"run_compressed": self.run_compressed}

    @property
    def is_quantized(self):
        return bool(self.quantization_config) and bool(self.quantization_config.config_groups)

    @property
    def is_quantization_compressed(self):
        from compressed_tensors.quantization import QuantizationStatus

        return self.is_quantized and self.quantization_config.quantization_status == QuantizationStatus.COMPRESSED

    @property
    def is_sparsification_compressed(self):
        from compressed_tensors.config import CompressionFormat, SparsityCompressionConfig

        return (
            isinstance(self.sparsity_config, SparsityCompressionConfig)
            and self.sparsity_config.format != CompressionFormat.dense.value
        )


@dataclass
class FbgemmFp8Config(QuantizationConfigMixin):
    """
    This is a wrapper class about all possible attributes and features that you can play with a model that has been
    loaded using fbgemm fp8 quantization.

    Args:
        activation_scale_ub (`float`, *optional*, defaults to 1200.0):
            The activation scale upper bound. This is used when quantizing the input activation.
        modules_to_not_convert (`list`, *optional*, default to `None`):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision.
    """

    def __init__(
        self,
        activation_scale_ub: float = 1200.0,
        modules_to_not_convert: Optional[list] = None,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.FBGEMM_FP8
        self.activation_scale_ub = activation_scale_ub
        self.modules_to_not_convert = modules_to_not_convert

    def get_loading_attributes(self):
        attributes_dict = copy.deepcopy(self.__dict__)
        loading_attributes = ["activation_scale_ub"]
        loading_attributes_dict = {i: j for i, j in attributes_dict.items() if i in loading_attributes}
        return loading_attributes_dict


@dataclass
class HiggsConfig(QuantizationConfigMixin):
    """
    HiggsConfig is a configuration class for quantization using the HIGGS method.

    Args:
        bits (int, *optional*, defaults to 4):
            Number of bits to use for quantization. Can be 2, 3 or 4. Default is 4.
        p (int, *optional*, defaults to 2):
            Quantization grid dimension. 1 and 2 are supported. 2 is always better in practice. Default is 2.
        modules_to_not_convert (`list`, *optional*, default to ["lm_head"]):
            List of linear layers that should not be quantized.
        hadamard_size (int, *optional*, defaults to 512):
            Hadamard size for the HIGGS method. Default is 512. Input dimension of matrices is padded to this value.
            Decreasing this below 512 will reduce the quality of the quantization.
        group_size (int, *optional*, defaults to 256):
            Group size for the HIGGS method. Can be 64, 128 or 256. Decreasing it barely affects the performance.
            Default is 256. Must be a divisor of hadamard_size.
        tune_metadata ('dict', *optional*, defaults to {}):
            Module-wise metadata (gemm block shapes, GPU metadata, etc.) for saving the kernel tuning results.
            Default is an empty dictionary. Is set automatically during tuning.
    """

    def __init__(
        self,
        bits: int = 4,
        p: int = 2,
        modules_to_not_convert: Optional[list[str]] = None,
        hadamard_size: int = 512,
        group_size: int = 256,
        tune_metadata: Optional[dict[str, Any]] = None,
        **kwargs,
    ):
        if tune_metadata is None:
            tune_metadata = {}
        self.quant_method = QuantizationMethod.HIGGS
        self.bits = bits
        self.p = p
        self.modules_to_not_convert = modules_to_not_convert
        self.hadamard_size = hadamard_size
        self.group_size = group_size
        self.tune_metadata = tune_metadata
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        if self.bits not in [2, 3, 4]:
            raise ValueError("bits must be 2, 3, or 4")
        if self.p not in [1, 2]:
            raise ValueError("p must be 1 or 2. 2 is always better in practice")
        if self.group_size not in [64, 128, 256]:
            raise ValueError("group_size must be 64, 128, or 256")
        if self.hadamard_size % self.group_size != 0:
            raise ValueError("hadamard_size must be divisible by group_size")
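
# Illustrative usage (hedged sketch, the model id is a placeholder): HIGGS quantization is applied on the
# fly when the model is loaded.
#
#     from transformers import AutoModelForCausalLM, HiggsConfig
#
#     model = AutoModelForCausalLM.from_pretrained("<model-id>", quantization_config=HiggsConfig(bits=4))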

@dataclass
class FPQuantConfig(QuantizationConfigMixin):
    """
    FPQuantConfig is a configuration class for quantization using the FPQuant method.

    Args:
        forward_dtype (`str`, *optional*, defaults to `"nvfp4"`):
            The dtype to use for the forward pass.
        forward_method (`str`, *optional*, defaults to `"abs_max"`):
            The scaling to use for the forward pass. Can be `"abs_max"` or `"quest"`. `"abs_max"` is better for PTQ,
            `"quest"` is better for QAT.
        backward_dtype (`str`, *optional*, defaults to `"bf16"`):
            The dtype to use for the backward pass.
        store_master_weights (`bool`, *optional*, defaults to `False`):
            Whether to store the master weights. Needed for QAT over layer weights.
        hadamard_group_size (`int`, *optional*):
            The group size for the hadamard transform before quantization; for `"quest"` it matches the MXFP4 group
            size (32). If `None`, it will be set to 16 for `"nvfp4"` and 32 for `"mxfp4"`.
        pseudoquantization (`bool`, *optional*, defaults to `False`):
            Whether to use Triton-based pseudo-quantization. Is mandatory for non-Blackwell GPUs. Doesn't provide any
            speedup. For debugging purposes.
        transform_init (`str`, *optional*, defaults to `"hadamard"`):
            a method to initialize the pre-processing matrix with. Can be `"hadamard"`, `"identity"` or `"gsr"`.
        modules_to_not_convert (`list`, *optional*):
            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
            modules left in their original precision.
    """

    def __init__(
        self,
        forward_dtype: str = "nvfp4",
        forward_method: str = "abs_max",
        backward_dtype: str = "bf16",
        store_master_weights: bool = False,
        hadamard_group_size: Optional[int] = None,
        pseudoquantization: bool = False,
        transform_init: str = "hadamard",
        modules_to_not_convert: Optional[list[str]] = None,
        **kwargs,
    ):
        self.forward_dtype = forward_dtype
        self.forward_method = forward_method
        self.backward_dtype = backward_dtype
        self.store_master_weights = store_master_weights
        self.hadamard_group_size = hadamard_group_size
        self.pseudoquantization = pseudoquantization
        self.transform_init = transform_init
        self.modules_to_not_convert = modules_to_not_convert
        self.quant_method = QuantizationMethod.FP_QUANT
        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct
        """
        if self.hadamard_group_size is None:
            if self.forward_dtype == "nvfp4":
                self.hadamard_group_size = 16
            else:
                self.hadamard_group_size = 32

        if self.forward_dtype == "mxfp4":
            if self.forward_method not in ["abs_max", "quest"]:
                raise ValueError("Only 'abs_max' and 'quest' are supported for forward_method for 'mxfp4'.")
            if self.hadamard_group_size not in [32, 64, 128]:
                raise ValueError("Only a `hadamard_group_size` of [32, 64, 128] is supported for 'mxfp4'.")
        elif self.forward_dtype == "nvfp4":
            if self.forward_method != "abs_max":
                raise ValueError("Only 'abs_max' is supported for forward_method for 'nvfp4'.")
            if self.hadamard_group_size not in [16, 32, 64, 128]:
                raise ValueError("Only a `hadamard_group_size` of [16, 32, 64, 128] is supported for 'nvfp4'.")
        else:
            raise ValueError("Only 'mxfp4' and 'nvfp4' are supported for forward_dtype for now.")

        if self.backward_dtype != "bf16":
            raise ValueError("Only 'bf16' is supported for backward_dtype for now.")
        if self.transform_init not in ["hadamard", "identity", "gsr"]:
            raise ValueError("Only 'hadamard', 'identity' and 'gsr' are supported for transform_init.")

        if self.modules_to_not_convert is None:
            self.modules_to_not_convert = ["lm_head"]


@dataclass
class TorchAoConfig(QuantizationConfigMixin):
    """
    This is a config class for quantization with the torchao library. `quant_type` is either a string identifier of a
    torchao quantization method or, for torchao > 0.9.0, an `AOBaseConfig` instance.
    """

    quant_method: QuantizationMethod
    quant_type: Union[str, "AOBaseConfig"]
    modules_to_not_convert: Optional[list]
    quant_type_kwargs: dict[str, Any]
    include_input_output_embeddings: bool
    untie_embedding_weights: bool

    def __init__(
        self,
        quant_type: Union[str, "AOBaseConfig"],
        modules_to_not_convert: Optional[list] = None,
        include_input_output_embeddings: bool = False,
        untie_embedding_weights: bool = False,
        **kwargs,
    ):
        self.quant_method = QuantizationMethod.TORCHAO
        self.quant_type = quant_type
        self.modules_to_not_convert = modules_to_not_convert
        self.quant_type_kwargs = kwargs.get("quant_type_kwargs", kwargs)
        self.include_input_output_embeddings = include_input_output_embeddings
        self.untie_embedding_weights = untie_embedding_weights
        self.post_init()

    @staticmethod
    def _get_ao_version() -> version.Version:
        """Centralized check for TorchAO availability and version requirements."""
        if not is_torchao_available():
            raise ValueError("TorchAoConfig requires torchao to be installed. Install with `pip install torchao`")
        return version.parse(importlib.metadata.version("torchao"))

    def post_init(self):
        """Validate configuration and set defaults."""
        ao_version = self._get_ao_version()

        if isinstance(self.quant_type, str):
            self._validate_string_quant_type()
        elif ao_version > version.parse("0.9.0"):
            from torchao.quantization.quant_api import AOBaseConfig

            if not isinstance(self.quant_type, AOBaseConfig):
                raise TypeError(
                    f"quant_type must be either a string or an AOBaseConfig instance, got {type(self.quant_type)}"
                )
        else:
            raise ValueError(
                f"In torchao <= 0.9.0, quant_type must be a string. Got {type(self.quant_type)}. "
                "Please upgrade to torchao > 0.9.0 to use AOBaseConfig instances."
            )

    def _validate_string_quant_type(self):
        """Validate a string `quant_type` and the keyword arguments passed for it."""
        methods = self._get_torchao_quant_type_to_method()
        if self.quant_type not in methods:
            raise ValueError(
                f"Requested quantization type: {self.quant_type} is not supported or is an incorrect `quant_type`. "
                f"Supported values are: {list(methods.keys())}."
            )

        method = methods[self.quant_type]
        sig = signature(method)
        all_kwargs = {
            param.name
            for param in sig.parameters.values()
            if param.kind in [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
        }
        unsupported_kwargs = list(self.quant_type_kwargs.keys() - all_kwargs)
        if len(unsupported_kwargs) > 0:
            raise ValueError(
                f'The quantization method "{self.quant_type}" does not support the following keyword arguments: '
                f"{unsupported_kwargs}. The following keyword arguments are supported: {list(all_kwargs)}."
            )

    def _get_torchao_quant_type_to_method(self):
        """Map string identifiers to the corresponding torchao quantization callables."""
        from torchao.quantization import (
            autoquant,
            int4_weight_only,
            int8_dynamic_activation_int8_weight,
            int8_weight_only,
        )

        return {
            "int4_weight_only": int4_weight_only,
            "int8_weight_only": int8_weight_only,
            "int8_dynamic_activation_int8_weight": int8_dynamic_activation_int8_weight,
            "autoquant": autoquant,
        }

    def get_apply_tensor_subclass(self):
        """Resolve `quant_type` into the torchao config/callable that is applied to the model."""
        if isinstance(self.quant_type, str):
            methods = self._get_torchao_quant_type_to_method()
            quant_type_kwargs = self.quant_type_kwargs.copy()
            if (
                not torch.cuda.is_available()
                and self.quant_type == "int4_weight_only"
                and quant_type_kwargs.get("layout", None) is None
            ):
                if torch.xpu.is_available():
                    if version.parse(importlib.metadata.version("torchao")) < version.parse(
                        "0.11.0"
                    ) or version.parse(importlib.metadata.version("torch")) < version.parse("2.8.0"):
                        raise ValueError(
                            "TorchAoConfig requires torchao >= 0.11.0 and torch >= 2.8.0 for XPU support. Please "
                            "upgrade the version or use run on CPU with the cpu version pytorch."
                        )
                else:
                    from torchao.dtypes import Int4CPULayout

                    quant_type_kwargs["layout"] = Int4CPULayout()
            return methods[self.quant_type](**quant_type_kwargs)
        return self.quant_type

    def to_dict(self):
        """Convert configuration to a dictionary."""
        d = super().to_dict()

        if isinstance(self.quant_type, str):
            # Handle layout serialization if present
            if "quant_type_kwargs" in d and "layout" in d["quant_type_kwargs"]:
                if is_dataclass(d["quant_type_kwargs"]["layout"]):
                    d["quant_type_kwargs"]["layout"] = [
                        d["quant_type_kwargs"]["layout"].__class__.__name__,
                        dataclasses.asdict(d["quant_type_kwargs"]["layout"]),
                    ]
                if isinstance(d["quant_type_kwargs"]["layout"], list):
                    assert len(d["quant_type_kwargs"]["layout"]) == 2, "layout saves layout name and layout kwargs"
                    assert isinstance(d["quant_type_kwargs"]["layout"][0], str), "layout name must be a string"
                    assert isinstance(d["quant_type_kwargs"]["layout"][1], dict), "layout kwargs must be a dict"
                else:
                    raise ValueError("layout must be a list")
        else:
            # Handle AOBaseConfig serialization
            from torchao.core.config import config_to_dict

            d["quant_type"] = {"default": config_to_dict(self.quant_type)}

        return d

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
        """Create configuration from a dictionary."""
        ao_version = cls._get_ao_version()
        assert ao_version > version.parse("0.9.0"), "TorchAoConfig requires torchao > 0.9.0 for construction from dict"
        config_dict = config_dict.copy()
        quant_type = config_dict.pop("quant_type")

        if isinstance(quant_type, str):
            return cls(quant_type=quant_type, **config_dict)

        # Check if we only have one key which is "default"
        assert len(quant_type) == 1 and "default" in quant_type, (
            "Expected only one key 'default' in quant_type dictionary"
        )
        quant_type = quant_type["default"]

        # Deserialize quant_type if needed
        from torchao.core.config import config_from_dict

        quant_type = config_from_dict(quant_type)

        return cls(quant_type=quant_type, **config_dict)
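
# Illustrative usage (hedged sketch, the model id is a placeholder): `quant_type` can be a plain string for
# older torchao releases or an AOBaseConfig instance for torchao > 0.9.0.
#
#     from transformers import AutoModelForCausalLM, TorchAoConfig
#
#     quant_config = TorchAoConfig("int4_weight_only", group_size=128)
#     model = AutoModelForCausalLM.from_pretrained("<model-id>", quantization_config=quant_config)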
"&  DW__DD(4 #D899rDrc JeZdZdZ d deededededeef dZ d Z y) BitNetQuantConfiga Configuration class for applying BitNet quantization. Args: modules_to_not_convert (`Optional[List]`, *optional*): Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized. Defaults to None. linear_class (`str`, *optional*, defaults to `"bitlinear"`): The type of linear class to use. Can be either `bitlinear` or `autobitlinear`. quantization_mode (`str`, *optional*, defaults to `"offline"`): The quantization mode to use. Can be either `online` or `offline`. In `online` mode, the weight quantization parameters are calculated dynamically during each forward pass (e.g., based on the current weight values). This can adapt to weight changes during training (Quantization-Aware Training - QAT). In `offline` mode, quantization parameters are pre-calculated *before* inference. These parameters are then fixed and loaded into the quantized model. This generally results in lower runtime overhead compared to online quantization. use_rms_norm (`bool`, *optional*, defaults to `False`): Whether to apply RMSNorm on the activations before quantization. This matches the original BitNet paper's approach of normalizing activations before quantization/packing. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon value used in the RMSNorm layer for numerical stability. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments that may be used by specific quantization backends or future versions. NrW linear_classquantization_mode use_rms_norm rms_norm_epsc |dvrtd||dvrtd|tj|_||_||_||_||_||_|jy)N) bitlinear autobitlinearzDlinear_class must be either 'bitlinear' or 'autobitlinear', but got )onlineofflinez@quantization_mode must be either 'online' or 'offline', but got ) rRrr<r]rWr*r+r,r-r)rzrWr*r+r,r-rgs rErzBitNetQuantConfig.__init__rs = =cdpcqrs s $9 9_`q_rst t.55&<#(!2(( rDcy)r>NrCrs rErzBitNetQuantConfig.post_initrrD)Nr/r2Fgư>) r-r.r/rr rrUrr rrrCrDrEr)r)UsZ:26'!*"(,  (    uo * rDr)c ZeZdZdZ d dedededeeeefdeeef dZ d Z y) SpQRConfigaa This is a wrapper class about `spqr` parameters. Refer to the original publication for more details. Args: bits (`int`, *optional*, defaults to 3): Specifies the bit count for the weights and first order zero-points and scales. Currently only bits = 3 is supported. beta1 (`int`, *optional*, defaults to 16): SpQR tile width. Currently only beta1 = 16 is supported. beta2 (`int`, *optional*, defaults to 16): SpQR tile height. Currently only beta2 = 16 is supported. shapes (`Optional`, *optional*): A dictionary holding the shape of each object. We need this because it's impossible to deduce the exact size of the parameters just from bits, beta1, beta2. modules_to_not_convert (`Optional[list[str]]`, *optional*): Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized. Defaults to None. kwargs (`dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. 
Nrbeta1beta2shapesrWc |i}||_tj|_||_||_||_||_|jyr) r8rr=r]rr6r7rWr)rzrr6r7r8rWrgs rErzSpQRConfig.__init__sJ >F .33   &<# rDct|jts tdt|jts tdt|j ts td|jdk7r t d|jdk7r t d|j dk7r t dt|jts td y ) rzbits must be an intzbeta1 must be an intzbeta2 must be an intrz%SpQR currently only supports bits = 3r~z'SpQR currently only supports beta1 = 16z'SpQR currently only supports beta2 = 16zshapes must be a dictN) rrrrr6r7rRr8rrs rErzSpQRConfig.post_inits$))S)12 2$**c*23 3$**c*23 3 99>DE E :: FG G :: FG G$++t,34 4-rD)rr~r~NN) r-r.r/rrr rrUrrrrCrDrEr5r5sh.+/6:   c3h(  !)c 3 &5rDr5cBeZdZdZ ddedeeefdeefdZ dZ y) FineGrainedFP8Configam FineGrainedFP8Config is a configuration class for fine-grained FP8 quantization used mainly for deepseek models. Args: activation_scheme (`str`, *optional*, defaults to `"dynamic"`): The scheme used for activation, the defaults and only support scheme for now is "dynamic". weight_block_size (`typing.tuple[int, int]`, *optional*, defaults to `(128, 128)`): The size of the weight blocks for quantization, default is (128, 128). modules_to_not_convert (`list`, *optional*): A list of module names that should not be converted during quantization. Nactivation_schemeweight_block_sizerWc xtj|_||_||_||_|j yr)rr>r]rWr=r>r)rzr=r>rWrgs rErzFineGrainedFP8Config.__init__s4/22&<#!2!2 rDc6|jj|_|jdk7rtd|jdt|jdk7r td|jddks|jddkr tdy ) r>dynamiczActivation scheme z not supportedr z1weight_block_size must be a tuple of two integersrrz:weight_block_size must be a tuple of two positive integersN)r=rMrRrYr>rs rErzFineGrainedFP8Config.post_inits"&!7!7!=!=!?  ! !Y .1$2H2H1IXY Y t%% &! +PQ Q  ! !! $ )T-C-CA-F!-KYZ Z.LrD)rA)rrN) r-r.r/rrUrrr rrrrCrDrEr<r<sE "+-715  !c? !)  [rDr<ceZdZdZy) QuarkConfigc Vtr#trddlm}ddlm}ddlm}ddlm }n td|d|_ d|v|_ |jd vr%|j|d |_||_n|j!||_d|vrpd |dvrXt#j$|t#j$d kr-|dj'd }t(j+d|d|di|d|_n ||_t,j.|_y)Nr) __version__)JsonExporterConfig)QuantConfigParser)ConfigzWQuark is not installed. Please refer to https://quark.docs.amd.com/latest/install.html.r]export)rr(F)is_bias_quantized min_kv_scalez0.8zThe parameter `min_kv_scale=z` was found in the model config.json's `quantization_config.export` configuration, but this parameter is supported only for quark>=0.8. Ignoring this configuration parameter. Please update the `amd-quark` package.rC)rrr)rE quark.torch.export.config.configrF2quark.torch.export.main_export.quant_config_parserrG&quark.torch.quantization.config.configrHr custom_modelegacyfrom_custom_configrjson_export_configrlr r rcrrrr?r])rzrg quark_versionrFrGrHrKs rErzQuarkConfig.__init__s%  $6$8 : K \ Ei ".1f,   ~ - 1 D DV_d D eD &8&:D # & 0 0 8D 6!!VH%55'-- :VY`YfYfglYm:m#)(#3#7#7#GLNN6|nEZ[+=*Pvh?O*P'+=*>'.44rDN)r-r.r/rrCrDrErCrCs%5rDrCcHeZdZdZ d deedefdZdZde e e ffdZ y) Mxfp4Configa This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using mxfp4 quantization. Args: modules_to_not_convert (`list`, *optional*, default to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision. dequantize (`bool`, *optional*, default to `False`): Whether we dequantize the model to bf16 precision or not NrW dequantizec Jtj|_||_||_yr)rrBr]rWrV)rzrWrVrgs rErzMxfp4Config.__init__,s! 
/44&<#$rDcd|jiS)NrV)rVrs rErz"Mxfp4Config.get_loading_attributes6sdoo..rDr~c4|j|jdS)rr]rWrZrs rErvzMxfp4Config.to_dict9s !% 1 1TMhMhiirD)NF) r-r.r/rr rrrrrrUrrvrCrDrErUrUsE 26 % (%%/jc3hjrDrU)=rr!importlib.metadatarrwrrrenumrinspectrrtypingrr r packagingr utilsrrrrrrrr import_utilsrr get_loggerr-rrUrrGrWr\rrrrrr#rRrvrrrrrrrrrr)r5r<rCrUrCrDrErcs$  /(''   1   H %d,DsDD*c4  mm m` Ac-Ac AcH y('y( y(x c(0c( c(L S$  i(i iX m''m' m'` 85(85 85v 3G-3G 3Gl %d(%d %dP %m*%m %mP e(e eBs 5s l '-' '< 4N)4N 4Nn Q6+Q6 Q6h j9+j9 j9Z 5 /5  5 p ;5(;5 ;5| $[2$[ $[N&5)&5R j)j jrD
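
# Illustrative usage (hedged sketch, the model id is a placeholder): an MXFP4 checkpoint can be dequantized
# to bf16 at load time by passing `Mxfp4Config(dequantize=True)` as the `quantization_config`.
#
#     from transformers import AutoModelForCausalLM, Mxfp4Config
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "<mxfp4-quantized-model-id>", quantization_config=Mxfp4Config(dequantize=True)
#     )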