"""
This module implements observers which are used to collect statistics about
the values observed during calibration (PTQ) or training (QAT).
"""

import operator
import re
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import partial
from typing import Any, Optional

import torch
import torch.nn as nn
from torch.ao.quantization.utils import (
    calculate_qmin_qmax,
    check_min_max_valid,
    is_per_channel,
    is_per_tensor,
    validate_qmin_qmax,
)
from torch.fx import Node


__all__ = [
    "default_affine_fixed_qparams_observer",
    "default_debug_observer",
    "default_dynamic_quant_observer",
    "default_fixed_qparams_range_0to1_observer",
    "default_fixed_qparams_range_neg1to1_observer",
    "default_float_qparams_observer",
    "default_float_qparams_observer_4bit",
    "default_histogram_observer",
    "default_observer",
    "default_per_channel_weight_observer",
    "default_placeholder_observer",
    "default_reuse_input_observer",
    "default_symmetric_fixed_qparams_observer",
    "default_weight_observer",
    "get_observer_state_dict",
    "load_observer_state_dict",
    "per_channel_weight_observer_range_neg_127_to_127",
    "weight_observer_range_neg_127_to_127",
    "FixedQParamsObserver",
    "HistogramObserver",
    "MinMaxObserver",
    "MovingAverageMinMaxObserver",
    "MovingAveragePerChannelMinMaxObserver",
    "NoopObserver",
    "ObserverBase",
    "PerChannelMinMaxObserver",
    "PlaceholderObserver",
    "RecordingObserver",
    "ReuseInputObserver",
    "UniformQuantizationObserverBase",
    "AffineQuantizedObserverBase",
    "Granularity",
    "MappingType",
    "PerAxis",
    "PerBlock",
    "PerGroup",
    "PerRow",
    "PerTensor",
    "PerToken",
    "TorchAODType",
    "ZeroPointDomain",
    "get_block_size",
]


class _PartialWrapper:
    def __init__(self, p):
        self.p = p
        self.callable_args = {}

    def __call__(self, *args, **keywords):
        # call each arg in callable_args and add them to keywords,
        # skipping any arg_name already present so it can be overwritten
        for arg_name in self.callable_args:
            if arg_name not in keywords:
                keywords = {**keywords, arg_name: self.callable_args[arg_name]()}
        return self.p(*args, **keywords)

    def __repr__(self):
        return self.p.__repr__() + self.callable_args.__repr__()

    def with_args(self, **kwargs):
        return _with_args(self, **kwargs)

    def with_callable_args(self, **kwargs):
        result = _PartialWrapper(p=self.p)
        result.callable_args = {**self.callable_args, **kwargs}
        return result


def _with_args(cls_or_self, **kwargs):
    r"""Wrapper that allows creation of class factories.

    This can be useful when there is a need to create classes with the same
    constructor arguments, but different instances. Can be used in conjunction
    with _with_callable_args.

    Example::

        >>> # xdoctest: +SKIP("Undefined vars")
        >>> Foo.with_args = classmethod(_with_args)
        >>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
        >>> foo_instance1 = foo_builder()
        >>> foo_instance2 = foo_builder()
        >>> id(foo_instance1) == id(foo_instance2)
        False
    """
    r = _PartialWrapper(partial(cls_or_self, **kwargs))
    return r
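# Illustrative usage of the `with_args` factory pattern (a minimal sketch kept
# in a comment so it is not executed at import time). Observer classes defined
# later in this module expose `with_args` so that partially-configured observer
# "constructors" can be stored in qconfigs and instantiated once per module:
#
#     >>> observer_ctr = MinMaxObserver.with_args(dtype=torch.qint8)
#     >>> obs_a = observer_ctr()  # fresh instance
#     >>> obs_b = observer_ctr()  # another fresh instance
#     >>> obs_a is obs_b
#     False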
def _with_callable_args(cls_or_self, **kwargs):
    r"""Wrapper that allows creation of class factories args that need to be
    called at construction time.

    This can be useful when there is a need to create classes with the same
    constructor arguments, but different instances and those arguments should only
    be calculated at construction time. Can be used in conjunction with _with_args.

    Example::

        >>> # xdoctest: +SKIP("Undefined vars")
        >>> Foo.with_callable_args = classmethod(_with_callable_args)
        >>> Foo.with_args = classmethod(_with_args)
        >>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name="dan")
        >>> foo_instance1 = foo_builder()
        >>> # wait 50
        >>> foo_instance2 = foo_builder()
        >>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time)
        False
    """
    r = _PartialWrapper(partial(cls_or_self))
    return r.with_callable_args(**kwargs)


ABC: Any = ABCMeta("ABC", (object,), {})  # compatible with Python 2 *and* 3


class ObserverBase(ABC, nn.Module):
    r"""Base observer Module.
    Any observer implementation should derive from this class.

    Concrete observers should follow the same API. In forward, they will update
    the statistics of the observed Tensor. And they should provide a
    `calculate_qparams` function that computes the quantization parameters given
    the collected statistics.

    Args:
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec.
        is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization
                    or static quantization
    """

    def __init__(self, dtype, is_dynamic: bool = False):
        super().__init__()
        self.dtype = dtype
        self.is_dynamic = is_dynamic

    @abstractmethod
    def forward(self, x):
        pass

    @abstractmethod
    def calculate_qparams(self, **kwargs):
        pass

    with_args = classmethod(_with_args)
    with_callable_args = classmethod(_with_callable_args)
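# A minimal sketch of a custom observer built on the `ObserverBase` API above
# (hypothetical, for illustration only, kept as a comment): `forward` must
# collect statistics and return its input unchanged, and `calculate_qparams`
# must return the (scale, zero_point) pair derived from those statistics.
#
#     >>> class AbsMaxObserver(ObserverBase):
#     ...     def __init__(self, dtype=torch.qint8):
#     ...         super().__init__(dtype=dtype, is_dynamic=False)
#     ...         self.register_buffer("abs_max", torch.tensor(0.0))
#     ...     def forward(self, x):
#     ...         self.abs_max.copy_(torch.max(self.abs_max, x.detach().abs().max()))
#     ...         return x
#     ...     def calculate_qparams(self):
#     ...         scale = (self.abs_max / 127.0).reshape(1)
#     ...         return scale, torch.zeros(1, dtype=torch.int64)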
class UniformQuantizationObserverBase(ObserverBase):
    r"""Common base for all observers using uniform quantization to calculate
    scale and zero_point.

    Args:
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec.
        qscheme: Quantization scheme to be used.
        reduce_range: Reduces the range of the quantized data type by 1 bit.
                      This is sometimes required to avoid instruction overflow.
        quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    .. warning::

        :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``,
        or ``torch.int8`` or ``torch.uint8``.

    .. warning::

        :attr:`qscheme` can only take one of the following options:

        - ``torch.per_tensor_affine``
        - ``torch.per_tensor_symmetric``
        - ``torch.per_channel_affine``
        - ``torch.per_channel_symmetric``
    """

    # Note: the version is used in the _load_from_state_dict hook below.
    # Version 2 moved `eps` into a buffer; version 3 changed how uninitialized
    # min/max buffers are stored.
    _version = 3

    eps: torch.Tensor

    def __init__(
        self,
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
        super().__init__(dtype=dtype, is_dynamic=is_dynamic, **kwargs)
        self.qscheme = qscheme
        if reduce_range:
            warnings.warn(
                "Please use quant_min and quant_max to specify the range for observers. "
                "reduce_range will be deprecated in a future release of PyTorch."
            )
        self.reduce_range = reduce_range
        self.register_buffer("eps", torch.tensor([eps], **factory_kwargs))
        assert self.qscheme in (
            torch.per_tensor_affine,
            torch.per_tensor_symmetric,
            torch.per_channel_affine,
            torch.per_channel_symmetric,
            torch.per_channel_affine_float_qparams,
        ), (
            "Default Observer only works for per_tensor_affine, per_tensor_symmetric, "
            "per_channel_affine, per_channel_symmetric and per_channel_float_qparams "
            "quantization scheme"
        )

        _ALLOWED_DTYPES = (
            torch.qint8,
            torch.quint8,
            torch.quint4x2,
            torch.qint32,
            torch.int8,
            torch.uint8,
            torch.int16,
            torch.int32,
            torch.float8_e5m2,
            torch.float8_e4m3fn,
            torch.uint16,
        )
        assert self.dtype in _ALLOWED_DTYPES, (
            f"Default Observer only works for {_ALLOWED_DTYPES} data type"
        )
        self.has_customized_qrange = (quant_min is not None) and (quant_max is not None)
        if self.has_customized_qrange:
            validate_qmin_qmax(quant_min, quant_max)
        self.quant_min, self.quant_max = calculate_qmin_qmax(
            quant_min,
            quant_max,
            self.has_customized_qrange,
            self.dtype,
            self.reduce_range,
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        if version is None or version == 1:
            # eps was moved to a buffer in version 2
            eps = torch.tensor([torch.finfo(torch.float32).eps])
            state_dict[prefix + "eps"] = eps

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    @torch.jit.export
    def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:
        r"""Validates that the user-specified quantization range is properly initialized
        and within the given bound supported by the observer dtype.

        To accommodate lower-bit quantization with respect to the existing torch.qint8 and
        torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
        in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax
        values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
        fake quantization. These estimates are compared against parameters learned through backpropagation.
        The related literatures for scale and zero point via backpropagation are as follows:

        Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
        Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
        """
        assert quant_min <= 0 <= quant_max, (
            "User-specified quantization range must include 0."
        )
        assert quant_min < quant_max, (
            "qmin must be strictly less than qmax for user-specified quantization range."
        )

    @torch.jit.export
    def _calculate_qparams(
        self, min_val: torch.Tensor, max_val: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        r"""Calculates the quantization parameters, given min and max
        value tensors. Works for both per tensor and per channel cases

        Args:
            min_val: Minimum values per channel
            max_val: Maximum values per channel

        Returns:
            scales: Scales tensor of shape (#channels,)
            zero_points: Zero points tensor of shape (#channels,)
        """
        if not check_min_max_valid(min_val, max_val):
            return torch.tensor([1.0], device=min_val.device.type), torch.tensor(
                [0], device=min_val.device.type
            )

        quant_min, quant_max = self.quant_min, self.quant_max
        min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
        max_val_pos = torch.max(max_val, torch.zeros_like(max_val))

        device = min_val_neg.device
        scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device)
        zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)

        if (
            self.qscheme == torch.per_tensor_symmetric
            or self.qscheme == torch.per_channel_symmetric
        ):
            max_val_pos = torch.max(-min_val_neg, max_val_pos)
            scale = max_val_pos / (float(quant_max - quant_min) / 2)
            scale = torch.max(scale, self.eps)
            if self.dtype in [torch.quint8, torch.uint8]:
                if self.has_customized_qrange:
                    # When a customized quantization range is used, the
                    # down-rounded midpoint of the range is chosen.
                    zero_point = zero_point.new_full(
                        zero_point.size(), (quant_min + quant_max) // 2
                    )
                else:
                    zero_point = zero_point.new_full(zero_point.size(), 128)
            elif self.dtype in [torch.uint16]:
                zero_point = zero_point.new_full(zero_point.size(), 2**15)
        elif self.qscheme == torch.per_channel_affine_float_qparams:
            scale = (max_val - min_val) / float(quant_max - quant_min)
            scale = torch.where(scale > self.eps, scale, torch.ones_like(scale))
            # We use the quantize function
            #   xq = Round(Xf * inv_scale + zero_point);
            # setting zero_point to (-1 * min * inv_scale) we get
            #   Xq = Round((Xf - min) * inv_scale)
            zero_point = -1 * min_val / scale
        else:
            scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
            scale = torch.max(scale, self.eps)
            zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
            zero_point = torch.clamp(zero_point, quant_min, quant_max)

        # For scalar values, cast them to Tensors of size 1 to keep the shape
        # consistent with default values in FakeQuantize.
        if len(scale.shape) == 0:
            scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
        if len(zero_point.shape) == 0:
            zero_point = torch.tensor(
                [int(zero_point)], dtype=zero_point.dtype, device=device
            )
            if self.qscheme == torch.per_channel_affine_float_qparams:
                zero_point = torch.tensor(
                    [float(zero_point)], dtype=zero_point.dtype, device=device
                )

        return scale, zero_point

    @torch.jit.export
    def reset_min_max_vals(self):
        raise NotImplementedError("Cannot reset min/max values in the given observer.")


# Originally, this class was called `_ObserverBase`. Keeping the old name around
# for backwards compatibility.
_ObserverBase = UniformQuantizationObserverBase
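# Worked example of the affine/symmetric formulas implemented in
# `_calculate_qparams` above (numbers are illustrative). Suppose the observed
# range is min_val = -1.0, max_val = 3.0:
#
#   * affine, quint8 with (quant_min, quant_max) = (0, 255):
#       scale      = (3.0 - (-1.0)) / (255 - 0) ~= 0.0157
#       zero_point = 0 - round(-1.0 / 0.0157)    = 64
#   * symmetric, qint8 with (quant_min, quant_max) = (-128, 127):
#       scale      = max(|-1.0|, 3.0) / ((127 - (-128)) / 2) = 3.0 / 127.5 ~= 0.0235
#       zero_point = 0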
class MinMaxObserver(UniformQuantizationObserverBase):
    r"""Observer module for computing the quantization parameters based on the
    running min and max values.

    This observer uses the tensor min/max statistics to compute the quantization
    parameters. The module records the running minimum and maximum of incoming
    tensors, and uses this statistic to compute the quantization parameters.

    Args:
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec.
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
        quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    Given running min/max as :math:`x_\text{min}` and :math:`x_\text{max}`,
    scale :math:`s` and zero point :math:`z` are computed as:

    The running minimum/maximum :math:`x_\text{min/max}` is computed as:

    .. math::

        \begin{array}{ll}
        x_\text{min} &= \begin{cases}
            \min(X) & \text{if~}x_\text{min} = \text{None} \\
            \min\left(x_\text{min}, \min(X)\right) & \text{otherwise}
        \end{cases}\\
        x_\text{max} &= \begin{cases}
            \max(X) & \text{if~}x_\text{max} = \text{None} \\
            \max\left(x_\text{max}, \max(X)\right) & \text{otherwise}
        \end{cases}\\
        \end{array}

    where :math:`X` is the observed tensor.

    The scale :math:`s` and zero point :math:`z` are then computed as:

    .. math::

        \begin{aligned}
            \text{if Symmetric:}&\\
            &s = 2 \max(|x_\text{min}|, x_\text{max}) /
                \left( Q_\text{max} - Q_\text{min} \right) \\
            &z = \begin{cases}
                0 & \text{if dtype is qint8} \\
                128 & \text{otherwise}
            \end{cases}\\
            \text{Otherwise:}&\\
                &s = \left( x_\text{max} - x_\text{min}  \right ) /
                    \left( Q_\text{max} - Q_\text{min} \right ) \\
                &z = Q_\text{min} - \text{round}(x_\text{min} / s)
        \end{aligned}

    where :math:`Q_\text{min}` and :math:`Q_\text{max}` are the minimum and
    maximum of the quantized data type.

    .. warning:: :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.

    .. note:: If the running minimum equals the running maximum, the scale
              and zero_point are set to 1.0 and 0.
    """

    min_val: torch.Tensor
    max_val: torch.Tensor

    def __init__(
        self,
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        if not is_per_tensor(qscheme):
            raise NotImplementedError(
                "MinMaxObserver's qscheme only support torch.per_tensor_symmetric "
                "and torch.per_tensor_affine."
            )
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            factory_kwargs=factory_kwargs,
            eps=eps,
            is_dynamic=is_dynamic,
            **kwargs,
        )
        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
        self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
        self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
        if (
            self.qscheme == torch.per_tensor_symmetric
            and self.reduce_range
            and self.dtype == torch.quint8
        ):
            raise NotImplementedError(
                "Cannot reduce range for symmetric quantization for quint8"
            )

    def forward(self, x_orig):
        r"""Records the running minimum and maximum of ``x``."""
        if x_orig.numel() == 0:
            return x_orig
        x = x_orig.detach()  # avoid keeping autograd tape
        x = x.to(self.min_val.dtype)
        min_val_cur, max_val_cur = torch.aminmax(x)
        min_val = torch.min(min_val_cur, self.min_val)
        max_val = torch.max(max_val_cur, self.max_val)
        self.min_val.copy_(min_val)
        self.max_val.copy_(max_val)
        return x_orig

    @torch.jit.export
    def calculate_qparams(self):
        r"""Calculates the quantization parameters."""
        return self._calculate_qparams(self.min_val, self.max_val)

    @torch.jit.export
    def extra_repr(self):
        return f"min_val={self.min_val}, max_val={self.max_val}"

    @torch.jit.export
    def reset_min_max_vals(self):
        """Resets the min/max values."""
        self.min_val.copy_(torch.tensor(float("inf")))
        self.max_val.copy_(torch.tensor(float("-inf")))
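# Typical calibration flow with `MinMaxObserver` (a minimal sketch, kept as a
# comment so the module has no import-time side effects):
#
#     >>> obs = MinMaxObserver(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric)
#     >>> for batch in [torch.randn(4, 8) for _ in range(10)]:
#     ...     obs(batch)  # forward only records statistics
#     >>> scale, zero_point = obs.calculate_qparams()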
class MovingAverageMinMaxObserver(MinMaxObserver):
    r"""Observer module for computing the quantization parameters based on the
    moving average of the min and max values.

    This observer computes the quantization parameters based on the moving
    averages of minimums and maximums of the incoming tensors. The module
    records the average minimum and maximum of incoming tensors, and uses this
    statistic to compute the quantization parameters.

    Args:
        averaging_constant: Averaging constant for min/max.
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec.
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
        quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    The moving average min/max is computed as follows

    .. math::

        \begin{array}{ll}
                x_\text{min} = \begin{cases}
                    \min(X) & \text{if~}x_\text{min} = \text{None} \\
                    (1 - c) x_\text{min} + c \min(X) & \text{otherwise}
                \end{cases}\\
                x_\text{max} = \begin{cases}
                    \max(X) & \text{if~}x_\text{max} = \text{None} \\
                    (1 - c) x_\text{max} + c \max(X) & \text{otherwise}
                \end{cases}\\
        \end{array}

    where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is
    the incoming tensor, and :math:`c` is the ``averaging_constant``.

    The scale and zero point are then computed as in
    :class:`~torch.ao.quantization.observer.MinMaxObserver`.

    .. note:: Only works with ``torch.per_tensor_affine`` quantization scheme.

    .. note:: If the running minimum equals the running maximum, the scale
              and zero_point are set to 1.0 and 0.
    """

    def __init__(
        self,
        averaging_constant=0.01,
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        if not is_per_tensor(qscheme):
            raise NotImplementedError(
                "MovingAverageMinMaxObserver's qscheme only support "
                f"torch.per_tensor_symmetric and torch.per_tensor_affine. but got: {qscheme}"
            )
        self.averaging_constant = averaging_constant
        if is_dynamic and self.averaging_constant != 1:
            raise NotImplementedError(
                "MovingAverageMinMaxObserver doesn't support dynamic quantization "
                f"for averaging constant of {self.averaging_constant}"
            )
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            eps=eps,
            is_dynamic=is_dynamic,
            **kwargs,
        )

    def forward(self, x_orig):
        if x_orig.numel() == 0:
            return x_orig
        x = x_orig.detach()  # avoid keeping autograd tape
        x = x.to(self.min_val.dtype)
        min_val = self.min_val
        max_val = self.max_val
        if min_val == float("inf") and max_val == float("-inf"):
            min_val, max_val = torch.aminmax(x)
        else:
            min_val_cur, max_val_cur = torch.aminmax(x)
            min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
            max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
        self.min_val.copy_(min_val)
        self.max_val.copy_(max_val)
        return x_orig
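# Numeric example of the moving-average update above, with averaging_constant
# c = 0.01: if the running min is -1.0 and a new batch has min(X) = -2.0, the
# update is x_min = -1.0 + 0.01 * (-2.0 - (-1.0)) = -1.01, so a single outlier
# batch shifts the recorded range only slightly (useful during QAT).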
class PerChannelMinMaxObserver(UniformQuantizationObserverBase):
    r"""Observer module for computing the quantization parameters based on the
    running per channel min and max values.

    This observer uses the tensor min/max statistics to compute the per channel
    quantization parameters. The module records the running minimum and maximum
    of incoming tensors, and uses this statistic to compute the quantization
    parameters.

    Args:
        ch_axis: Channel axis
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec.
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
        quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    The quantization parameters are computed the same way as in
    :class:`~torch.ao.quantization.observer.MinMaxObserver`, with the difference
    that the running min/max values are stored per channel.
    Scales and zero points are thus computed per channel as well.

    .. note:: If the running minimum equals the running maximum, the scales
              and zero_points are set to 1.0 and 0.
    """

    min_val: torch.Tensor
    max_val: torch.Tensor

    def __init__(
        self,
        ch_axis=0,
        dtype=torch.quint8,
        qscheme=torch.per_channel_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        if not is_per_channel(qscheme):
            raise NotImplementedError(
                "PerChannelMinMaxObserver's qscheme only support "
                "torch.per_channel_symmetric, torch.per_channel_affine and "
                "torch.per_channel_affine_float_qparams."
            )
        if is_dynamic:
            raise NotImplementedError(
                "PerChannelMinMaxObserver doesn't support dynamic quantization"
            )
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            factory_kwargs=factory_kwargs,
            eps=eps,
            is_dynamic=is_dynamic,
            **kwargs,
        )
        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
        self.ch_axis = ch_axis
        self.register_buffer("min_val", torch.tensor([], **factory_kwargs))
        self.register_buffer("max_val", torch.tensor([], **factory_kwargs))
        if (
            self.qscheme == torch.per_channel_symmetric
            and self.reduce_range
            and self.dtype == torch.quint8
        ):
            raise NotImplementedError(
                "Cannot reduce range for symmetric quantization for quint8"
            )

    def forward(self, x_orig):
        return self._forward(x_orig)

    def _forward(self, x_orig):
        if x_orig.numel() == 0:
            return x_orig
        x = x_orig.detach()  # avoid keeping autograd tape
        min_val = self.min_val
        max_val = self.max_val
        x_dim = x.size()

        new_axis_list = [i for i in range(len(x_dim))]  # noqa: C416
        new_axis_list[self.ch_axis] = 0
        new_axis_list[0] = self.ch_axis
        y = x.permute(new_axis_list)
        # Need to match dtype of min/max because the updates to buffers
        # are done in place and types need to match for comparisons
        y = y.to(self.min_val.dtype)
        y = torch.flatten(y, start_dim=1)
        if min_val.numel() == 0 or max_val.numel() == 0:
            min_val, max_val = torch.aminmax(y, dim=1)
        else:
            min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
            min_val = torch.min(min_val_cur, min_val)
            max_val = torch.max(max_val_cur, max_val)
        self.min_val.resize_(min_val.shape)
        self.max_val.resize_(max_val.shape)
        self.min_val.copy_(min_val)
        self.max_val.copy_(max_val)
        return x_orig

    @torch.jit.export
    def calculate_qparams(self):
        return self._calculate_qparams(self.min_val, self.max_val)

    @torch.jit.export
    def extra_repr(self):
        return f"min_val={self.min_val}, max_val={self.max_val}"

    def _load_from_state_dict(
        self,
        state_dict: dict[str, Any],
        prefix: str,
        local_metadata: dict[str, torch.Tensor],
        strict: bool,
        missing_keys: list[str],
        unexpected_keys: list[str],
        error_msgs: list[str],
    ):
        version = local_metadata.get("version", None)
        if version is not None and version < 3:
            local_state = ["min_vals", "max_vals"]
            expected_min_name = "min_vals"
            expected_max_name = "max_vals"
        else:
            local_state = ["min_val", "max_val"]
            expected_min_name = "min_val"
            expected_max_name = "max_val"
        for name in local_state:
            key = prefix + name
            if key in state_dict:
                val = state_dict[key]
                # Custom handling to allow loading min_val or max_val of size N
                # into uninitialized buffers of size 0. The buffers are resized
                # here, and the values are copied in the default state_dict
                # loading code of the parent.
                if name == expected_min_name:
                    self.min_val.resize_(val.shape)
                elif name == expected_max_name:
                    self.max_val.resize_(val.shape)
                else:
                    warnings.warn(
                        f"Observer load_from_state_dict got unexpected name {name}"
                    )
                # For torchscript modules we need to update the attributes here
                # since we do not call the `_load_from_state_dict` function
                # defined in module.py
                if torch.jit.is_scripting():
                    if name == expected_min_name:
                        self.min_val.copy_(val)
                    elif name == expected_max_name:
                        self.max_val.copy_(val)
                    else:
                        warnings.warn(
                            f"Observer load_from_state_dict got unexpected name {name}"
                        )
            elif strict:
                missing_keys.append(key)

        if not torch.jit.is_scripting():
            super()._load_from_state_dict(
                state_dict,
                prefix,
                local_metadata,
                False,
                missing_keys,
                unexpected_keys,
                error_msgs,
            )

    def _load_from_state_dict_script(
        self,
        state_dict: dict[str, Any],
        prefix: str,
        local_metadata: dict[str, torch.Tensor],
        strict: bool,
        missing_keys: list[str],
        unexpected_keys: list[str],
        error_msgs: list[str],
    ):
        self._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    @torch.jit.export
    def reset_min_max_vals(self):
        """Resets the min/max values."""
        # This used to be torch.ones, but we do not want to initialize the
        # buffers with a (1,) shape; an empty tensor marks them uninitialized.
        self.min_val = torch.rand(0)
        self.max_val = torch.rand(0)
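# Per-channel usage sketch (illustrative, kept as a comment): for a linear
# weight of shape [out_features, in_features], ch_axis=0 yields one
# (scale, zero_point) pair per output channel.
#
#     >>> obs = PerChannelMinMaxObserver(
#     ...     ch_axis=0, dtype=torch.qint8, qscheme=torch.per_channel_symmetric
#     ... )
#     >>> obs(torch.randn(16, 32))  # weight-like tensor
#     >>> scales, zero_points = obs.calculate_qparams()
#     >>> scales.shape  # torch.Size([16])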
class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver):
    r"""Observer module for computing the quantization parameters based on the
    running per channel min and max values.

    This observer uses the tensor min/max statistics to compute the per channel
    quantization parameters. The module records the running minimum and maximum
    of incoming tensors, and uses this statistic to compute the quantization
    parameters.

    Args:
        averaging_constant: Averaging constant for min/max.
        ch_axis: Channel axis
        dtype: Quantized data type
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
        quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    The quantization parameters are computed the same way as in
    :class:`~torch.ao.quantization.observer.MovingAverageMinMaxObserver`, with the
    difference that the running min/max values are stored per channel.
    Scales and zero points are thus computed per channel as well.

    .. note:: If the running minimum equals the running maximum, the scales
              and zero_points are set to 1.0 and 0.
    """

    def __init__(
        self,
        averaging_constant=0.01,
        ch_axis=0,
        dtype=torch.quint8,
        qscheme=torch.per_channel_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        if not is_per_channel(qscheme):
            raise NotImplementedError(
                "MovingAveragePerChannelMinMaxObserver's qscheme only support "
                "torch.per_channel_symmetric, torch.per_channel_affine and "
                "torch.per_channel_affine_float_qparams."
            )
        if is_dynamic:
            raise NotImplementedError(
                "MovingAveragePerChannelMinMaxObserver doesn't support dynamic quantization"
            )
        super().__init__(
            ch_axis=ch_axis,
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            eps=eps,
            is_dynamic=is_dynamic,
            **kwargs,
        )
        self.averaging_constant = averaging_constant

    def forward(self, x_orig):
        if x_orig.numel() == 0:
            return x_orig
        x = x_orig.detach()  # avoid keeping autograd tape
        x = x.to(self.min_val.dtype)
        min_val = self.min_val
        max_val = self.max_val
        x_dim = x.size()

        new_axis_list = [i for i in range(len(x_dim))]  # noqa: C416
        new_axis_list[self.ch_axis] = 0
        new_axis_list[0] = self.ch_axis
        y = x.permute(new_axis_list)
        y = torch.flatten(y, start_dim=1)
        if min_val.numel() == 0 or max_val.numel() == 0:
            min_val, max_val = torch.aminmax(y, dim=1)
        else:
            min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
            min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
            max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
        self.min_val.resize_(min_val.shape)
        self.max_val.resize_(max_val.shape)
        self.min_val.copy_(min_val)
        self.max_val.copy_(max_val)
        return x_orig


class HistogramObserver(UniformQuantizationObserverBase):
    r"""
    The module records the running histogram of tensor values along with
    min/max values. ``calculate_qparams`` will calculate scale and zero_point.

    Args:
        bins: Number of bins to use for the histogram
        dtype: dtype argument to the `quantize` node needed to implement the
               reference model spec
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
        eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.

    The scale and zero point are computed as follows:

    1. Create the histogram of the incoming inputs.
        The histogram is computed continuously, and the ranges per bin change
        with every new tensor observed.
    2. Search the distribution in the histogram for optimal min/max values.
        The search for the min/max values ensures the minimization of the
        quantization error with respect to the floating point model.
    3. Compute the scale and zero point the same way as in the
        :class:`~torch.ao.quantization.MinMaxObserver`
    """

    histogram: torch.Tensor
    min_val: torch.Tensor
    max_val: torch.Tensor

    def __init__(
        self,
        bins: int = 2048,
        dtype: torch.dtype = torch.quint8,
        qscheme=torch.per_tensor_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
        is_dynamic=False,
        **kwargs,
    ) -> None:
        if not is_per_tensor(qscheme):
            raise NotImplementedError(
                "HistogramObserver's qscheme only support torch.per_tensor_symmetric "
                "and torch.per_tensor_affine."
            )
        if is_dynamic:
            raise NotImplementedError(
                "HistogramObserver doesn't support dynamic quantization"
            )
        # bins: The number of bins used for histogram calculation.
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            factory_kwargs=factory_kwargs,
            eps=eps,
            is_dynamic=is_dynamic,
            **kwargs,
        )
        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
        self.bins = bins
        self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs))
        self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
        self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
        self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits
        self.upsample_rate = 16  # used to reduce quantization errors when rescaling the histogram

    def _get_norm(
        self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor
    ) -> torch.Tensor:
        r"""
        Compute the norm of the values uniformly distributed between
        delta_begin and delta_end.
        Currently only L2 norm is supported.

        norm = density * (integral_{begin, end} x^2)
             = density * (end^3 - begin^3) / 3
        """
        norm = (
            delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin
        ) / 3
        return density * norm

    def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int):
        r"""
        Compute the quantization error if we use start_bin to end_bin as the
        min and max to do the quantization.
        """
        bin_width = (self.max_val.item() - self.min_val.item()) / self.bins

        dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
        if dst_bin_width == 0.0:
            return 0.0

        src_bin = torch.arange(self.bins, device=self.histogram.device)
        # distances from the beginning of the first dst_bin to the beginning
        # and end of src_bin
        src_bin_begin = (src_bin - next_start_bin) * bin_width
        src_bin_end = src_bin_begin + bin_width

        # which dst_bins do the beginning and end of src_bin belong to?
        dst_bin_of_begin = torch.clamp(
            torch.div(src_bin_begin, dst_bin_width, rounding_mode="floor"),
            0,
            self.dst_nbins - 1,
        )
        dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width

        dst_bin_of_end = torch.clamp(
            torch.div(src_bin_end, dst_bin_width, rounding_mode="floor"),
            0,
            self.dst_nbins - 1,
        )
        density = self.histogram / bin_width

        norm = torch.zeros(self.bins, device=self.histogram.device)

        delta_begin = src_bin_begin - dst_bin_of_begin_center
        delta_end = dst_bin_width / 2
        norm += self._get_norm(
            delta_begin,
            torch.ones(self.bins, device=self.histogram.device) * delta_end,
            density,
        )

        norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm(
            torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density
        )

        dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2

        delta_begin = -dst_bin_width / 2
        delta_end = src_bin_end - dst_bin_of_end_center
        norm += self._get_norm(torch.tensor(delta_begin), delta_end, density)

        return norm.sum().item()
    def _non_linear_param_search(self) -> tuple[torch.Tensor, torch.Tensor]:
        r"""Non-linear parameter search.

        An approximation for L2 error minimization for selecting min/max.
        By selecting new min/max, we filter out outliers in the input distribution.
        This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
        caffe2/quantization/server/norm_minimization.cc
        """
        assert self.histogram.size()[0] == self.bins, "bins mismatch"
        bin_width = (self.max_val - self.min_val) / self.bins

        # cumulative sum
        total = torch.sum(self.histogram).item()
        cSum = torch.cumsum(self.histogram, dim=0)

        stepsize = 1e-5  # granularity
        alpha = 0.0  # lower bound
        beta = 1.0  # upper bound
        start_bin = 0
        end_bin = self.bins - 1
        norm_min = float("inf")

        while alpha < beta:
            # Find the next step
            next_alpha = alpha + stepsize
            next_beta = beta - stepsize

            # find the left and right bins between the quantile bounds
            l = start_bin
            r = end_bin
            while l < end_bin and cSum[l] < next_alpha * total:
                l = l + 1
            while r > start_bin and cSum[r] > next_beta * total:
                r = r - 1

            # decide the next move
            next_start_bin = start_bin
            next_end_bin = end_bin
            if (l - start_bin) > (end_bin - r):
                # move the start bin
                next_start_bin = l
                alpha = next_alpha
            else:
                # move the end bin
                next_end_bin = r
                beta = next_beta

            if next_start_bin == start_bin and next_end_bin == end_bin:
                continue

            # calculate the quantization error using next_start_bin and next_end_bin
            norm = self._compute_quantization_error(next_start_bin, next_end_bin)

            if norm > norm_min:
                break
            norm_min = norm
            start_bin = next_start_bin
            end_bin = next_end_bin

        new_min = self.min_val + bin_width * start_bin
        new_max = self.min_val + bin_width * (end_bin + 1)
        return new_min, new_max

    def _upscale_histogram(
        self,
        histogram: torch.Tensor,
        orig_min: torch.Tensor,
        orig_max: torch.Tensor,
        update_min: torch.Tensor,
        update_max: torch.Tensor,
    ):
        # this turns the histogram into a finer-grained histogram to reduce
        # bin quantization errors
        histogram = histogram.repeat_interleave(self.upsample_rate)
        bin_size = (orig_max - orig_min) / (self.bins * self.upsample_rate)
        mid_points_histogram = (
            torch.linspace(
                orig_min,
                orig_max,
                self.bins * self.upsample_rate + 1,
                device=orig_min.device,
            )[:-1].to(histogram.device)
            + 0.5 * bin_size
        )
        boundaries_new_histogram = torch.linspace(
            update_min, update_max, self.bins + 1, device=update_min.device
        ).to(histogram.device)
        # this maps the mid-points of the old histogram to the new histogram's space
        bucket_assignments = (
            torch.bucketize(mid_points_histogram, boundaries_new_histogram, right=True)
            - 1
        )
        # in case of numerical issues, values may land above/below the max/min bin
        bucket_assignments[bucket_assignments >= self.bins] = self.bins - 1
        bucket_assignments[bucket_assignments < 0] = 0

        # accumulate the old histogram's mass into the new bins, weighted by
        # the original histogram's values
        update_histogram = torch.bincount(
            bucket_assignments, weights=histogram, minlength=self.bins
        )
        return update_histogram

    def _combine_histograms(
        self,
        orig_hist: torch.Tensor,
        orig_min: torch.Tensor,
        orig_max: torch.Tensor,
        update_hist: torch.Tensor,
        update_min: torch.Tensor,
        update_max: torch.Tensor,
    ) -> torch.Tensor:
        # If the new min and max are the same as the current min and max,
        # we can just add the new histogram to the original histogram
        if update_min == orig_min and update_max == orig_max:
            return orig_hist + update_hist

        # If the orig hist only has one value (i.e., the min and max are the same)
        # we can just add it into the new histogram
        if orig_min == orig_max:
            bin_value = torch.sum(update_hist)
            transformed_orig_hist = (
                torch.histc(orig_min, bins=self.bins, min=update_min, max=update_max)
                * bin_value
            )
            return update_hist + transformed_orig_hist

        # We assume the update_hist is already in the target range; the orig
        # range must be contained within it
        assert update_min <= orig_min
        assert update_max >= orig_max

        # Now we need to turn the old histogram into the range of the new histogram
        transformed_orig_hist = self._upscale_histogram(
            orig_hist,
            orig_min,
            orig_max,
            update_min,
            update_max,
        )

        return update_hist + transformed_orig_hist

    def reset_histogram(
        self, x: torch.Tensor, min_val: torch.Tensor, max_val: torch.Tensor
    ) -> None:
        self.min_val.resize_(min_val.shape)
        self.min_val.copy_(min_val)
        self.max_val.resize_(max_val.shape)
        self.max_val.copy_(max_val)
        assert min_val.numel() == 1 and max_val.numel() == 1, (
            "histogram min/max values must be scalar."
        )
        new_histogram = torch.histc(x, self.bins, min=min_val, max=max_val)
        self.histogram.detach_().resize_(new_histogram.shape)
        self.histogram.copy_(new_histogram)

    def forward(self, x_orig: torch.Tensor) -> torch.Tensor:
        if x_orig.numel() == 0:
            return x_orig
        x = x_orig.detach()
        x_min, x_max = torch.aminmax(x)
        # want to ignore torch.inf since we don't actually want to make our
        # quantization range infinite, and in practice those values will be clamped
        if x_min == -torch.inf or x_max == torch.inf:
            warnings.warn("torch.inf detected in input tensor, ignoring input")
            x = x[x.abs() != torch.inf]
            if x.numel() == 0:
                return x_orig
            x_min, x_max = torch.aminmax(x)

        current_min = self.min_val
        current_max = self.max_val

        is_uninitialized = self.min_val == float("inf") or self.max_val == float(
            "-inf"
        )
        if is_uninitialized:
            self.reset_histogram(x, x_min, x_max)
        else:
            update_min, update_max = x_min, x_max
            new_min = torch.min(current_min, update_min)
            new_max = torch.max(current_max, update_max)

            # new_min and new_max should already have requires_grad set to False
            new_min, new_max = new_min.detach(), new_max.detach()
            update_histogram = torch.histc(
                x, self.bins, min=new_min, max=new_max
            ).to(self.histogram.device)
            if new_min == current_min and new_max == current_max:
                combined_histogram = self.histogram + update_histogram
                self.histogram.detach_().resize_(combined_histogram.shape)
                self.histogram.copy_(combined_histogram)
            else:
                combined_histogram = self._combine_histograms(
                    self.histogram,
                    current_min,
                    current_max,
                    update_histogram,
                    new_min,
                    new_max,
                )
                self.histogram.detach_().resize_(combined_histogram.shape)
                self.histogram.copy_(combined_histogram)
                self.min_val.detach_().resize_(new_min.shape)
                self.min_val.copy_(new_min)
                self.max_val.detach_().resize_(new_max.shape)
                self.max_val.copy_(new_max)

        return x_orig

    @torch.jit.export
    def calculate_qparams(self):
        is_uninitialized = self.min_val == float("inf") and self.max_val == float(
            "-inf"
        )
        if is_uninitialized:
            warnings.warn(
                "must run observer before calling calculate_qparams. "
                "Returning default scale and zero point "
            )
            return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor(
                [0], device=self.min_val.device.type
            )
        assert self.bins == len(self.histogram), (
            "The number of bins in histogram should be equal to the number of bins "
            "supplied while making this observer"
        )

        new_min, new_max = self._non_linear_param_search()

        return self._calculate_qparams(new_min, new_max)
destinationr keep_varsrcs r@rnz%HistogramObserver._save_to_state_dictOs: #KC*.,, FY&'*.,, FY&'rBc |jdd}||dkr|dz|dz} } | |vrG|| jtjdgk(r!tjt d|| <| |vrG|| jtjdgk(r!tjt d|| <ddg} | D]2} || z} | |vr|| }t || ||s"|j| 4t|%|||||||y)Nrrqrrrrr) rrrvSizer~rsetattrrrar)r?rrrrrrrr min_val_name max_val_namerrrrrcs r@rz'HistogramObserver._load_from_state_dictTs#!$$Y5 ?gk*0));Vi=O,Lz)l+11UZZ_D/4||E%L/IJ|,z)l+11UZZ_D/4||E&M/JJ|, ),  )D4-Cj  odC(##C(  ) %        rBc:d|jd|jSrrrJs r@rzHistogramObserver.extra_repr}rrB)rSrTrUrkrvrrrrrrrrrrbrAr r5rrErWr^rcrgrrrjrnrrrnros@r@r"r"s0|| \\ \\"\\'' EKK & * ** * {{*  * X <<49LLKP<<  4!#4!S4!l= % ell0J*K= ~' <<' ,,' ,, ' LL ' LL ' R%3<<%3,,%3,, %3 \\ %3 LL %3LL%3 %3N , ,(-  ,?D|| ,  ,7ell7u||7r YY99*7 ' RArBr"ceZdZUdZej ed<ej ed<ejejdddffd Z dZ ejjd Z xZS) r!aU Observer that simulates quantize and dequantize with fixed quantization parameters in training time. Only per tensor quantization is supported. Args: `scale` (float): fixed scale for the observer `zero_point` (int): fixed zero point for the observer `dtype`, `qscheme`, `quant_min`, `quant_max` rrrFc T|r tdt |d||d|||_||_|j dt j|gt j|j dt j|gt j||_ ||_ y)Nz9FixedQParamsObserver doesn't support dynamic quantizationrur)rbrrV) rrarArrr}rvr~rrrbry) r?rrrbryrrr_rNrcs r@rAzFixedQParamsObserver.__init__s %K  FuFvF"" WellE7%++&NO \5<< EII+VW  rBc|Sr<rV)r?Xs r@rgzFixedQParamsObserver.forwardrBc2|j|jfSr<)rrrJs r@rjz&FixedQParamsObserver.calculate_qparamsszz4??**rB)rSrTrUrkrvrrrrrArgrrrjrnros@r@r!r!sa  <<  ll''. YY++rBr!ceZdZdZej dddddddf d fd ZdZejjdZ ejjdZ xZ S) r)ai Observer that doesn't do anything and just passes its configuration to the quantized module's ``.from_float()``. Can be used for quantization to float16 which doesn't require determining ranges. Args: dtype: dtype argument to the `quantize` node needed to implement the reference model spec. quant_min: minimum value in quantized domain (TODO: align behavior with other observers) quant_max: maximum value in quantized domain custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation (Can be used in Graph Mode Passes for special case ops). compute_dtype (deprecated): if set, marks the future quantize function to use dynamic quantization instead of static quantization. This field is deprecated, use `is_dynamic=True` instead. is_dynamic: if True, the `quantize` function in the reference model representation taking stats from this observer instance will use dynamic quantization. NFc 2t ||||tj}|-tjtj j }||_||_||_ ||_ ||_||_ |rd}tjdyy)NruTzPlease use `is_dynamic` instead of `compute_dtype`. `compute_dtype` will be deprecated in a future release of PyTorch.)rarArvrrrrrrbryrr custom_oprzr{) r?rbcustom_op_name compute_dtyperrryrrr_rcs r@rAzPlaceholderObserver.__init__s u< ?--G ;++emm,00C  ""' J MM!  rBc|Sr<rVres r@rgzPlaceholderObserver.forwardr|rBc:d|jd|jS)Nzdtype=z , is_dynamic=rurJs r@rzPlaceholderObserver.extra_reprs |=0ABBrBctd)Nz>calculate_qparams should not be called for PlaceholderObserver ExceptionrJs r@rjz%PlaceholderObserver.calculate_qparamss L  rBr) rSrTrUrkrvrrArgrrrrjrnros@r@r)r)sy0mm    D YYCC YY  rBr)ceZdZdZdeeejiZejffd Z dZ ejjdZejjdZxZS)r*a The module is mainly for debug and records the tensor values during runtime. Args: dtype: Quantized data type qscheme: Quantization scheme to be used reduce_range: Reduces the range of the quantized data type by 1 bit tensor_valc6t||dg|_yNFru)rarAr)r?rbrcs r@rAzRecordingObserver.__init__s u7rBcX|jj|j|Sr<)rrcloneres r@rgzRecordingObserver.forwards qwwy)rBctd)Nz elements. 
For example if the input tensor is shape [8, 16], and the group size is 4, then the input tensor is reshaped to [64, 4] quantization parameters are calculated for each group of 4 elements, giving a total of 64 quantization parameters. Attributes: group_size (int): The size of each quantization group group_sizeNrrVrBr@r2r2s OrBr2ceZdZdZy)r3a+ Represents row-wise granularity in quantization. This is a special case of per-axis quantization and is unique to Float8 matmuls where the input is quantized with a block_size of (1, ..., input.shape[-1]). And the weight is quantized with a block_size of (1, weight.shape[1]). NrrVrBr@r3r3srBr3ceZdZdZy)r5a: Represents per-token granularity in quantization. This granularity type calculates a different set of quantization parameters for each token, which is represented as the last dimension of the tensor. For example, if the input tensor has shape [2, 3, 4], then there are 6 tokens with 4 elements each, and we will calculate 6 sets of quantization parameters, one for each token. If the input tensor has only two dimensions, e.g. [8, 16], then this is equivalent to `PerAxis(axis=0)`, which yields 8 sets of quantization parameters. NrrVrBr@r5r5s rBr5 input_shape. granularityrsct|tsJdt|tr|St|tr%t |}d||j <t |St|trdt|dz z|dfzSt|tr&t|dk(s Jd|d|jfSt|tr"dgt|z}|d|d<t |Std|)zGet the block size based on the input shape and granularity type. Args: input_shape: The input tensor shape possibly more than 2 dimensions granularity: The granularity type of the quantization z=Please provide an instance of Granularity, not subclass of itr)rrrzNExpecting input shape dim to be 2 for per group quantization, gotinput shape: zUnsupported Granularity: ) isinstancer.r4r0rrrr3rr2rr5 ValueError)rrrs r@r8r8s k; /G /+y) K )+& '( ;##$Z  K (s;'!+, B/AAA K *;1$ \]h\i j $;))** K *S3{++ $R 2Z  0 > ??rBceZdZdZeeZddddddejfde de jde de ede ed e ed e e jd e e jd ed e effd Zede j&de j&fdZedee j&e j&ffdZde j.j0defdZxZS)r-aObserver module for affine quantization (https://github.com/pytorch/ao/tree/main/torchao/quantization#affine-quantization) Args: `granularity` and `block_size`: The granularity of the quantization, must specify at least one, if both are specified `block_size` takes precedence Current supported granularity type are `PerTensor` and `PerAxis` other args: please see `:class:torchao.dtypes.AffineQuantizedTensor` NT mapping_type target_dtyperrrrr scale_dtypezero_point_dtype preserve_zerozero_point_domainc  t ||Jd||_||_||_||_||_||_||_||_ | |_ | |_ d|_ d|_ y)Nzgranularity is None)rarArrrrrrrrrrrroriginal_dtype) r?rrrrrrrrrrrrNrcs r@rAz$AffineQuantizedObserverBase.__init__s~ &=(==&((&""& 0*!2"rBinputrscy)z~forward function should take the input tensor and updates internal stats and return the original input Tensor NrV)r?rs r@rgz#AffineQuantizedObserverBase.forward<rBcy)zCalculate quantization parameter based on the stats attached to the observer module and returns a tuple of scale and zero_point Tensor NrVrJs r@rjz-AffineQuantizedObserverBase.calculate_qparamsBrrBmodel observer_nodecddlm}|jj|5|jJd|j Jdt |dr4|jr'|jjtjjj|jd|jj|j|j |j"|j$|j&|j(|j*|j,|j.jf }|jjt0j2|df}|jjt0j2|df}n|j5\}}|||jd|t7|tj8r |j:nd}|||jd |t7|tj8r |j:nd}|jjtjjj<|jd|j|||j |j"|j$|j.jfi} |jjtjjj>| |j|||j |j"|j$|j.jfd |j i} |jA| |jjC|dddy#1swYyxYw) z Converts the observer node in the graph into its quantized representation Args: model: graph module to convert the observer node in observer_node: the observer node to convert r)create_getattr_from_valueNz$Expecting block_size to be populatedz(Expecting original_dtype to be 
class AffineQuantizedObserverBase(ABC, torch.nn.Module):
    """Observer module for affine quantization
    (https://github.com/pytorch/ao/tree/main/torchao/quantization#affine-quantization)

    Args:
      `granularity` and `block_size`: The granularity of the quantization,
        must specify at least one, if both are specified `block_size` takes precedence.
        Currently supported granularity types are `PerTensor` and `PerAxis`.
      other args: please see `:class:torchao.dtypes.AffineQuantizedTensor`
    """

    with_args = classmethod(_with_args)

    def __init__(
        self,
        mapping_type: MappingType,
        target_dtype: torch.dtype,
        granularity: Granularity,
        quant_min: Optional[int] = None,
        quant_max: Optional[int] = None,
        eps: Optional[float] = None,
        scale_dtype: Optional[torch.dtype] = None,
        zero_point_dtype: Optional[torch.dtype] = None,
        preserve_zero: bool = True,
        zero_point_domain: Optional[ZeroPointDomain] = ZeroPointDomain.INT,
    ):
        super().__init__()
        assert granularity is not None, "granularity is None"

        self.mapping_type = mapping_type
        self.target_dtype = target_dtype
        self.granularity = granularity
        self.quant_min = quant_min
        self.quant_max = quant_max
        self.eps = eps
        self.scale_dtype = scale_dtype
        self.zero_point_dtype = zero_point_dtype
        self.preserve_zero = preserve_zero
        self.zero_point_domain = zero_point_domain
        # populated during forward
        self.block_size = None
        self.original_dtype = None

    @abstractmethod
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """forward function should take the input tensor
        and update internal stats and return the original input Tensor
        """

    @abstractmethod
    def calculate_qparams(self) -> tuple[torch.Tensor, torch.Tensor]:
        """Calculate quantization parameter based on the stats attached to the observer module
        and returns a tuple of scale and zero_point Tensor
        """

    def convert(self, model: torch.fx.GraphModule, observer_node: Node):
        """
        Converts the observer node in the graph into its quantized representation

        Args:
            model: graph module to convert the observer node in
            observer_node: the observer node to convert
        """
        from torch.ao.quantization.fx.utils import create_getattr_from_value

        with model.graph.inserting_before(observer_node):
            assert self.block_size is not None, "Expecting block_size to be populated"
            assert self.original_dtype is not None, (
                "Expecting original_dtype to be populated"
            )
            if hasattr(self, "is_dynamic") and self.is_dynamic:
                choose_qparams_affine = model.graph.call_function(
                    torch.ops.pt2e_quant.choose_qparams_affine,
                    (
                        observer_node.args[0],
                        self.mapping_type.name,
                        self.block_size,
                        self.target_dtype,
                        self.quant_min,
                        self.quant_max,
                        self.eps,
                        self.scale_dtype,
                        self.zero_point_dtype,
                        self.preserve_zero,
                        self.zero_point_domain.name,
                    ),
                )
                scale_node = model.graph.call_function(
                    operator.getitem, (choose_qparams_affine, 0)
                )
                zero_point_node = model.graph.call_function(
                    operator.getitem, (choose_qparams_affine, 1)
                )
            else:
                scale, zero_point = self.calculate_qparams()
                scale_node = create_getattr_from_value(
                    model, model.graph, "_scale", scale
                )
                zero_point_node = create_getattr_from_value(
                    model, model.graph, "_zero_point", zero_point
                )

            q_node = model.graph.call_function(
                torch.ops.pt2e_quant.quantize_affine,
                (
                    observer_node.args[0],
                    self.block_size,
                    scale_node,
                    zero_point_node,
                    self.target_dtype,
                    self.quant_min,
                    self.quant_max,
                    self.zero_point_domain.name,
                ),
                {},
            )
            dq_node = model.graph.call_function(
                torch.ops.pt2e_quant.dequantize_affine,
                (
                    q_node,
                    self.block_size,
                    scale_node,
                    zero_point_node,
                    self.target_dtype,
                    self.quant_min,
                    self.quant_max,
                    self.zero_point_domain.name,
                ),
                {"output_dtype": self.original_dtype},
            )
            observer_node.replace_all_uses_with(dq_node)
            model.graph.erase_node(observer_node)


def _is_observer_script_module(mod, obs_type_name):
    """Returns true if given mod is an instance of Observer script module."""
    if isinstance(mod, torch.jit.RecursiveScriptModule):
        # qualified name looks like
        # '__torch__.torch.ao.quantization.observer.___torch_mangle_2.MinMaxObserver'
        suffix = mod._c.qualified_name.split(".", 1)[1]
        name = re.sub(r"\.___torch_mangle_\d+", "", suffix)
        return obs_type_name in name
    return False


def _is_activation_post_process(module):
    return isinstance(
        module,
        (
            torch.ao.quantization.ObserverBase,
            torch.ao.quantization.FakeQuantizeBase,
            AffineQuantizedObserverBase,
        ),
    ) or _is_observer_script_module(module, "quantization.observer")


def _is_per_channel_script_obs_instance(module):
    if isinstance(module, torch.jit.RecursiveScriptModule):
        return _is_observer_script_module(
            module, "quantization.observer.PerChannelMinMaxObserver"
        ) or _is_observer_script_module(
            module, "quantization.observer.MovingAveragePerChannelMinMaxObserver"
        )
    return False


def get_observer_state_dict(mod):
    r"""
    Returns the state dict corresponding to the observer stats.
    Traverse the model state_dict and extract out the stats.
    """
    od = OrderedDict()
    if isinstance(mod, torch.jit.RecursiveScriptModule):
        for k, v in mod.state_dict().items():
            if "observer" in k:
                od[k] = v
    else:
        # path for GraphModule and nn.Module (eager mode)
        for k, v in mod.state_dict().items():
            if "activation_post_process" in k:
                od[k] = v
    od._metadata = mod.state_dict()._metadata  # type: ignore[attr-defined]
    return od


def load_observer_state_dict(mod, obs_dict):
    r"""
    Given input model and a state_dict containing model observer stats,
    load the stats back into the model. The observer state_dict can be saved
    using torch.ao.quantization.get_observer_state_dict
    """
    missing_keys: list[str] = []
    unexpected_keys: list[str] = []
    for name, module in mod.named_modules():
        prefix = name + "."
        if _is_activation_post_process(module):
            if _is_per_channel_script_obs_instance(module):
                # For per-channel observers we need to call a custom
                # load_from_state_dict to resize the tensor. However this is
                # not called when the module is scripted and we end up calling
                # the default one in module.py
                module._load_from_state_dict_script(
                    obs_dict, prefix, {}, True, missing_keys, unexpected_keys, []
                )
            else:
                module._load_from_state_dict(
                    obs_dict, prefix, {}, False, missing_keys, unexpected_keys, []
                )
    for k in missing_keys:
        if "observer" in k or "activation_post_process" in k:
            raise Exception(  # noqa: TRY002
                f"Missing keys for observer {k} in state_dict"
            )
    for k in unexpected_keys:
        if "observer" in k or "activation_post_process" in k:
            raise Exception(  # noqa: TRY002
                f"Unexpected keys for observer {k} in state_dict"
            )


default_observer = MinMaxObserver.with_args(quant_min=0, quant_max=127)
"""
Default observer for static quantization, usually used for debugging.
"""

default_placeholder_observer = PlaceholderObserver
"""
Default placeholder observer, usually used for quantization to torch.float16.
"""

default_debug_observer = RecordingObserver
"""
Default debug-only observer.
"""

default_weight_observer = MinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_tensor_symmetric
)
"""
Default weight observer.
"""

weight_observer_range_neg_127_to_127 = MinMaxObserver.with_args(
    dtype=torch.qint8,
    qscheme=torch.per_tensor_symmetric,
    quant_min=-127,
    quant_max=127,
    eps=2**-12,
)
"""
Symmetric weight observer with the 8-bit values restricted to [-127, +127],
excluding -128.
"""

default_histogram_observer = HistogramObserver.with_args(quant_min=0, quant_max=127)
"""
Default histogram observer, usually used for PTQ.
"""

default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
"""
Default per-channel weight observer, usually used on backends where per-channel
weight quantization is supported, such as `fbgemm`.
"""

per_channel_weight_observer_range_neg_127_to_127 = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8,
    qscheme=torch.per_channel_symmetric,
    quant_min=-127,
    quant_max=127,
    eps=2**-12,
)
"""
Per-channel, symmetric weight observer with the 8-bit values restricted to
[-127, +127], excluding -128.
"""

default_dynamic_quant_observer = PlaceholderObserver.with_args(
    dtype=torch.quint8, quant_min=0, quant_max=255, is_dynamic=True
)
"""
Default observer for dynamic quantization.
"""

default_float_qparams_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point.
"""

default_float_qparams_observer_4bit = PerChannelMinMaxObserver.with_args(
    dtype=torch.quint4x2, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point and 4 bit activations.
"""

default_fixed_qparams_range_neg1to1_observer = FixedQParamsObserver.with_args(
    scale=2.0 / 256.0, zero_point=128, dtype=torch.quint8, quant_min=0, quant_max=255
)
default_fixed_qparams_range_0to1_observer = FixedQParamsObserver.with_args(
    scale=1.0 / 256.0, zero_point=0, dtype=torch.quint8, quant_min=0, quant_max=255
)
# The following 2 aliases are kept for backwards compatibility.
default_symmetric_fixed_qparams_observer = default_fixed_qparams_range_neg1to1_observer
default_affine_fixed_qparams_observer = default_fixed_qparams_range_0to1_observer

default_reuse_input_observer = ReuseInputObserver
"""
Default observer for operators like reshape that reuses the observer of input to
the operator.
"""
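# End-to-end sketch of saving and restoring calibration statistics with the
# helpers above (model names are hypothetical, kept as a comment):
#
#     >>> calibrated_stats = get_observer_state_dict(prepared_model)
#     >>> torch.save(calibrated_stats, "observer_stats.pt")
#     >>> # later, on a freshly prepared copy of the same model:
#     >>> load_observer_state_dict(new_prepared_model, torch.load("observer_stats.pt"))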