
import math
import numpy as np

from scipy._lib._util import _asarray_validated
from scipy._lib._array_api import (array_namespace, xp_size, xp_broadcast_promote,
                                   xp_copy, xp_float_to_complex, is_complex)
from scipy._lib import array_api_extra as xpx

__all__ = ["logsumexp", "softmax", "log_softmax"]


def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    r"""Compute the log of the sum of exponentials of input elements.

Parameters
----------
a : array_like
    Input array.
axis : None or int or tuple of ints, optional
    Axis or axes over which the sum is taken. By default `axis` is None,
    and all elements are summed.

    .. versionadded:: 0.11.0
b : array_like, optional
    Scaling factor for exp(`a`); must be of the same shape as `a` or
    broadcastable to `a`. These values may be negative in order to
    implement subtraction.

    .. versionadded:: 0.12.0
keepdims : bool, optional
    If this is set to True, the axes which are reduced are left in the
    result as dimensions with size one. With this option, the result
    will broadcast correctly against the original array.

    .. versionadded:: 0.15.0
return_sign : bool, optional
    If this is set to True, the result will be a pair containing sign
    information; if False, results that are negative will be returned
    as NaN. Default is False (no sign information).

    .. versionadded:: 0.16.0

Returns
-------
res : ndarray
    The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
    more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
    is returned. If ``return_sign`` is True, ``res`` contains the log of
    the absolute value of the argument.
sgn : ndarray
    If ``return_sign`` is True, this will be an array of floating-point
    numbers matching ``res``, containing +1, 0, or -1 (for real-valued inputs)
    or a complex phase (for complex inputs). This gives the sign of the
    argument of the logarithm in ``res``.
    If ``return_sign`` is False, only one result is returned.

See Also
--------
numpy.logaddexp, numpy.logaddexp2

Notes
-----
NumPy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
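
For two arguments, the two functions agree:

>>> import numpy as np
>>> from scipy.special import logsumexp
>>> np.allclose(logsumexp([1.0, 2.0]), np.logaddexp(1.0, 2.0))
True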

The logarithm is a multivalued function: for each :math:`x` there are
infinitely many values of :math:`z` such that :math:`\exp(z) = x`. The
convention is to return the :math:`z` whose imaginary part lies in
:math:`(-\pi, \pi]`.

Examples
--------
>>> import numpy as np
>>> from scipy.special import logsumexp
>>> a = np.arange(10)
>>> logsumexp(a)
9.4586297444267107
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
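
Unlike the naive expression, `logsumexp` does not overflow when the
exponentials are large:

>>> logsumexp(np.array([1000., 1000.]))
1000.6931471805599
>>> np.log(np.sum(np.exp(np.array([1000., 1000.]))))
inf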

With weights

>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647

Returning a sign flag

>>> logsumexp([1,2],b=[1,-1],return_sign=True)
(1.5413248546129181, -1.0)
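
Without ``return_sign=True``, the same input gives ``nan``, because the sum
of the weighted exponentials is negative:

>>> logsumexp([1,2], b=[1,-1])
nan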

Notice that `logsumexp` does not directly support masked arrays. To use it
on a masked array, convert the mask into zero weights:

>>> a = np.ma.array([np.log(2), 2, np.log(3)],
...                  mask=[False, True, False])
>>> b = (~a.mask).astype(int)
>>> logsumexp(a.data, b=b), np.log(5)
(1.6094379124341005, 1.6094379124341005)
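
The reduction can be restricted to an axis, and the reduced axis can be
retained as a singleton dimension with ``keepdims``:

>>> a = np.arange(6.).reshape(2, 3)
>>> logsumexp(a, axis=1)
array([2.40760596, 5.40760596])
>>> logsumexp(a, axis=1, keepdims=True).shape
(2, 1)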

T)ensure_writeableforce_floatingxp   )ndimr   Nr   ignore)divideinvalid)axisreturn_signr   dtypecomplex floating              ?)r    )r   r   xpx
atleast_ndtupleranger   r   nperrstate
_logsumexpasarrayshapefullinfr   signisdtyperealr   _wrap_radiansimagsqueeze)ar   bkeepdimsr   r   outsgnr&   r+   r-   s              K/var/www/html/venv/lib/python3.13/site-packages/scipy/special/_logsumexp.pyr   r      s   z 
	BtDUWXDAqqR(A,-MqqR(qA#'<5qvvTDqzQ[[(;!!TrRHC <; 

177#ggeElRVVG177g;ggcl	zz#))/00773<D&}RWWS\2'FGDb.C773<D&}RWWS\2'FGDb.C -5"**S*
$#C),"**S*
$PSCXX]#b'Co#((a-#b'cC$C:-#-1 <;s   H
H-c                     Uc  [        U 5      OUnU * [        R                  -   S[        R                  -  -  [        R                  -
  * nUR                  U 5      UR                  :  nX   X#'   U$ )N   )r   mathpiabs)xr   r2   no_wraps       r4   r,   r,      s^    !z	rBR$''\a$''k*TWW4
5CffQi"%%G:CLJ    c                    Uc  [        U 5      OUnUR                  U R                  S5      (       a  UR                  U 5      nUR	                  X1SS9nX4:H  nUR                  UR                  [        U 5      5      U R                  5      nSXe) '   UR	                  XaSS9nXg:H  n[        U 5      n SX) '   UR                  XU R                  SS9nOUR	                  XSS9nX:H  nUR                  U5      UR                  U5      4$ )Nr   Tr   r1   r   )r   r   r1   )r   r*   r   r+   maxreshapearanger   r&   r   sumr%   )r/   r   r   real_ar@   maskimax_is           r4   #_elements_and_indices_with_max_realrH      s      "z	rB	zz!''-..ffVf6}
 JJryy,agg6%qd3zAJ%ffQ4f@ffQDf1x::c?BJJt,,,r<   c           	          XR                  U S:H  UR                  SU R                  S9UR                  U 5      5      -  $ )Nr   r   r   )wherer%   r   r9   )r:   r   s     r4   _signrK      s5    xxQ

1AGG
 <bffQiHHHr<   c           	         Ub  UR                   * XS:H  '   [        XUS9u  pVUR                   * X'   UR                  X`R                  5      nUc  UR	                  XrSU R                  S9OUR	                  X-  USU R                  S9nUR                  UR                  U5      XTR                  SUR                  S95      n	Ub  XR                  X	-
  5      -  OUR                  X	-
  5      n
UR	                  XSU
R                  S9nUR                  US:H  XU-  5      n[        US-   US9[        XS9-  nUR                  UR                  S5      (       a,  UR                  US	:  U* S
-
  U5      nUR                  U5      nO=XR                  UR                  U5      UR                  SUR                  S9-  5      -  nUR                  U5      UR                  U5      -   U-   nU(       a$  [        X5      (       a  UR!                  U5      nX4$ UR                  UR                  S5      (       a  UR"                  XS:  '   X4$ )Nr   )r   r   T)r   r1   r   r   r   )r   zreal floatingr?   r6   r   )r(   rH   astyper   rC   rJ   isfiniter%   exprK   r*   r9   r-   log1plogr	   r+   nan)r/   r0   r   r   r   a_maxi_maxi_max_dtmshiftrO   sr3   r2   s                 r4   r$   r$      s   
 	}VVGq&	 7qKLE wAHyy(H GHid!''	BffQ\t177fK 
 HHR[['

1EKK
0PQE $%=!ffQY
bffQY6GC
sCII>A
aaC A A"
a
/C	zz!''?++HHQVaR!VQ'FF1I FF2775>BJJt5;;J,OOPP ((1+q	
!E
)Cc''#,C 8O 
CII	/	/vv!G8Or<   c                     [        U SS9n [        R                  " XSS9n[        R                  " X-
  5      nU[        R                  " X1SS9-  $ )a	  Compute the softmax function.

The softmax function transforms each element of a collection by
computing the exponential of each element divided by the sum of the
exponentials of all the elements. That is, if `x` is a one-dimensional
numpy array::

    softmax(x) = np.exp(x)/sum(np.exp(x))

Parameters
----------
x : array_like
    Input array.
axis : int or tuple of ints, optional
    Axis to compute values along. Default is None and softmax will be
    computed over the entire array `x`.

Returns
-------
s : ndarray
    An array the same shape as `x`. The result will sum to 1 along the
    specified axis.

Notes
-----
The formula for the softmax function :math:`\sigma(x)` for a vector
:math:`x = \{x_0, x_1, ..., x_{n-1}\}` is

.. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}}

The `softmax` function is the gradient of `logsumexp`.

The implementation uses shifting to avoid overflow. See [1]_ for more
details.

.. versionadded:: 1.2.0

References
----------
.. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the
   log-sum-exp and softmax functions", IMA Journal of Numerical Analysis,
   Vol.41(4), :doi:`10.1093/imanum/draa038`.

Examples
--------
>>> import numpy as np
>>> from scipy.special import softmax
>>> np.set_printoptions(precision=5)

>>> x = np.array([[1, 0.5, 0.2, 3],
...               [1,  -1,   7, 3],
...               [2,  12,  13, 3]])
...

Compute the softmax transformation over the entire array.

>>> m = softmax(x)
>>> m
array([[  4.48309e-06,   2.71913e-06,   2.01438e-06,   3.31258e-05],
       [  4.48309e-06,   6.06720e-07,   1.80861e-03,   3.31258e-05],
       [  1.21863e-05,   2.68421e-01,   7.29644e-01,   3.31258e-05]])

>>> m.sum()
1.0

Compute the softmax transformation along the first axis (i.e., the
columns).

>>> m = softmax(x, axis=0)

>>> m
array([[  2.11942e-01,   1.01300e-05,   2.75394e-06,   3.33333e-01],
       [  2.11942e-01,   2.26030e-06,   2.47262e-03,   3.33333e-01],
       [  5.76117e-01,   9.99988e-01,   9.97525e-01,   3.33333e-01]])

>>> m.sum(axis=0)
array([ 1.,  1.,  1.,  1.])

Compute the softmax transformation along the second axis (i.e., the rows).

>>> m = softmax(x, axis=1)
>>> m
array([[  1.05877e-01,   6.42177e-02,   4.75736e-02,   7.82332e-01],
       [  2.42746e-03,   3.28521e-04,   9.79307e-01,   1.79366e-02],
       [  1.22094e-05,   2.68929e-01,   7.31025e-01,   3.31885e-05]])

>>> m.sum(axis=1)
array([ 1.,  1.,  1.])
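
The result is unchanged when a constant is subtracted from every entry of
`x`; this is the shift that the implementation applies internally to avoid
overflow:

>>> np.allclose(softmax(x), softmax(x - np.max(x)))
True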

Fcheck_finiteTr>   )r   r"   amaxrO   rC   )r:   r   x_maxexp_x_shifteds       r4   r   r      sG    v 	151AGGA40EFF19%M266-TJJJr<   c                    [        U SS9n [        R                  " XSS9nUR                  S:  a  SU[        R                  " U5      ) '   O[        R                  " U5      (       d  SnX-
  n[        R
                  " U5      n[        R                  " SS9   [        R                  " XASS9n[        R                  " U5      nSSS5        UW-
  nU$ ! , (       d  f       N= f)	a  Compute the logarithm of the softmax function.

In principle::

    log_softmax(x) = log(softmax(x))

but using a more accurate implementation.

Parameters
----------
x : array_like
    Input array.
axis : int or tuple of ints, optional
    Axis to compute values along. Default is None and the log-softmax will
    be computed over the entire array `x`.

Returns
-------
s : ndarray or scalar
    An array with the same shape as `x`. Exponential of the result will
    sum to 1 along the specified axis. If `x` is a scalar, a scalar is
    returned.

Notes
-----
`log_softmax` is more accurate than ``np.log(softmax(x))`` with inputs that
make `softmax` saturate (see examples below).

.. versionadded:: 1.5.0

Examples
--------
>>> import numpy as np
>>> from scipy.special import log_softmax
>>> from scipy.special import softmax
>>> np.set_printoptions(precision=5)

>>> x = np.array([1000.0, 1.0])

>>> y = log_softmax(x)
>>> y
array([   0., -999.])

>>> with np.errstate(divide='ignore'):
...   y = np.log(softmax(x))
...
>>> y
array([  0., -inf])
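
Exponentiating the result of `log_softmax` recovers the softmax
probabilities, which sum to 1:

>>> np.exp(log_softmax(x)).sum()
1.0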

    """
    x = _asarray_validated(x, check_finite=False)

    x_max = np.amax(x, axis=axis, keepdims=True)

    if x_max.ndim > 0:
        x_max[~np.isfinite(x_max)] = 0
    elif not np.isfinite(x_max):
        x_max = 0

    tmp = x - x_max
    exp_tmp = np.exp(tmp)

    # Suppress warnings about log of zero: slices whose exponentials all
    # underflow legitimately produce -inf.
    with np.errstate(divide='ignore'):
        s = np.sum(exp_tmp, axis=axis, keepdims=True)
        out = np.log(s)

    out = tmp - out
    return out
C)NNFF)N)r?   N)r7   numpyr"   scipy._lib._utilr   scipy._lib._array_apir   r   r   r   r   r	   
scipy._libr
   r   __all__r   r,   rH   rK   r$   r   r   r   r<   r4   <module>rg      sN      /  .
1|.~-DI7t^KBFr<   