
import numpy as np

from ._cython_nnls import _nnls

__all__ = ['nnls']


def nnls(A, b, maxiter=None, *, atol=None):
    """
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.

This problem, often called nonnegative least squares (NNLS), is a convex
optimization problem with convex constraints. It typically arises when
``x`` models quantities for which only nonnegative values are attainable,
such as weights of ingredients, component costs, and so on.

Parameters
----------
A : (m, n) ndarray
    Coefficient array.
b : (m,) ndarray, float
    Right-hand side vector.
maxiter : int, optional
    Maximum number of iterations. Default is ``3 * n``.
atol : float, optional
    Tolerance value used in the algorithm to assess closeness to zero in
    the projected residual ``A.T @ (A x - b)`` entries. Increasing this
    value relaxes the solution constraints. A typical relaxation value can
    be selected as ``max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.)``.
    No default is set because the norm computation becomes expensive for
    large problems, so supply this value only when necessary (see the last
    example below).

Returns
-------
x : ndarray
    Solution vector.
rnorm : float
    The 2-norm of the residual, ``|| Ax-b ||_2``.

See Also
--------
lsq_linear : Linear least squares with bounds on the variables

Notes
-----
The code is based on [2]_, which is an improved version of the classical
algorithm of [1]_. It uses an active-set method and solves the KKT
(Karush-Kuhn-Tucker) conditions for the nonnegative least squares problem.
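
Concretely, these conditions say that a feasible ``x`` is optimal exactly
when the projected residual ``A.T @ (A x - b)`` (the same quantity tested
against ``atol`` above) is nonnegative and complementary to ``x``:

.. math::

    x \geq 0, \qquad A^T (A x - b) \geq 0, \qquad x^T A^T (A x - b) = 0,

where the first two inequalities hold elementwise.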

References
----------
.. [1] Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM,
   1995, :doi:`10.1137/1.9781611971217`
.. [2] Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity-
   Constrained Least Squares Algorithm", Journal of Chemometrics, 1997,
   :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L`

Examples
--------
>>> import numpy as np
>>> from scipy.optimize import nnls
...
>>> A = np.array([[1, 0], [1, 0], [0, 1]])
>>> b = np.array([2, 1, 1])
>>> nnls(A, b)
(array([1.5, 1. ]), 0.7071067811865475)

>>> b = np.array([-1, -1, -1])
>>> nnls(A, b)
(array([0., 0.]), 1.7320508075688772)
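
The relaxed tolerance suggested in the ``atol`` description can be computed
from the problem data and passed explicitly. For a small, well-conditioned
problem like the one above it is tiny and leaves the result unchanged; the
call is shown only to illustrate the keyword:

>>> m, n = A.shape
>>> tol = max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.)
>>> nnls(A, b, atol=tol)
(array([0., 0.]), 1.7320508075688772)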

C)dtypeorder)r	      z)Expected a two-dimensional array (matrix)z, but the shape of A is r   z)Expected a one-dimensional array (vector)z, but the shape of b is r   z0Incompatible dimensions. The first dimension of zA is z, while the shape of b is    z%Maximum number of iterations reached.)npasarray_chkfinitefloat64lenshape
ValueErrorr   RuntimeError)	Abmaxiterr   mnxrnorminfos	            G/var/www/html/venv/lib/python3.13/site-packages/scipy/optimize/_nnls.pyr   r      s   D 	Qbjj<A
Qbjj1A
177|qD3AGG9=> ? 	?
177|qD3AGG9=> ? 	? 77DAGGAJBs4aggaj^4DEFG 	G A#1)NAdrzBCC8O    )N)numpyr   _cython_nnlsr   __all__r    r   r   <module>r#      s"      (YT Yr   