
"""Quasi-Monte Carlo engines and helpers."""
from __future__ import annotations

import copy
import math
import numbers
import os
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import ClassVar, Literal, overload, TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    import numpy.typing as npt
    from collections.abc import Callable
    from scipy._lib._util import (
        DecimalNumber, GeneratorType, IntNumber, SeedType
    )

from scipy._lib._util import rng_integers, _rng_spawn, _transition_to_rng
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.spatial import distance, Voronoi
from scipy.special import gammainc
from ._sobol import (
    _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
    _categorize, _MAXDIM
)
from ._qmc_cy import (
    _cy_wrapper_centered_discrepancy, _cy_wrapper_wrap_around_discrepancy,
    _cy_wrapper_mixture_discrepancy, _cy_wrapper_l2_star_discrepancy,
    _cy_wrapper_update_discrepancy, _cy_van_der_corput_scrambled,
    _cy_van_der_corput,
)


__all__ = ['scale', 'discrepancy', 'geometric_discrepancy', 'update_discrepancy',
           'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk',
           'MultinomialQMC', 'MultivariateNormalQMC']


@overload
def check_random_state(seed: IntNumber | None = ...) -> np.random.Generator: ...


@overload
def check_random_state(seed: GeneratorType) -> GeneratorType: ...


def check_random_state(seed=None):
    """Turn `seed` into a `numpy.random.Generator` instance.

Parameters
----------
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
    If `seed` is an int or None, a new `numpy.random.Generator` is
    created using ``np.random.default_rng(seed)``.
    If `seed` is already a ``Generator`` or ``RandomState`` instance, then
    the provided instance is used.

Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
    Random number generator.
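
Examples
--------
A minimal usage sketch (assumes direct access to this private helper):

>>> import numpy as np
>>> from scipy.stats._qmc import check_random_state
>>> rng = check_random_state(12345)
>>> isinstance(rng, np.random.Generator)
True
>>> check_random_state(rng) is rng
True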

    """
    if seed is None or isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.default_rng(seed)
    elif isinstance(seed, (np.random.RandomState, np.random.Generator)):
        return seed
    else:
        raise ValueError(f"{seed!r} cannot be used to seed a"
                         " numpy.random.Generator instance")


def scale(
    sample: npt.ArrayLike,
    l_bounds: npt.ArrayLike,
    u_bounds: npt.ArrayLike,
    *,
    reverse: bool = False,
) -> np.ndarray:
    r"""Sample scaling from unit hypercube to different bounds.

To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds.
The following transformation is used:

.. math::

    (b - a) \cdot \text{sample} + a

Parameters
----------
sample : array_like (n, d)
    Sample to scale.
l_bounds, u_bounds : array_like (d,)
    Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
    data. If `reverse` is True, range of the original data to transform
    to the unit hypercube.
reverse : bool, optional
    Reverse the transformation from different bounds to the unit hypercube.
    Default is False.

Returns
-------
sample : array_like (n, d)
    Scaled sample.

Examples
--------
Transform 3 samples in the unit hypercube to bounds:

>>> from scipy.stats import qmc
>>> l_bounds = [-2, 0]
>>> u_bounds = [6, 5]
>>> sample = [[0.5 , 0.75],
...           [0.5 , 0.5],
...           [0.75, 0.25]]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
>>> sample_scaled
array([[2.  , 3.75],
       [2.  , 2.5 ],
       [4.  , 1.25]])

And convert back to the unit hypercube:

>>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
>>> sample_
array([[0.5 , 0.75],
       [0.5 , 0.5 ],
       [0.75, 0.25]])

    """
    sample = np.asarray(sample)

    # Checking that the sample is a 2D array
    if not sample.ndim == 2:
        raise ValueError("Sample is not a 2D array")

    lower, upper = _validate_bounds(
        l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1]
    )

    if not reverse:
        # Checking that the sample is within the unit hypercube
        if sample.max() > 1. or sample.min() < 0.:
            raise ValueError("Sample is not in unit hypercube")

        return sample * (upper - lower) + lower
    else:
        # Checking that the sample is within the given bounds
        if not (np.all(sample >= lower) and np.all(sample <= upper)):
            raise ValueError("Sample is out of bounds")

        return (sample - lower) / (upper - lower)


def _ensure_in_unit_hypercube(sample: npt.ArrayLike) -> np.ndarray:
    """Ensure that sample is a 2D array and is within a unit hypercube

Parameters
----------
sample : array_like (n, d)
    A 2D array of points.

Returns
-------
np.ndarray
    The array interpretation of the input sample

Raises
------
ValueError
    If the input is not a 2D array or contains points outside of
    a unit hypercube.
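
Examples
--------
A minimal sketch (assumes direct access to this private helper):

>>> from scipy.stats._qmc import _ensure_in_unit_hypercube
>>> _ensure_in_unit_hypercube([[0.1, 0.2], [0.3, 0.4]])
array([[0.1, 0.2],
       [0.3, 0.4]])
>>> _ensure_in_unit_hypercube([0.1, 0.2])
Traceback (most recent call last):
    ...
ValueError: Sample is not a 2D array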
CdtypeorderrL   rM   rP   rQ   rR   )r?   rS   float64rT   rE   rW   rX   rG   s    r6   _ensure_in_unit_hypercuberc      s^    & ZZbjj<F;;!344

rvzz|b0:;;Mr9   CD)	iterativemethodworkersre   rf   rd   WDMDzL2-starrg   c                    [        U 5      n [        U5      n[        [        [        [
        S.nX$;   a	  XB   " XUS9$ [        U< S[        U5      < 35      e)a  Discrepancy of a given sample.

Parameters
----------
sample : array_like (n, d)
    The sample to compute the discrepancy from.
iterative : bool, optional
    Must be False if not using it for updating the discrepancy.
    Default is False. Refer to the notes for more details.
method : str, optional
    Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
    Refer to the notes for more details. Default is ``CD``.
workers : int, optional
    Number of workers to use for parallel processing. If -1 is given all
    CPU threads are used. Default is 1.

Returns
-------
discrepancy : float
    Discrepancy.

See Also
--------
geometric_discrepancy

Notes
-----
The discrepancy is a uniformity criterion used to assess the space filling
of a number of samples in a hypercube. A discrepancy quantifies the
distance between the continuous uniform distribution on a hypercube and the
discrete uniform distribution on :math:`n` distinct sample points.

The lower the value is, the better the coverage of the parameter space is.

For a collection of subsets of the hypercube, the discrepancy is the
difference between the fraction of sample points in one of those
subsets and the volume of that subset. There are different definitions of
discrepancy corresponding to different collections of subsets. Some
versions take a root mean square difference over subsets instead of
a maximum.

A measure of uniformity is reasonable if it satisfies the following
criteria [1]_:

1. It is invariant under permuting factors and/or runs.
2. It is invariant under rotation of the coordinates.
3. It can measure not only uniformity of the sample over the hypercube,
   but also the projection uniformity of the sample over non-empty
   subset of lower dimension hypercubes.
4. There is some reasonable geometric meaning.
5. It is easy to compute.
6. It satisfies the Koksma-Hlawka-like inequality.
7. It is consistent with other criteria in experimental design.

Four methods are available:

* ``CD``: Centered Discrepancy - subspace involves a corner of the
  hypercube
* ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
* ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
* ``L2-star``: L2-star discrepancy - like CD BUT variant to rotation

See [2]_ for precise definitions of each method.

Lastly, using ``iterative=True``, it is possible to compute the
discrepancy as if we had :math:`n+1` samples. This is useful if we want
to add a point to a sampling and check the candidate which would give the
lowest discrepancy. Then you could just update the discrepancy with
each candidate using `update_discrepancy`. This method is faster than
computing the discrepancy for a large number of candidates.

References
----------
.. [1] Fang et al. "Design and modeling for computer experiments".
   Computer Science and Data Analysis Series, 2006.
.. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets."
   Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
.. [3] T. T. Warnock. "Computational investigations of low discrepancy
   point sets." Applications of Number Theory to Numerical
   Analysis, Academic Press, pp. 319-343, 1972.

Examples
--------
Calculate the quality of the sample using the discrepancy:

>>> import numpy as np
>>> from scipy.stats import qmc
>>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
>>> l_bounds = [0.5, 0.5]
>>> u_bounds = [6.5, 6.5]
>>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
>>> space
array([[0.08333333, 0.41666667],
       [0.25      , 0.91666667],
       [0.41666667, 0.25      ],
       [0.58333333, 0.75      ],
       [0.75      , 0.08333333],
       [0.91666667, 0.58333333]])
>>> qmc.discrepancy(space)
0.008142039609053464

We can also compute iteratively the ``CD`` discrepancy by using
``iterative=True``.

>>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
>>> disc_init
0.04769081147119336
>>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
0.008142039609053513

rh   rg   z* is not a valid method. It must be one of )rc   _validate_workersr   r   r    r!   rE   set)rG   re   rf   rg   methodss        r6   r&   r&      sj    j 'v.F(G /1-2	G v'BBF: & \,. / 	/r9   )mindistmstmetricc                    [        U 5      n U R                  S   S:  a  [        S5      e[        R                  " XS9n[
        R                  " US:H  5      (       a  [        R                  " SSS9  US:X  a&  [
        R                  " X3R                  5          5      $ US	:X  aI  [        R                  " U5      n[        U5      nXUR                  5          n[
        R                  " U5      $ [        U< S
35      e)a$  Discrepancy of a given sample based on its geometric properties.

Parameters
----------
sample : array_like (n, d)
    The sample to compute the discrepancy from.
method : {"mindist", "mst"}, optional
    The method to use. One of ``mindist`` for minimum distance (default)
    or ``mst`` for minimum spanning tree.
metric : str or callable, optional
    The distance metric to use. See the documentation
    for `scipy.spatial.distance.pdist` for the available metrics and
    the default.

Returns
-------
discrepancy : float
    Discrepancy (higher values correspond to greater sample uniformity).

See Also
--------
discrepancy

Notes
-----
The discrepancy can serve as a simple measure of quality of a random sample.
This measure is based on the geometric properties of the distribution of points
in the sample, such as the minimum distance between any pair of points, or
the mean edge length in a minimum spanning tree.

The higher the value is, the better the coverage of the parameter space is.
Note that this is different from `scipy.stats.qmc.discrepancy`, where lower
values correspond to higher quality of the sample.

Also note that when comparing different sampling strategies using this function,
the sample size must be kept constant.

It is possible to calculate two metrics from the minimum spanning tree:
the mean edge length and the standard deviation of edges lengths. Using
both metrics offers a better picture of uniformity than either metric alone,
with higher mean and lower standard deviation being preferable (see [1]_
for a brief discussion). This function currently only calculates the mean
edge length.

References
----------
.. [1] Franco J. et al. "Minimum Spanning Tree: A new approach to assess the quality
   of the design of computer experiments." Chemometrics and Intelligent Laboratory
   Systems, 97 (2), pp. 164-169, 2009.

Examples
--------
Calculate the quality of the sample using the minimum euclidean distance
(the defaults):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> rng = np.random.default_rng(191468432622931918890291693003068437394)
>>> sample = qmc.LatinHypercube(d=2, rng=rng).random(50)
>>> qmc.geometric_discrepancy(sample)
0.03708161435687876

Calculate the quality using the mean edge length in the minimum
spanning tree:

>>> qmc.geometric_discrepancy(sample, method='mst')
0.1105149978798376

Display the minimum spanning tree and the points with
the smallest distance:

>>> import matplotlib.pyplot as plt
>>> from matplotlib.lines import Line2D
>>> from scipy.sparse.csgraph import minimum_spanning_tree
>>> from scipy.spatial.distance import pdist, squareform
>>> dist = pdist(sample)
>>> mst = minimum_spanning_tree(squareform(dist))
>>> edges = np.where(mst.toarray() > 0)
>>> edges = np.asarray(edges).T
>>> min_dist = np.min(dist)
>>> min_idx = np.argwhere(squareform(dist) == min_dist)[0]
>>> fig, ax = plt.subplots(figsize=(10, 5))
>>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
...            xlim=[0, 1], ylim=[0, 1])
>>> for edge in edges:
...     ax.plot(sample[edge, 0], sample[edge, 1], c='k')
>>> ax.scatter(sample[:, 0], sample[:, 1])
>>> ax.add_patch(plt.Circle(sample[min_idx[0]], min_dist, color='red', fill=False))
>>> markers = [
...     Line2D([0], [0], marker='o', lw=0, label='Sample points'),
...     Line2D([0], [0], color='k', label='Minimum spanning tree'),
...     Line2D([0], [0], marker='o', lw=0, markerfacecolor='w', markeredgecolor='r',
...            label='Minimum point-to-point distance'),
... ]
>>> ax.legend(handles=markers, loc='center left', bbox_to_anchor=(1, 0.5));
>>> plt.show()

r   rL   z'Sample must contain at least two points)rr   rQ   z!Sample contains duplicate points.
stacklevelrp   rq   z< is not a valid method. It must be one of {'mindist', 'mst'})rc   rV   rE   r   pdistr?   anywarningswarnrX   nonzero
squareformr   mean)rG   rf   rr   	distancesfully_connected_graphrq   s         r6   r'   r'   P  s    L 'v.F||ABCCv5I	vvi39aHvvi 1 1 3455	5 ( 3 3I >#$9:&	 wwy!!F: &B C D 	Dr9   x_newinitial_discc                 `   [         R                  " U[         R                  SS9n[         R                  " U [         R                  SS9n UR                  S:X  d  [	        S5      eUR                  5       S:  d  UR                  5       S:  a  [	        S5      eU R                  S:X  d  [	        S	5      e[         R                  " U S
:  5      (       a  [         R                  " U S:*  5      (       d  [	        S5      eU R                  S
   UR                  S   :w  a  [	        S5      e[        XU5      $ )an  Update the centered discrepancy with a new sample.

Parameters
----------
x_new : array_like (1, d)
    The new sample to add in `sample`.
sample : array_like (n, d)
    The initial sample.
initial_disc : float
    Centered discrepancy of the `sample`.

Returns
-------
discrepancy : float
    Centered discrepancy of the sample composed of `x_new` and `sample`.

Examples
--------
We can also compute iteratively the discrepancy by using
``iterative=True``.

>>> import numpy as np
>>> from scipy.stats import qmc
>>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
>>> l_bounds = [0.5, 0.5]
>>> u_bounds = [6.5, 6.5]
>>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
>>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
>>> disc_init
0.04769081147119336
>>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
0.008142039609053513

r]   r^   rL   rM   rP   rQ   rR   r   zx_new is not a 1D arrayr   zx_new is not in unit hypercubez&x_new and sample must be broadcastable)
r?   rS   ra   rT   rE   rW   rX   rY   rV   r"   )r   rG   r   s      r6   r(   r(     s    L ZZbjj<FJJuBJJc:E ;;!344

rvzz|b0:;; ::?233FF5A:266%1*#5#59::{{1~a(ABB)%FFr9   i1i2kdiscc                 t   U R                   S   nU S-
  nSUS-  -  [        R                  " SS[        XaSS24   5      -   [        U5      -   [        XaSS24   U-
  5      -
  -  SS9-  nSUS-  -  [        R                  " SS[        XbSS24   5      -   [        U5      -   [        XbSS24   U-
  5      -
  -  SS9-  nSUS-  -  [        R                  " S[        XaSS24   5      -   5      -  SU-  [        R                  " SS[        XaSS24   5      -  -   SXaSS24   S-  -  -
  5      -  -
  n	SUS-  -  [        R                  " S[        XbSS24   5      -   5      -  SU-  [        R                  " SS[        XbSS24   5      -  -   SXbSS24   S-  -  -
  5      -  -
  n
S[        XbU4   5      -   [        USS2U4   5      -   [        XbU4   USS2U4   -
  5      -
  nS[        XaU4   5      -   [        USS2U4   5      -   [        XaU4   USS2U4   -
  5      -
  nX-  nX-  nX-  nS[        XbU4   5      -   S[        XaU4   5      -   -  nS[        XbU4   5      -
  S[        XaU4   5      -
  -  n[        R                  " S[        XaSS24   5      -   5      n[        R                  " S[        XbSS24   5      -   5      n[        R                  " SS[        XaSS24   5      -  -   SXaSS24   S-  -  -
  5      n[        R                  " SS[        XbSS24   5      -  -   SXbSS24   S-  -  -
  5      nUU-  US-  -  SU-  U-  U-  U-  -
  nUUS-  U-  -  SU-  UU-  U-  -  -
  nX-
  U-   U-
  n[        R                  " U[
        S	9nS
UX/'   [        UU   5      nUU-   U	-
  U-   U
-
  SU-  -   nU$ )a|  Centered discrepancy after an elementary perturbation of a LHS.

An elementary perturbation consists of an exchange of coordinates between
two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
this operation conserves the LHS properties.

Parameters
----------
sample : array_like (n, d)
    The sample (before permutation) to compute the discrepancy from.
i1 : int
    The first line of the elementary permutation.
i2 : int
    The second line of the elementary permutation.
k : int
    The column of the elementary permutation.
disc : float
    Centered discrepancy of the design before permutation.

Returns
-------
discrepancy : float
    Centered discrepancy of the design after permutation.

References
----------
.. [1] Jin et al. "An efficient algorithm for constructing optimal design
   of computer experiments", Journal of Statistical Planning and
   Inference, 2005.
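
Examples
--------
A minimal sketch relating the analytical update to a full recomputation
(assumes direct access to this private helper):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> from scipy.stats._qmc import _perturb_discrepancy
>>> rng = np.random.default_rng(12345)
>>> sample = qmc.LatinHypercube(d=2, rng=rng).random(5)
>>> disc = qmc.discrepancy(sample)
>>> disc_new = _perturb_discrepancy(sample, 1, 2, 0, disc)
>>> sample[[1, 2], 0] = sample[[2, 1], 0]  # apply the same swap explicitly
>>> bool(np.isclose(disc_new, qmc.discrepancy(sample)))
True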

r         ?rP   g       @Nr   axisrL   r_   F)rV   r?   prodabsonesboolsum)rG   r   r   r   r   nz_ijc_i1jc_i2jc_i1i1c_i2i2numdenumgammac_p_i1jc_p_i2jalphabetag_i1g_i2h_i1h_i2c_p_i1i1c_p_i2i2sum_maskdisc_eps                              r6   _perturb_discrepancyr   
  s/   B 	QAC<D !r'\wwsb3tE{#33 #D	*,/Ud0B,CD EKLNNE !r'\wwsb3tE{#33 #D	*,/Ud0B,CD EKLNNE
 16kBGGADQK(8$899QcCU,<&<!<#&!e)9#9": ; ;;F 16kBGGADQK(8$899QcCU,<&<!<#&!e)9#9": ; ;;F
 s4A;#d1a4j/1!etAqDz)*+CTa%[!!CQT
O34A;ad+,-EKE mGmGTa%[!!a#dq5k*:&:;EDQK  QTa%[)9%9:D772DQK(()D772DQK(()D772c$1u+...Uq8H1IIJD772c$1u+...Uq8H1IIJD !q&)BJ,=,Dq,HHH!q&E)*rDyAI<L/MNH ?W$u,D771D!DD"NtDz?Dh'(2V;a$hFGNr9   r   c                 v   [         R                  " U S-  U S-  S:H  -   [        S9n[        S[	        U S-  5      S-  S-   5       H:  nSU-  S-   S-  nSXU-  S-  SSU-  2'   SXUSUS-  -  -
  S	-   -  S-  SSU-  2'   M<     [         R
                  SSS[         R                  " U5      S
   SS -  S-   S-  4   $ )a  Prime numbers from 2 to *n*.

Parameters
----------
n : int
    Sup bound with ``n >= 6``.

Returns
-------
primes : list(int)
    Primes in ``2 <= p < n``.

Notes
-----
Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
by the original author, Bruno Astrolino, for free use in SciPy under
the 3-clause BSD.

References
----------
.. [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
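
Examples
--------
A small sanity check (assumes direct access to this private helper):

>>> from scipy.stats._qmc import primes_from_2_to
>>> primes_from_2_to(20)
array([ 2,  3,  5,  7, 11, 13, 17, 19])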

      rL   r   r   r   FN   r   )r?   r   r   rangeintr_rz   )r   sieveir   s       r6   primes_from_2_tor   d  s    0 GGAFa!eqj)6E1c!s(mq(1,-EAIM#(!eqj!a% 7<1qAE{?Q&'1,3a!e34 . 55ARZZ.q1!"559Q>?@@r9   c                     / SQSU  n[        U5      U :  a*  Sn [        U5      SU  n[        U5      U :X  a   U$ US-  nM'  U$ )zList of the n-first prime numbers.

Parameters
----------
n : int
    Number of prime numbers wanted.

Returns
-------
primes : list(int)
    List of primes.
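
Examples
--------
A small sanity check (assumes direct access to this private helper):

>>> from scipy.stats._qmc import n_primes
>>> n_primes(5)
[2, 3, 5, 7, 11]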

)rL   r                              %   )   +   /   5   ;   =   C   G   I   O   S   Y   a   e   g   k   m   q                                                                           i  i  i  i  i  i  i  i%  i3  i7  i9  i=  iK  iQ  i[  i]  ia  ig  io  iu  i{  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i	  i  i  i#  i-  i3  i9  i;  iA  iK  iQ  iW  iY  i_  ie  ii  ik  iw  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i  i)  i+  i5  i7  i;  i=  iG  iU  iY  i[  i_  im  iq  is  iw  i  i  i  i  i  i  i  i  i  i  i  i  i  i  Ni  i  )lenr   )r   primes
big_numbers      r6   n_primesr     si    1 24!5F 6{Q
!%j1"15F6{a M $J	 " Mr9   )rngbaser   c                
   [        U5      n[        R                  " S[        R                  " U 5      -  5      S-
  n[        R
                  " [        R                  " U 5      S   USS9nU H  nUR                  U5        M     U$ )a  Permutations for scrambling a Van der Corput sequence.

Parameters
----------
base : int
    Base of the sequence.
rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. During the transition, the behavior documented above is not
        accurate; see `check_random_state` for actual behavior. After the
        transition, this admonition can be removed.

Returns
-------
permutations : array_like
    Permutation indices.

Notes
-----
In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is
created for each positive integer `k` such that ``1 - base**-k < 1``
using floating-point arithmetic. For double precision floats, the
condition ``1 - base**-k < 1`` can also be written as ``base**-k >
2**-54``, which makes it more apparent how many permutations we need
to create.
6   r   Nr   r   )r7   mathceillog2r?   repeatarangeshuffle)r   r   countpermutationsperms        r6   _van_der_corput_permutationsr     sl    L S
!CIIb499T?*+a/E99RYYt_T2EBLD  r9   )start_indexscrambler   r   rg   r   r   r   npt.ArrayLike | Nonec                    US:  a  [        S5      eU(       aO  Uc
  [        XS9nO[        R                  " U5      nUR	                  [        R
                  5      n[        XUXF5      $ [        XX&5      $ )a  Van der Corput sequence.

Pseudo-random number generator based on a b-adic expansion.

Scrambling uses permutations of the remainders (see [1]_). Multiple
permutations are applied to construct a point. The sequence of
permutations has to be the same for all points of the sequence.

Parameters
----------
n : int
    Number of element of the sequence.
base : int, optional
    Base of the sequence. Default is 2.
start_index : int, optional
    Index to start the sequence from. Default is 0.
scramble : bool, optional
    If True, use Owen scrambling. Otherwise no scrambling is done.
    Default is True.
permutations : array_like, optional
    Permutations used for scrambling.
rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.
workers : int, optional
    Number of workers to use for parallel processing. If -1 is
    given all CPU threads are used. Default is 1.

Returns
-------
sequence : list (n,)
    Sequence of Van der Corput.

References
----------
.. [1] A. B. Owen. "A randomized Halton algorithm in R",
   :arxiv:`1706.02808`, 2017.
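
Examples
--------
First points of the unscrambled base-2 sequence (assumes direct access to
this private helper):

>>> from scipy.stats._qmc import van_der_corput
>>> van_der_corput(5, base=2, scramble=False)
array([0.   , 0.5  , 0.25 , 0.75 , 0.125])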

rL   z'base' must be at least 2r   r   )rE   r   r?   rS   astypeint64r#   r$   )r   r   r   r   r   r   rg   s          r6   van_der_corputr     sw    d ax4557L ::l3L#**2884+A[,8C 	C "!;@@r9   c                   p   \ rS rSrSr\\" SSS9SSS.S\S	\S
   S-  S\	SS4S jj5       5       r
SSS.S\S	\S
   S-  S\	SS4S jjr\ SSS.S\S\S\R                  4S jjj5       r SSS.S\S\S\R                  4S jjjrSSSSS.SSSSS\S\S\S\R                  4S jjrS S jrS\SS 4S jrSrg)!r)   i  a  A generic Quasi-Monte Carlo sampler class meant for subclassing.

QMCEngine is a base class to construct a specific Quasi-Monte Carlo
sampler. It cannot be used directly as a sampler.

Parameters
----------
d : int
    Dimension of the parameter space.
optimization : {None, "random-cd", "lloyd"}, optional
    Whether to use an optimization scheme to improve the quality after
    sampling. Note that this is a post-processing step that does not
    guarantee that all properties of the sample will be conserved.
    Default is None.

    * ``random-cd``: random permutations of coordinates to lower the
      centered discrepancy. The best sample based on the centered
      discrepancy is constantly updated. Centered discrepancy-based
      sampling shows better space-filling robustness toward 2D and 3D
      subprojections compared to using other discrepancy measures.
    * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
      The process converges to equally spaced samples.

    .. versionadded:: 1.10.0

rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

Notes
-----
By convention samples are distributed over the half-open interval
``[0, 1)``. Instances of the class can access the attributes: ``d`` for
the dimension; and ``rng`` for the random number generator.

**Subclassing**

When subclassing `QMCEngine` to create a new sampler,  ``__init__`` and
``random`` must be redefined.

* ``__init__(d, rng=None)``: at least fix the dimension. If the sampler
  does not take advantage of a ``rng`` (deterministic methods like
  Halton), this parameter can be omitted.
* ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers``
  is used for parallelism. See `Halton` for example.

Optionally, two other methods can be overwritten by subclasses:

* ``reset``: Reset the engine to its original state.
* ``fast_forward``: If the sequence is deterministic (like Halton
  sequence), then ``fast_forward(n)`` is skipping the ``n`` first draw.

Examples
--------
To create a random sampler based on ``np.random.random``, we would do the
following:

>>> from scipy.stats import qmc
>>> class RandomEngine(qmc.QMCEngine):
...     def __init__(self, d, rng=None):
...         super().__init__(d=d, rng=rng)
...
...
...     def _random(self, n=1, *, workers=1):
...         return self.rng.random((n, self.d))
...
...
...     def reset(self):
...         super().__init__(d=self.d, rng=self.rng_seed)
...         return self
...
...
...     def fast_forward(self, n):
...         self.random(n)
...         return self

After subclassing `QMCEngine` to define the sampling strategy we want to
use, we can create an instance to sample from.

>>> engine = RandomEngine(2)
>>> engine.random(5)
array([[0.22733602, 0.31675834],  # random
       [0.79736546, 0.67625467],
       [0.39110955, 0.33281393],
       [0.59830875, 0.18673419],
       [0.67275604, 0.94180287]])

We can also reset the state of the generator and resample again.

>>> _ = engine.reset()
>>> engine.random(5)
array([[0.22733602, 0.31675834],  # random
       [0.79736546, 0.67625467],
       [0.39110955, 0.33281393],
       [0.59830875, 0.18673419],
       [0.67275604, 0.94180287]])

r0   Freplace_docNoptimizationr   rO   r  z	random-cdlloydr   r1   c                $    U R                  XUS9  g )Nr  )_initialize)selfrO   r  r   s       r6   __init__QMCEngine.__init__  s     	3?r9   c                   [         R                  " [        U5      [         R                  5      (       a  US:  a  [	        S5      eXl        [        U[         R                  R                  5      (       a  [        US5      S   U l
        O[        U5      U l
        [        R                  " U R                  5      U l        SU l        SSU R                  SSS S.nX l        [#        X$5      U l        g )	Nr   z&d must be a non-negative integer valuer   d   i'  h㈵>
   )
n_nochangen_itersr   tolmaxiterqhull_options)r?   
issubdtypetyper@   rE   rO   r<   rA   rD   r   r   r7   copydeepcopyrng_seednum_generated_optimization_select_optimizeroptimization_method)r	  rO   r  r   configs        r6   r  QMCEngine._initialize  s     }}T!Wbjj11QUEFFc299..//!#q)!,DH *#.DHdhh/ 88 !

 *#4\#J r9   r   rl   r   rg   c                    g r3   r4   )r	  r   rg   s      r6   _randomQMCEngine._random  s     	r9   c                    U R                  XS9nU R                  b  U R                  U5      nU =R                  U-  sl        U$ )a  Draw `n` in the half-open interval ``[0, 1)``.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space.
    Default is 1.
workers : int, optional
    Only supported with `Halton`.
    Number of workers to use for parallel processing. If -1 is
    given all CPU threads are used. Default is 1. It becomes faster
    than one worker for `n` greater than :math:`10^3`.

Returns
-------
sample : array_like (n, d)
    QMC sample.

rl   )r!  r  r  )r	  r   rg   rG   s       r6   rA   QMCEngine.random  sF    , a1##/--f5Far9   )rJ   r   endpointrg   rI   rH   rJ   r   r%  c                P   Uc  UnSn[         R                  " U5      n[         R                  " U5      nU(       a  US-   n[         R                  " UR                  [         R                  5      (       a4  [         R                  " UR                  [         R                  5      (       d  Sn[        U5      e[        U [        5      (       a  U R                  X5S9nOU R                  US9n[        XqUS9n[         R                  " U5      R                  [         R                  5      nU$ )a  
Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
(exclusive), or if endpoint=True, `l_bounds` (inclusive) to
`u_bounds` (inclusive).

Parameters
----------
l_bounds : int or array-like of ints
    Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
    in which case this parameter is 0 and this value is used for
    `u_bounds`).
u_bounds : int or array-like of ints, optional
    If provided, one above the largest (signed) integer to be drawn
    (see above for behavior if ``u_bounds=None``).
    If array-like, must contain integer values.
n : int, optional
    Number of samples to generate in the parameter space.
    Default is 1.
endpoint : bool, optional
    If true, sample from the interval ``[l_bounds, u_bounds]`` instead
    of the default ``[l_bounds, u_bounds)``. Defaults is False.
workers : int, optional
    Number of workers to use for parallel processing. If -1 is
    given all CPU threads are used. Only supported when using `Halton`
    Default is 1.

Returns
-------
sample : array_like (n, d)
    QMC sample.

Notes
-----
It is safe to just use the same ``[0, 1)`` to integer mapping
with QMC that you would use with MC. You still get unbiasedness,
a strong law of large numbers, an asymptotically infinite variance
reduction and a finite sample variance bound.

To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds,
the following transformation is used:

.. math::

    \text{floor}((b - a) \cdot \text{sample} + a)
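
Examples
--------
A minimal sketch using the deterministic, unscrambled Halton engine so the
output is reproducible:

>>> from scipy.stats import qmc
>>> sampler = qmc.Halton(d=2, scramble=False)
>>> sampler.integers(l_bounds=0, u_bounds=10, n=4)
array([[0, 0],
       [5, 3],
       [2, 6],
       [7, 1]])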

r   r   zD'u_bounds' and 'l_bounds' must be integers or array-like of integers)r   rg   r   )rI   rJ   )r?   
atleast_1dr  r_   r@   rE   r<   r+   rA   r%   floorr   r   )r	  rI   rJ   r   r%  rg   messagerG   s           r6   integersQMCEngine.integers  s    p HH==*==*!|Hhnnbjj99MM(.."**==1GW%%dF##[[1[6F[[1[%Fv8D&!((2r9   c                 t    [         R                  " U R                  5      n[        U5      U l        SU l        U $ )ziReset the engine to base state.

Returns
-------
engine : QMCEngine
    Engine reset to its base state.

r   )r  r  r  r7   r   r  )r	  r   s     r6   resetQMCEngine.reset:  s/     mmDMM*%c*r9   c                 $    U R                  US9  U $ )zFast-forward the sequence by `n` positions.

Parameters
----------
n : int
    Number of points to skip in the sequence.

Returns
-------
engine : QMCEngine
    Engine reset to its base state.

r'  )rA   r	  r   s     r6   fast_forwardQMCEngine.fast_forwardH  s     	ar9   )r  rO   r  r  r   r  r   )r1   r)   )__name__
__module____qualname____firstlineno____doc__r   r   r   r   r   r
  r  r?   ndarrayr!  rA   r   r+  r.  r2  __static_attributes__r4   r9   r6   r)   r)     s   m^ E2
 >B@@ 23d:	@
 @ 
@ 3 @& >B#K#K 23d:	#K
 #K 
#KJ 89,5	  89,5	B ,0P!P )	P
 P P P 
Pdi K r9   r)   c                      ^  \ rS rSrSr\" SSS9SSSS.S	\S
\S\S   S-  S\	SS4
U 4S jjj5       r
SS jr SSS.S\S\S\R                  4S jjjrSrU =r$ )r+   iZ  a  Halton sequence.

Pseudo-random number generator that generalize the Van der Corput sequence
for multiple dimensions. The Halton sequence uses the base-two Van der
Corput sequence for the first dimension, base-three for its second and
base-:math:`n` for its n-dimension.

Parameters
----------
d : int
    Dimension of the parameter space.
scramble : bool, optional
    If True, use Owen scrambling. Otherwise no scrambling is done.
    Default is True.
optimization : {None, "random-cd", "lloyd"}, optional
    Whether to use an optimization scheme to improve the quality after
    sampling. Note that this is a post-processing step that does not
    guarantee that all properties of the sample will be conserved.
    Default is None.

    * ``random-cd``: random permutations of coordinates to lower the
      centered discrepancy. The best sample based on the centered
      discrepancy is constantly updated. Centered discrepancy-based
      sampling shows better space-filling robustness toward 2D and 3D
      subprojections compared to using other discrepancy measures.
    * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
      The process converges to equally spaced samples.

    .. versionadded:: 1.10.0

rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

Notes
-----
The Halton sequence has severe striping artifacts for even modestly
large dimensions. These can be ameliorated by scrambling. Scrambling
also supports replication-based error estimates and extends
applicability to unbounded integrands.

References
----------
.. [1] Halton, "On the efficiency of certain quasi-random sequences of
   points in evaluating multi-dimensional integrals", Numerische
   Mathematik, 1960.
.. [2] A. B. Owen. "A randomized Halton algorithm in R",
   :arxiv:`1706.02808`, 2017.

Examples
--------
Generate samples from a low discrepancy sequence of Halton.

>>> from scipy.stats import qmc
>>> sampler = qmc.Halton(d=2, scramble=False)
>>> sample = sampler.random(n=5)
>>> sample
array([[0.        , 0.        ],
       [0.5       , 0.33333333],
       [0.25      , 0.66666667],
       [0.75      , 0.11111111],
       [0.125     , 0.44444444]])

Compute the quality of the sample using the discrepancy criterion.

>>> qmc.discrepancy(sample)
0.088893711419753

If some wants to continue an existing design, extra points can be obtained
by calling again `random`. Alternatively, you can skip some points like:

>>> _ = sampler.fast_forward(5)
>>> sample_continued = sampler.random(n=5)
>>> sample_continued
array([[0.3125    , 0.37037037],
       [0.8125    , 0.7037037 ],
       [0.1875    , 0.14814815],
       [0.6875    , 0.48148148],
       [0.4375    , 0.81481481]])

Finally, samples can be scaled to bounds.

>>> l_bounds = [0, 2]
>>> u_bounds = [10, 5]
>>> qmc.scale(sample_continued, l_bounds, u_bounds)
array([[3.125     , 3.11111111],
       [8.125     , 4.11111111],
       [1.875     , 2.44444444],
       [6.875     , 3.44444444],
       [4.375     , 4.44444444]])

r0   Fr  TN)r   r  r   rO   r   r  r  r   r1   c                   > USUS.U l         [        TU ]	  XUS9  [        U5       Vs/ s H  n[	        U5      PM     snU l        X l        U R                  5         g s  snf )NT)rO   r   r  rO   r  r   )
_init_quadsuperr  r   r   r   r   _initialize_permutations)r	  rO   r   r  r   bdim	__class__s         r6   r
  Halton.__init__  s^     !"t+79aD ,4A;7;4SY;7	 %%' 8s   Ac                     S/[        U R                  5      -  U l        U R                  (       a@  [	        U R                  5       H&  u  p[        X R                  S9nX0R                  U'   M(     gg)zhInitialize permutations for all Van der Corput sequences.

Permutations are only needed for scrambling.
Nr   )r   r   _permutationsr   	enumerater   r   )r	  r   rB  r   s       r6   rA  Halton._initialize_permutations  s]    
 %)6C		N#:==$TYY/;88  )5""1% 0 r9   r   rl   r   rg   c                @   [        U5      n[        U R                  5       VVs/ s H3  u  p4[        XU R                  U R
                  U R                  U   US9PM5     nnn[        R                  " U5      R                  R                  XR                  5      $ s  snnf )a  Draw `n` in the half-open interval ``[0, 1)``.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.
workers : int, optional
    Number of workers to use for parallel processing. If -1 is
    given all CPU threads are used. Default is 1. It becomes faster
    than one worker for `n` greater than :math:`10^3`.

Returns
-------
sample : array_like (n, d)
    QMC sample.

)r   r   r   rg   )rm   rG  r   r   r  r   rF  r?   arrayTreshaperO   )r	  r   rg   r   rB  rG   s         r6   r!  Halton._random  s    ( $G, "+499!5	7 "6ga	 !d6H6H*.--.2.@.@.C)02 "6	 	 7 xx!!))!VV447s   :B)r?  rF  r   r   r1   Nr4  )r5  r6  r7  r8  r9  r   r   r   r   r   r
  rA  r?   r:  r!  r;  __classcell__rC  s   @r6   r+   r+   Z  s    gP E204=A(()-(23d:( ( 
	( 3( 5 58955,55	5 5r9   r+   c                     ^  \ rS rSrSr\" SSS9SSSSS	.S
\S\S\S\	S   S-  S\
SS4U 4S jjj5       r SSS.S\S\S\R                  4S jjjrSS\S\R                  4S jjrSS\S\R                  4S jjrSrU =r$ )r,   i  a#  Latin hypercube sampling (LHS).

A Latin hypercube sample [1]_ generates :math:`n` points in
:math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
placing exactly one point in :math:`[j/n, (j+1)/n)` for
:math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`.

Parameters
----------
d : int
    Dimension of the parameter space.
scramble : bool, optional
    When False, center samples within cells of a multi-dimensional grid.
    Otherwise, samples are randomly placed within cells of the grid.

    .. note::
        Setting ``scramble=False`` does not ensure deterministic output.
        For that, use the `rng` parameter.

    Default is True.

    .. versionadded:: 1.10.0

optimization : {None, "random-cd", "lloyd"}, optional
    Whether to use an optimization scheme to improve the quality after
    sampling. Note that this is a post-processing step that does not
    guarantee that all properties of the sample will be conserved.
    Default is None.

    * ``random-cd``: random permutations of coordinates to lower the
      centered discrepancy. The best sample based on the centered
      discrepancy is constantly updated. Centered discrepancy-based
      sampling shows better space-filling robustness toward 2D and 3D
      subprojections compared to using other discrepancy measures.
    * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
      The process converges to equally spaced samples.

    .. versionadded:: 1.8.0
    .. versionchanged:: 1.10.0
        Add ``lloyd``.

strength : {1, 2}, optional
    Strength of the LHS. ``strength=1`` produces a plain LHS while
    ``strength=2`` produces an orthogonal array based LHS of strength 2
    [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
    with ``p`` a prime number. It also constrains ``d <= p + 1``.
    Default is 1.

    .. versionadded:: 1.8.0

rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

See Also
--------
:ref:`quasi-monte-carlo`

Notes
-----

When LHS is used for integrating a function :math:`f` over :math:`n`,
LHS is extremely effective on integrands that are nearly additive [2]_.
With a LHS of :math:`n` points, the variance of the integral is always
lower than plain MC on :math:`n-1` points [3]_. There is a central limit
theorem for LHS on the mean and variance of the integral [4]_, but not
necessarily for optimized LHS due to the randomization.

:math:`A` is called an orthogonal array of strength :math:`t` if in each
n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible
distinct rows occur the same number of times. The elements of :math:`A`
are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
The constraint that :math:`p` must be a prime number is to allow modular
arithmetic. Increasing strength adds some symmetry to the sub-projections
of a sample. With strength 2, samples are symmetric along the diagonals of
2D sub-projections. This may be undesirable, but on the other hand, the
sample dispersion is improved.

Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
strength 2 is a useful increment over strength 1. Going to strength 3 is
a smaller increment and scrambled QMC like Sobol', Halton are more
performant [7]_.

To create a LHS of strength 2, the orthogonal array :math:`A` is
randomized by applying a random, bijective map of the set of symbols onto
itself. For example, in column 0, all 0s might become 2; in column 1,
all 0s might become 1, etc.
Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
one-dimensional LHS of size :math:`p` to the subarray where
:math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.

References
----------
.. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
   of Input Variables in the Analysis of Output from a Computer Code."
   Technometrics, 1979.
.. [2] M. Stein, "Large sample properties of simulations using Latin
   hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
.. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
   SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
.. [4]  Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
   24, no. 5: 2058-2080, 1996.
.. [5] Fang et al. "Design and modeling for computer experiments".
   Computer Science and Data Analysis Series, 2006.
.. [6] Damblin et al., "Numerical studies of space filling designs:
   optimization of Latin Hypercube Samples and subprojection properties."
   Journal of Simulation, 2013.
.. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
   integration and visualization." Statistica Sinica, 1992.
.. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
   Journal of the American Statistical Association, 1993.
.. [9] Seaholm, Susan K. et al. (1988). Latin hypercube sampling and the
   sensitivity analysis of a Monte Carlo epidemic model. Int J Biomed
   Comput, 23(1-2), 97-112. :doi:`10.1016/0020-7101(88)90067-0`

Examples
--------
Generate samples from a Latin hypercube generator.

>>> from scipy.stats import qmc
>>> sampler = qmc.LatinHypercube(d=2)
>>> sample = sampler.random(n=5)
>>> sample
array([[0.1545328 , 0.53664833], # random
        [0.84052691, 0.06474907],
        [0.52177809, 0.93343721],
        [0.68033825, 0.36265316],
        [0.26544879, 0.61163943]])

Compute the quality of the sample using the discrepancy criterion.

>>> qmc.discrepancy(sample)
0.0196... # random

Samples can be scaled to bounds.

>>> l_bounds = [0, 2]
>>> u_bounds = [10, 5]
>>> qmc.scale(sample, l_bounds, u_bounds)
array([[1.54532796, 3.609945 ], # random
        [8.40526909, 2.1942472 ],
        [5.2177809 , 4.80031164],
        [6.80338249, 3.08795949],
        [2.65448791, 3.83491828]])

Below are other examples showing alternative ways to construct LHS with
even better coverage of the space.

Using a base LHS as a baseline.

>>> sampler = qmc.LatinHypercube(d=2)
>>> sample = sampler.random(n=5)
>>> qmc.discrepancy(sample)
0.0196...  # random

Use the `optimization` keyword argument to produce a LHS with
lower discrepancy at higher computational cost.

>>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
>>> sample = sampler.random(n=5)
>>> qmc.discrepancy(sample)
0.0176...  # random

Use the `strength` keyword argument to produce an orthogonal array based
LHS of strength 2. In this case, the number of sample points must be the
square of a prime number.

>>> sampler = qmc.LatinHypercube(d=2, strength=2)
>>> sample = sampler.random(n=9)
>>> qmc.discrepancy(sample)
0.00526...  # random

Options could be combined to produce an optimized centered
orthogonal array based LHS. After optimization, the result would not
be guaranteed to be of strength 2.

**Real-world example**

In [9]_, a Latin Hypercube sampling (LHS) strategy was used to sample a
parameter space to study the importance of each parameter of an epidemic
model. Such analysis is also called a sensitivity analysis.

Since the dimensionality of the problem is high (6), it is computationally
expensive to cover the space. When numerical experiments are costly, QMC
enables analysis that may not be possible if using a grid.

The six parameters of the model represented the probability of illness,
the probability of withdrawal, and four contact probabilities. The
authors assumed uniform distributions for all parameters and generated
50 samples.

Using `scipy.stats.qmc.LatinHypercube` to replicate the protocol,
the first step is to create a sample in the unit hypercube:

>>> from scipy.stats import qmc
>>> sampler = qmc.LatinHypercube(d=6)
>>> sample = sampler.random(n=50)

Then the sample can be scaled to the appropriate bounds:

>>> l_bounds = [0.000125, 0.01, 0.0025, 0.05, 0.47, 0.7]
>>> u_bounds = [0.000375, 0.03, 0.0075, 0.15, 0.87, 0.9]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)

Such a sample was used to run the model 50 times, and a polynomial
response surface was constructed. This allowed the authors to study the
relative importance of each parameter across the range of possibilities
of every other parameter.

In this computer experiment, they showed a 14-fold reduction in the
number of samples required to maintain an error below 2% on their
response surface when compared to a grid sampling.

r0   Fr  Tr   N)r   strengthr  r   rO   r   rR  r  r  r   r1   c                   > USUUS.U l         [        T	U ]	  XUS9  X l        U R                  U R
                  S.n Xc   U l        g ! [         a#  nU< S[        U5      < 3n[        U5      UeS nAff = f)NT)rO   r   rR  r  )rO   r   r  )r   rL   z, is not a valid strength. It must be one of )
r?  r@  r  r   _random_lhs_random_oa_lhs
lhs_methodKeyErrorrn   rE   )
r	  rO   r   rR  r  r   lhs_method_strengthexcr*  rC  s
            r6   r
  LatinHypercube.__init__  s     !"t+79a|D  ""

	/(;(EDO 	/" &!"569;GW%3.	/s   	A 
A3A..A3rl   r   rg   c                (    U R                  U5      nU$ r3   )rV  )r	  r   rg   lhss       r6   r!  LatinHypercube._random  s     ooa 
r9   c                    U R                   (       d  SnO$U R                  R                  XR                  4S9n[        R
                  " [        R                  " SUS-   5      U R                  S45      n[        U R                  5       H$  nU R                  R                  X4SS24   5        M&     UR                  nX2-
  U-  nU$ )zBase LHS algorithm.r   sizer   N)
r   r   uniformrO   r?   tiler   r   r   rK  )r	  r   samplespermsr   s        r6   rT  LatinHypercube._random_lhs  s    }}*-Ghh&&QK&8G		!QU+%tvvAHHUa4[) ?a'r9   c                    [         R                  " U5      R                  [        5      nUS-  nUS-   n[	        US-   5      nX%;  d  X:w  a  [        SUSS S-   35      eU R                  US-   :  a  [        S5      e[         R                  " X44[        S9n[         R                  " [         R                  " U5      S5      n[         R                  " [         R                  " U6 S	S
9R                  S	S5      USS2SS24'   [        SU5       H:  n[         R                  " USS2S4   XSS2S4   -  -   U5      USS2SU-   S-
  4'   M<     [         R                  " X44[        S9n	[        U5       H0  n
U R                   R#                  U5      nXSS2U
4      U	SS2U
4'   M2     U	n[         R                  " X44S9n[%        SU R&                  SU R                   S9n[        U5       HY  n
[        U5       HG  nUSS2U
4   U:H  nUR)                  U5      R+                  5       nUUSS2U
4   U   -   USS2U
4   U'   MI     M[     X-  nUSS2SU R                  24   $ )z)Orthogonal array based LHS of strength 2.rL   r   z8n is not the square of a prime number. Close values are Nz*n is too small for d. Must be n > (d-1)**2)rV   r_   )rL   r   r   r   )rV   )rO   r   rR  r   )r?   sqrtr   r   r   rE   rO   zerosrb  r   stackmeshgridrL  r   modemptyr   permutationr,   r   rA   flatten)r	  r   pn_rown_colr   	oa_samplearraysp_
oa_sample_jrd  oa_lhs_sample
lhs_enginer   idxr\  s                    r6   rU  LatinHypercube._random_oa_lhs  sH   GGAJc"1A!!a%(?aj%bc{A~.0  66AE>IJJHHE>=	 1v.88BKK$8)+--4WR^ 	!RaR%1+B#%66)AqD/,.A,>+?@A$CIa2ai  
 XXUN#>
uAHH((+E$q!t_5Jq!t  	~6#a$--!(,2
uA1X1o* ''*224+.1a41E+Ead#C(   	QZ((r9   )r?  rV  r   r4  )r   )r5  r6  r7  r8  r9  r   r   r   r   r   r   r
  r?   r:  r!  rT  rU  r;  rO  rP  s   @r6   r,   r,     s    aF E2 =A/// / 23d:	/
 / 
/ 3/4 89,5	Y rzz  ,)	 ,)"** ,) ,)r9   r,   c                     ^  \ rS rSr% Sr\r\\   \	S'   \
" SSS9SSSSS	.S
\S\S\S-  S\S\S   S-  SS4U 4S jjj5       rSS jr SSS.S\S\S\R$                  4S jjjrS\S\R$                  4S jrSU 4S jjrS\SS 4S jrSrU =r$ ) r*   iD  aa  Engine for generating (scrambled) Sobol' sequences.

Sobol' sequences are low-discrepancy, quasi-random numbers. Points
can be drawn using two methods:

* `random_base2`: safely draw :math:`n=2^m` points. This method
  guarantees the balance properties of the sequence.
* `random`: draw an arbitrary number of points from the
  sequence. See warning below.

Parameters
----------
d : int
    Dimensionality of the sequence. Max dimensionality is 21201.
scramble : bool, optional
    If True, use LMS+shift scrambling. Otherwise, no scrambling is done.
    Default is True.
bits : int, optional
    Number of bits of the generator. Controls the maximum number of points
    that can be generated, which is ``2**bits``. Maximal value is 64.
    It does not affect the return type, which is always ``np.float64``
    so that points do not repeat themselves.
    Default is None, which, for backward compatibility, corresponds to 30.

    .. versionadded:: 1.9.0
optimization : {None, "random-cd", "lloyd"}, optional
    Whether to use an optimization scheme to improve the quality after
    sampling. Note that this is a post-processing step that does not
    guarantee that all properties of the sample will be conserved.
    Default is None.

    * ``random-cd``: random permutations of coordinates to lower the
      centered discrepancy. The best sample based on the centered
      discrepancy is constantly updated. Centered discrepancy-based
      sampling shows better space-filling robustness toward 2D and 3D
      subprojections compared to using other discrepancy measures.
    * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
      The process converges to equally spaced samples.

    .. versionadded:: 1.10.0

rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

Notes
-----
Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
:math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular
integrands, provides a means of error estimation, and can improve their
rate of convergence. The scrambling strategy which is implemented is a
(left) linear matrix scramble (LMS) followed by a digital random shift
(LMS+shift) [2]_.

There are many versions of Sobol' sequences depending on their
'direction numbers'. This code uses direction numbers from [4]_. Hence,
the maximum number of dimensions is 21201. The direction numbers have been
precomputed with search criterion 6 and can be retrieved at
https://web.maths.unsw.edu.au/~fkuo/sobol/.

.. warning::

   Sobol' sequences are a quadrature rule and they lose their balance
   properties if one uses a sample size that is not a power of 2, or skips
   the first point, or thins the sequence [5]_.

   If :math:`n=2^m` points are not enough then one should take :math:`2^M`
   points for :math:`M>m`. When scrambling, the number R of independent
   replicates does not have to be a power of 2.

   Sobol' sequences are generated to some number :math:`B` of bits.
   After :math:`2^B` points have been generated, the sequence would
   repeat; hence, an error is raised if more points are requested.
   The number of bits can be controlled with the parameter `bits`.

References
----------
.. [1] I. M. Sobol', "The distribution of points in a cube and the accurate
   evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
   1967.
.. [2] J. Matousek, "On the L2-discrepancy for anchored boxes."
   J. of Complexity 14, 527-556, 1998.
.. [3] Art B. Owen, "Scrambling Sobol and Niederreiter-Xing points."
   Journal of Complexity, 14(4):466-489, December 1998.
.. [4] S. Joe and F. Y. Kuo, "Constructing sobol sequences with better
   two-dimensional projections." SIAM Journal on Scientific Computing,
   30(5):2635-2654, 2008.
.. [5] Art B. Owen, "On dropping the first Sobol' point."
   :arxiv:`2008.08051`, 2020.

Examples
--------
Generate samples from a low discrepancy sequence of Sobol'.

>>> from scipy.stats import qmc
>>> sampler = qmc.Sobol(d=2, scramble=False)
>>> sample = sampler.random_base2(m=3)
>>> sample
array([[0.   , 0.   ],
       [0.5  , 0.5  ],
       [0.75 , 0.25 ],
       [0.25 , 0.75 ],
       [0.375, 0.375],
       [0.875, 0.875],
       [0.625, 0.125],
       [0.125, 0.625]])

Compute the quality of the sample using the discrepancy criterion.

>>> qmc.discrepancy(sample)
0.013882107204860938
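
The `optimization` keyword applies a post-processing step that can lower
this value further; note that the balance properties of the raw sequence
are then not guaranteed to be conserved. A minimal sketch; how much the
discrepancy improves, if at all, depends on the run:

>>> sampler_opt = qmc.Sobol(d=2, scramble=False, optimization="random-cd")
>>> sample_opt = sampler_opt.random_base2(m=3)
>>> qmc.discrepancy(sample_opt) <= qmc.discrepancy(sample) + 1e-9  # no worse, up to rounding
True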

To continue an existing design, extra points can be obtained
by calling `random_base2` again. Alternatively, you can skip some
points like:

>>> _ = sampler.reset()
>>> _ = sampler.fast_forward(4)
>>> sample_continued = sampler.random_base2(m=2)
>>> sample_continued
array([[0.375, 0.375],
       [0.875, 0.875],
       [0.625, 0.125],
       [0.125, 0.625]])

Finally, samples can be scaled to bounds.

>>> l_bounds = [0, 2]
>>> u_bounds = [10, 5]
>>> qmc.scale(sample_continued, l_bounds, u_bounds)
array([[3.75 , 3.125],
       [8.75 , 4.625],
       [6.25 , 2.375],
       [1.25 , 3.875]])
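
Scrambling also provides a simple way to gauge the integration error:
use several independently scrambled engines and look at the spread of
the estimates. A minimal sketch; the integrand ``x1*x2`` (exact integral
1/4) is arbitrary and only used for illustration:

>>> import numpy as np
>>> estimates = []
>>> for i in range(8):
...     engine = qmc.Sobol(d=2, scramble=True, rng=i)
...     x = engine.random_base2(m=10)
...     estimates.append(np.prod(x, axis=1).mean())
>>> bool(abs(np.mean(estimates) - 0.25) < 1e-3)
True
>>> bool(np.std(estimates) < 1e-3)  # spread across replicates ~ error scale
True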

MAXDIMr0   Fr  TN)r   bitsr   r  rO   r   r  r   r  r  r1   c                  > USUUS.U l         [        TU ]	  XUS9  XR                  :  a  [	        SU R                   S35      eX0l        U   X l        U R
                  c  SU l        U R
                  S::  a  [        R                  U l	        O;SU R
                  s=:  a  S::  a  O  O[        R                  U l	        O[	        S	5      eS
U R
                  -  U l        [        R                  " XR
                  4U R                  S9U l        [        U R                  XR
                  S9  U(       d$  [        R                  " XR                  S9U l        OU R!                  5         U R                  R#                  5       U l        SS
U R
                  -  -  U l        U R$                  U R&                  -  R)                  SS5      U l        U R*                  R-                  [        R.                  5      U l        g )NT)rO   r   r  r  r>  z$Maximum supported dimensionality is .       @   zMaximum supported 'bits' is 64rL   r   )dimr  rP   r   rh  )r?  r@  r  r~  rE   r  r   r?   uint32dtype_iuint64maxnrj  _svr   _shift	_scrambler  _quasi_scalerL  _first_pointr   ra   )r	  rO   r   r  r   r  rC  s         r6   r
  Sobol.__init__  s{    !"tT+79 	aD{{?6t{{m1E  	 99DI99?99DL$))!r!99DL=>>tyyL	  "xxIIdllKdhhAII6&(hhq&EDK NNkk&&( AN*![[4;;6??2F --44RZZ@r9   c           	         [         R                  " [        U R                  SU R                  U R
                  4U R                  S9S[         R                  " U R
                  U R                  S9-  5      U l        [         R                  " [        U R                  SU R                  U R
                  U R
                  4U R                  S95      n[        U R                  U R
                  XR                  S9  g)z&Scramble the sequence using LMS+shift.rL   )r`  r_   r   )r  r  ltmsvN)r?   dotr   r   rO   r  r  r   r  trilr   r  )r	  r  s     r6   r  Sobol._scramble  s     ff1DFFDII+>#||-499DLL99
 ggl488Q)-DII(F)-7 8 	TYY	
r9   r   rl   r   rg   c                N   [         R                  " XR                  4[         R                  S9nUS:X  a  U$ U R                  U-   nX@R
                  :  a]  SU R                   SU R
                   SU R                   SU R                   SU SU S3nU R                  S	:w  a  US
-  n[        U5      eU R                  S:X  a  XS-
  -  S:X  d  [        R                  " SSS9  US:X  a  U R                  nU$ [        US-
  U R                  U R                  U R                  U R                  U R                  US9  [         R                  " U R                  U/5      SU n U$ [        XR                  S-
  U R                  U R                  U R                  U R                  US9  U$ )zDraw next point(s) in the Sobol' sequence.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.

Returns
-------
sample : array_like (n, d)
    Sobol' sample.

r   r   zAt most 2**=z# distinct points can be generated. 0 points have been previously generated, then: n=+z. r  zConsider increasing `bits`.r   zEThe balance properties of Sobol' points require n to be a power of 2.rL   rt   )r   num_genr  r%   r  quasirG   N)r?   rn  rO   ra   r  r  r  rE   rx   ry   r  r   r  r  r  concatenate)r	  r   rg   rG   total_nmsgs         r6   r!  Sobol._random!  s      XXq&&kD6M$$q(YYdii[$)) 5"001 2&&*&8&8%91#QwirK 
 yyB44S/!"QK1$ 7CDF Av**"  !eT%7%7TVV++$(($++!
 &&/1  //!3kkdhhdkk r9   mc                     SU-  nU R                   U-   nX3S-
  -  S:X  d,  [        SU R                    SU R                    SU SU S3	5      eU R                  U5      $ )	a:  Draw point(s) from the Sobol' sequence.

This function draws :math:`n=2^m` points in the parameter space
ensuring the balance properties of the sequence.

Parameters
----------
m : int
    Logarithm in base 2 of the number of samples; i.e., n = 2^m.

Returns
-------
sample : array_like (n, d)
    Sobol' sample.

rL   r   r   zFThe balance properties of Sobol' points require n to be a power of 2. r  z+2**r  zJ. If you still want to do this, the function 'Sobol.random()' can be used.)r  rE   rA   )r	  r  r   r  s       r6   random_base2Sobol.random_base2[  s    " F$$q(Q;'1, 66:6H6H5I J""&"4"4!5T!AgY G??   {{1~r9   c                 b   > [         TU ]  5         U R                  R                  5       U l        U $ )zeReset the engine to base state.

Returns
-------
engine : Sobol
    Engine reset to its base state.

)r@  r.  r  r  r  r	  rC  s    r6   r.  Sobol.resetz  s'     	kk&&(r9   c                 0   U R                   S:X  a9  [        US-
  U R                   U R                  U R                  U R                  S9  O7[        XR                   S-
  U R                  U R                  U R                  S9  U =R                   U-  sl         U $ )zFast-forward the sequence by `n` positions.

Parameters
----------
n : int
    Number of points to skip in the sequence.

Returns
-------
engine : Sobol
    The fast-forwarded engine.

r   r   )r   r  r  r  r  )r  r   rO   r  r  r1  s     r6   r2  Sobol.fast_forward  s     "a%!3!3884;;
 //!3884;; 	ar9   )
r  r?  r  r  r  r  r  r  r  r   rN  r4  )r1   r*   )r5  r6  r7  r8  r9  r   r~  r   r   __annotations__r   r   r   r   r   r
  r  r?   r:  r!  r  r.  r2  r;  rO  rP  s   @r6   r*   r*   D  s    Sj $FHSM#E204!%t=A1A1A)-1A$1A,41A 23d:1A 
	1A 31Af
$ 88988,58	8ti BJJ >i G  r9   r*   c                     ^  \ rS rSrSr\" SSS9SSSS	S	S	S	S
.S\S\S\S   S\S\S   S	-  S\	SSSSSS	4U 4S jjj5       r
S r S%SS.S\S\S\R                  4S jjjrS\R                  4S jrS&U 4S jjr S%S \R                  S\S!\S\R                  4S" jjr S%S \R                  S\S!\S\R                  4S# jjrS$rU =r$ )'r-   i  a8  Poisson disk sampling.

Parameters
----------
d : int
    Dimension of the parameter space.
radius : float
    Minimal distance to keep between points when sampling new candidates.
hypersphere : {"volume", "surface"}, optional
    Sampling strategy to generate potential candidates to be added in the
    final sample. Default is "volume".

    * ``volume``: original Bridson algorithm as described in [1]_.
      New candidates are sampled *within* the hypersphere.
    * ``surface``: only sample the surface of the hypersphere.
ncandidates : int
    Number of candidates to sample per iteration. More candidates result
    in a denser sampling as more candidates can be accepted per iteration.
optimization : {None, "random-cd", "lloyd"}, optional
    Whether to use an optimization scheme to improve the quality after
    sampling. Note that this is a post-processing step that does not
    guarantee that all properties of the sample will be conserved.
    Default is None.

    * ``random-cd``: random permutations of coordinates to lower the
      centered discrepancy. The best sample based on the centered
      discrepancy is constantly updated. Centered discrepancy-based
      sampling shows better space-filling robustness toward 2D and 3D
      subprojections compared to using other discrepancy measures.
    * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
      The process converges to equally spaced samples.

    .. versionadded:: 1.10.0

rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

l_bounds, u_bounds : array_like (d,)
    Lower and upper bounds of target sample data.

Notes
-----
Poisson disk sampling is an iterative sampling strategy. Starting from
a seed sample, `ncandidates` are sampled in the hypersphere
surrounding the seed. Candidates closer than `radius` to an existing
sample, or falling outside the domain, are rejected. Accepted candidates
are added to the sample and to the pool of seeds. The process stops when
the pool is empty or when the required number of samples is reached.

The maximum number of points that a sample can contain is directly linked
to the `radius`. As the dimension of the space increases, a higher radius
spreads the points further apart and helps overcome the curse of
dimensionality.
See the :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more
details.

.. warning::

   The algorithm is more suitable for low dimensions and small sample
   sizes due to its iterative nature and memory requirements.
   Selecting a small radius in a high dimension means that the space can
   contain far more samples than with a lower dimension or a larger radius.

Some code taken from [2]_, written consent given on 31.03.2021
by the original author, Shamis, for free use in SciPy under
the 3-clause BSD.

References
----------
.. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
   Dimensions." SIGGRAPH, 2007.
.. [2] `StackOverflow <https://stackoverflow.com/questions/66047540>`__.

Examples
--------
Generate a 2D sample using a `radius` of 0.2.

>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from matplotlib.collections import PatchCollection
>>> from scipy.stats import qmc
>>>
>>> rng = np.random.default_rng()
>>> radius = 0.2
>>> engine = qmc.PoissonDisk(d=2, radius=radius, rng=rng)
>>> sample = engine.random(20)

Visualizing the 2D sample and showing that no points are closer than
`radius`. ``radius/2`` is used to visualize non-intersecting circles.
If two samples are exactly at `radius` from each other, then their circle
of radius ``radius/2`` will touch.

>>> fig, ax = plt.subplots()
>>> _ = ax.scatter(sample[:, 0], sample[:, 1])
>>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
...            for xi, yi in sample]
>>> collection = PatchCollection(circles, match_original=True)
>>> ax.add_collection(collection)
>>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
...            xlim=[0, 1], ylim=[0, 1])
>>> plt.show()

Such a visualization can be seen as circle packing: how many circles can
we fit in the space? This is an NP-hard problem. The method `fill_space`
can be used to add samples until no more samples can be added, as
sketched below. This is a hard problem and parameters may need to be
adjusted manually. Beware of the dimension: as the dimensionality
increases, the number of samples required to fill the space grows
exponentially (curse of dimensionality).
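
A minimal sketch of `fill_space`, reusing `radius` and `rng` from above.
The number of accepted points is random and depends on `radius` and
`ncandidates`, but no two points end up closer than `radius` (up to
floating-point rounding):

>>> engine = qmc.PoissonDisk(d=2, radius=radius, rng=rng)
>>> sample_filled = engine.fill_space()
>>> from scipy.spatial import distance
>>> bool(distance.pdist(sample_filled).min() >= radius - 1e-9)
True
>>> len(sample_filled)
21  # random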

r0   Fr  g?volumer  N)radiushyperspherencandidatesr  r   rI   rJ   rO   r  r  r  surfacer  r  r  r   rI   r   rJ   r1   c                f  > XUUUS.U l         [        TU ]	  XUS9  U R                  U R                  S.n	 X   U l        US:X  a  SOSU l	        X l
        U R                  S-  U l        X@l        Uc  [        R                  " U5      nUc  [        R                  " U5      n[!        Xx[#        U5      S9u  U l        U l        [        R(                  " S	S
9   U R                  [        R*                  " U R,                  5      -  U l        [        R0                  " U R&                  U R$                  -
  U R.                  -  5      R3                  ["        5      U l        S S S 5        U R7                  5         g ! [         a#  n
U< S[        U	5      < 3n[        U5      U
eS n
A
ff = f! , (       d  f       NO= f)N)rO   r  r  r  r  r>  r  z? is not a valid hypersphere sampling method. It must be one of r  rL   gjt?rN   ignore)divide)r?  r@  r  _hypersphere_volume_sample_hypersphere_surface_samplehypersphere_methodrW  rn   rE   radius_factorr  radius_squaredr  r?   r   rj  rU   r   rI   rJ   errstateri  rO   	cell_sizer   r   	grid_size_initialize_grid_pool)r	  rO   r  r  r  r  r   rI   rJ   hypersphere_samplerY  r*  rC  s               r6   r
  PoissonDisk.__init__!  s    !"*5*5+79 	aD 5577

	/&8&ED# #."9Qu"kk1n 'wwqzHxx{H'7CF(
$t} [[)![[277466?:DN6$..HIfSk N * 	""$?  	// "..12D.E-HJ  W%3.		/2 *)s$   	E2 BF"2
F<FF"
F0c                    / U l         [        R                  " [        R                  " U R                  U R
                  5      [        R                  S9U l        U R                  R                  [        R                  5        g)zSampling pool and sample grid.r   N)
sample_poolr?   rn  appendr  rO   float32sample_gridfillnanr	  s    r6   r  !PoissonDisk._initialize_grid_pool]  sT     88IIdnndff-**

 	bff%r9   r   rl   r   rg   c                  ^ ^ US:X  d  T R                   S:X  a"  [        R                  " UT R                   45      $ S[        R                  S[        4U 4S jjnSS[        R                  S[
        S[        4U 4S jjjnS[        R                  SS4UU 4S	 jjn/ m[        T R                  5      S:X  a9  U" T R                  R                  T R                  T R                  5      5        S
nOSn[        T R                  5      (       a  Xa:  a  [        T R                  [        T R                  5      5      nT R                  U   nT R                  U	 T R                  UT R                  T R                  -  T R                   5      n	U	 H5  n
U" U
5      (       d  M  U" U
5      (       a  M!  U" U
5        US
-  nXa:  d  M5    O   [        T R                  5      (       a  Xa:  a  M  T =R"                  U-  sl        [        R$                  " T5      $ )aA  Draw `n` in the interval ``[l_bounds, u_bounds]``.

Note that it can return fewer samples if the space is full.
See the note section of the class.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.

Returns
-------
sample : array_like (n, d)
    QMC sample.

r   rG   r1   c                    > [        TR                  5       H/  nX   TR                  U   :  d  X   TR                  U   :  d  M/    g   g)NFT)r   rO   rJ   rI   )rG   r   r	  s     r6   	in_limits&PoissonDisk._random.<locals>.in_limits  sB    466]Ia 00FIa@P4P  # r9   	candidater   c                 L  > U TR                   -
  TR                  -  R                  [        5      n[        R
                  " X!-
  TR                   R                  [        5      5      n[        R                  " X!-   S-   TR                  5      n[        R                  " TR                  [        U5         S   5      (       d  g[        TR                  5       Vs/ s H  n[        X5   XE   5      PM     nn[        R                  " SS9   [        R                  " [        R                   " [        R"                  " U TR                  [        U5         -
  5      TR                  S9TR$                  :  5      (       a
   SSS5        g SSS5        gs  snf ! , (       d  f       g= f)	zV
Check if there are samples closer than ``radius_squared`` to the
`candidate` sample.
r   r   Tr  )invalidr   NF)rI   r  r   r   r?   maximumminimumr  isnanr  tupler   rO   slicer  rw   r   squarer  )r  r   indicesind_minind_maxr   ar	  s          r6   in_neighborhood,PoissonDisk._random.<locals>.in_neighborhood  sC   
 "DMM1T^^CKKCPGjjdmm.B.B3.GHGjjq$..AG 88D,,U7^<Q?@@8=dffF1wz7:.AF X.66FF		)d.>.>uQx.H"HI!VV ++,    /. /  G /. s   F	A4F
F#Nc                    > TR                   R                  U 5        U TR                  -
  TR                  -  R	                  [
        5      nU TR                  [        U5      '   TR                  U 5        g r3   )r  r  rI   r  r   r   r  r  )r  r  curr_sampler	  s     r6   
add_sample'PoissonDisk._random.<locals>.add_sample  s[    ##I.!DMM1T^^CKKCPG/8DU7^,y)r9   r   rL   )rO   r?   rn  r:  r   r   r   r  r   ra  rI   rJ   r   r  r  r  r  r  rJ  )r	  r   rg   r  r  r  	num_drawn
idx_centercenter
candidatesr  r  s   `          @r6   r!  PoissonDisk._randomi  s   & 6TVVq[88QK((	bjj 	T 		rzz 	c 	$ 	 	8	*"** 	* 	* 	* )+t A%txx''t}}EFII $""##	%dhhD4D4D0EFJ%%j1F  , 00d&8&88$:J:JJ
 (	Y''	0J0Jy)NI ~ ( $""##	( 	i'xx$$r9   c                 @    U R                  [        R                  5      $ )a  Draw ``n`` samples in the interval ``[l_bounds, u_bounds]``.

Unlike `random`, this method will try to add points until
the space is full. Depending on ``candidates`` (and to a lesser extent
other parameters), some empty areas can still be present in the sample.

.. warning::

   This can be extremely slow in high dimensions or if the
   ``radius`` is very small with respect to the dimensionality.

Returns
-------
sample : array_like (n, d)
    QMC sample.

)rA   r?   infr  s    r6   
fill_spacePoissonDisk.fill_space  s    $ {{266""r9   c                 D   > [         TU ]  5         U R                  5         U $ )zkReset the engine to base state.

Returns
-------
engine : PoissonDisk
    Engine reset to its base state.

)r@  r.  r  r  s    r6   r.  PoissonDisk.reset  s     	""$r9   r  r  c                    U R                   R                  X0R                  4S9n[        R                  " US-  SS9nU[        U R                  S-  US-  5      SU R                  -  -  -  [        R                  " U5      -  n[        R                  " UR                  SS5      SU R                  45      nU[        R                  " XG5      -   nU$ )z$Uniform sampling within hypersphere.r_  rL   r   r   rh  )
r   standard_normalrO   r?   r   r   ri  rb  rL  multiply)	r	  r  r  r  xssqfrfr_tiledrq  s	            r6   r  &PoissonDisk._hypersphere_volume_sample  s     HH$$:vv*>$?ffQT"htvvaxQ/!DFF(;;bggclJ77JJr1466{
 R[[--r9   c                     U R                   R                  X0R                  4S9nU[        R                  R                  USS9SS2S4   -  nU[        R                  " XB5      -   nU$ )z.Uniform sampling on the hypersphere's surface.r_  r   r   N)r   r  rO   r?   linalgnormr  )r	  r  r  r  vecrq  s         r6   r  'PoissonDisk._hypersphere_surface_sample  s^    
 hh&&Z,@&Aryy~~c~*1d733R[[--r9   )r?  r  r  r  rI   r  r  r  r  r  r  rJ   r4  )r1   r-   )r5  r6  r7  r8  r9  r   r   r   r   r   r
  r  r?   r:  r!  r  r.  r  r  r;  rO  rP  s   @r6   r-   r-     sa   {z E2
 !%4<!#=A+/+/9%9% 	9%
 019% 9% 23d:9% 9% )9% )9% 
9% 39%v
& ]%89]%]%,5]%	]%~#BJJ #( !"jj*7 
" !"jj*7 
 r9   r-   c                       \ rS rSrSr\" SSS9 SSSSSS.S	S
SSSSS\S\S-  S\SS4S jjj5       r	SS\
S\R                  4S jjrS\R                  S\R                  4S jrSS\
S\R                  4S jjrSrg)r/   i	  a*  QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.

Parameters
----------
mean : array_like (d,)
    The mean vector. Where ``d`` is the dimension.
cov : array_like (d, d), optional
    The covariance matrix. If omitted, use `cov_root` instead.
    If both `cov` and `cov_root` are omitted, use the identity matrix.
cov_root : array_like (d, d'), optional
    A root decomposition of the covariance matrix, where ``d'`` may be less
    than ``d`` if the covariance is not full rank. If omitted, use `cov`.
inv_transform : bool, optional
    If True, use inverse transform instead of Box-Muller. Default is True.
engine : QMCEngine, optional
    Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import qmc
>>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
>>> sample = dist.random(512)
>>> _ = plt.scatter(sample[:, 0], sample[:, 1])
>>> plt.show()
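
If a factor of the covariance is already available, or the covariance is
rank deficient, `cov_root` can be given instead of `cov`. A minimal
sketch using a Cholesky factor; the numbers are arbitrary and only for
illustration:

>>> import numpy as np
>>> cov = np.array([[1.0, 0.7], [0.7, 2.0]])
>>> root = np.linalg.cholesky(cov)  # cov == root @ root.T
>>> dist = qmc.MultivariateNormalQMC(mean=[0, 0], cov_root=root)
>>> sample = dist.random(512)
>>> sample.shape
(512, 2)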

r0   Fr  NT)cov_rootinv_transformenginer   r|   rH   covr   r  r  r  r   r1   c                f   [         R                  " [         R                  " U5      5      nUR                  S   nUb  [         R                  " [         R                  " U5      5      nUR                  S   UR                  S   :X  d  [        S5      e[         R                  " X"R                  5       5      (       d  [        S5      e [         R                  R                  U5      R                  5       nOGUbB  [         R                  " U5      nUR                  S   UR                  S   :X  d  [        S5      eOS nX@l        U(       d  S[         R"                  " US-  5      -  n
OUn
UcE  [%        U[         R&                  R(                  5      (       a  SOS	nX0n[+        SU
S
SS.UD6U l        OB[%        U[.        5      (       a"  UR0                  U
:w  a  [        S5      eXPl        O[        S5      eXl        X0l        Xpl        g ! [         R                  R                   a    [         R                  R                  U5      u  p[         R                  " US:  5      (       d  [        S5      e[         R                  " USS 5      nU	[         R                  " U5      -  R                  5       n GNsf = f)Nr   z/Dimension mismatch between mean and covariance.z#Covariance matrix is not symmetric.g:0yEzCovariance matrix not PSD.rQ   rL   r0   r   Tr  rO   r   r  zDimension of `engine` must be consistent with dimensions of mean and covariance. If `inv_transform` is False, it must be an even number.F`engine` must be an instance of `scipy.stats.qmc.QMCEngine` or `None`.r4   )r?   rS   r(  rV   
atleast_2drE   allclose	transposer  choleskyLinAlgErroreighrY   clipri  _inv_transformr   r   r<   rA   rC   r*   r  r)   rO   _mean_corr_matrix_d)r	  r|   r  r  r  r  r   rO   eigvaleigvec
engine_dimkwargkwargss                r6   r
  MultivariateNormalQMC.__init__/	  sK    zz"---.JJqM?**R]]3/0C ::a=CIIaL0  "/ 0 0;;sMMO44 !FGGB99--c2<<> !}}X.H::a=HNN1$55  "/ 0 0 6
 H+TYYq1u--JJ> )bii.C.CDDF%E\F t"8>DK 	**xx:%  "4 5 5 !K F G G 
$[ 99(( B!#!4vvf/00$%ABBd3"RWWV_4??ABs   -H B'J0/J0r   c                 F    U R                  U5      nU R                  U5      $ )zDraw `n` QMC samples from the multivariate Normal.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.

Returns
-------
sample : array_like (n, d)
    Sample.

)_standard_normal_samples
_correlate)r	  r   base_sampless      r6   rA   MultivariateNormalQMC.randomx	  s#     44Q7|,,r9   r  c                 n    U R                   b  XR                   -  U R                  -   $ XR                  -   $ r3   )r
  r	  )r	  r  s     r6   r   MultivariateNormalQMC._correlate	  s5    ("3"33djj@@  **,,r9   c                 x   U R                   R                  U5      nU R                  (       a(  [        R                  R                  SSUS-
  -  -   5      $ [        R                  " SUR                  S   S5      n[        R                  " S[        R                  " USS2U4   5      -  5      nS[        R                  -  USS2SU-   4   -  n[        R                  " U5      n[        R                  " U5      n[        R                  " XF-  XG-  /S5      R!                  US5      nUSS2SU R"                  24   $ )	zDraw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.

Returns
-------
sample : array_like (n, d)
    Sample.

r   gA?r   rh  rL   rg  Nr   )r  rA   r  statsr  ppfr?   r   rV   ri  logr   picossinrk  rL  r  )	r	  r   rc  evenRsthetasr  r  transf_sampless	            r6   r  .MultivariateNormalQMC._standard_normal_samples	  s    ++$$Q' ::>>#w}(E"EFF 99Qb 115DbffWQW%5667B[71a$h;#77F&&.C&&.CXXrx&:&(**1'!R.  "!YtwwY,//r9   )r
  r  r  r	  r  r3   r4  )r5  r6  r7  r8  r9  r   r   r)   r   r
  r   r?   r:  rA   r  r  r;  r4   r9   r6   r/   r/   	  s    )V E2 +/F
 04"&'+ F!F (F
 -F  F $F F 
F 3FP-	 -"** -"-rzz -bjj -0) 0BJJ 0 0r9   r/   c                       \ rS rSrSr\" SSS9SSS.SS	S
\S\S-  S\SS4
S jj5       r	SS\S\
R                  4S jjrSrg)r.   i	  a  QMC sampling from a multinomial distribution.

Parameters
----------
pvals : array_like (k,)
    Vector of probabilities of size ``k``, where ``k`` is the number
    of categories. Elements must be non-negative and sum to 1.
n_trials : int
    Number of trials.
engine : QMCEngine, optional
    Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
rng : `numpy.random.Generator`, optional
    Pseudorandom number generator state. When `rng` is None, a new
    `numpy.random.Generator` is created using entropy from the
    operating system. Types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    .. versionchanged:: 1.15.0

        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `seed` to
        `rng`. For an interim period, both keywords will continue to work, although
        only one may be specified at a time. After the interim period, function
        calls using the `seed` keyword will emit warnings. Following a
        deprecation period, the `seed` keyword will be removed.

Examples
--------
Let's define 3 categories; for a given sample, the sum of the trials
over all categories is 10 (``n_trials``). The number of trials per
category is determined by the `pvals` associated with each category.
Then, we sample this distribution 64 times.

>>> import matplotlib.pyplot as plt
>>> from scipy.stats import qmc
>>> dist = qmc.MultinomialQMC(
...     pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1)
... )
>>> sample = dist.random(64)

We can plot the sample and verify that the median number of trials
for each category follows the `pvals`. That would be
``pvals * n_trials = [2, 4, 4]``.

>>> fig, ax = plt.subplots()
>>> ax.yaxis.get_major_locator().set_params(integer=True)
>>> _ = ax.boxplot(sample)
>>> ax.set(xlabel="Categories", ylabel="Trials")
>>> plt.show()
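
Each row of `sample` is one quasi-random multinomial draw, so its counts
always sum to ``n_trials``. A quick check (a minimal sketch using only
the objects defined above):

>>> import numpy as np
>>> bool(np.all(sample.sum(axis=1) == 10))
True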

r0   Fr  N)r  r   pvalsrH   n_trialsr  r   r1   c                @   [         R                  " [         R                  " U5      5      U l        [         R                  " U5      S:  a  [        S5      e[         R                  " [         R                  " U5      S5      (       d  [        S5      eX l        UcE  [        U[         R                  R                  5      (       a  SOSnXT0n[        SSSSS	.UD6U l        g [        U[        5      (       a"  UR                  S:w  a  [        S
5      eX0l        g [        S5      e)Nr   z'Elements of pvals must be non-negative.r   z Elements of pvals must sum to 1.r0   r   Tr  r  z Dimension of `engine` must be 1.r   r4   )r?   r(  rS   r&  rX   rE   iscloser   r'  r<   rA   rC   r*   r  r)   rO   )r	  r&  r'  r  r   r  r  s          r6   r
  MultinomialQMC.__init__	  s     ]]2::e#45
66%=1FGGzz"&&-++?@@ > )bii.C.CDDF%E\F d/5DK 	**xx1} !CDD K F G Gr9   r   c                     [         R                  " U[        U R                  5      45      n[	        U5       H  nU R
                  R                  U R                  5      R                  5       n[         R                  " U R                  [        S9n[        [         R                  " U R                  [        S9U5        [         R                  " U R                  [         R                  S9n[        XEU5        XbU'   M     U$ )zDraw `n` QMC samples from the multinomial distribution.

Parameters
----------
n : int, optional
    Number of samples to generate in the parameter space. Default is 1.

Returns
-------
samples : array_like (n, pvals)
    Sample.

r   )r?   rn  r   r&  r   r  rA   r'  ravel
empty_likefloatr   rJ  
zeros_likeintpr   )r	  r   rG   r   
base_drawsp_cumulativesample_s          r6   rA   MultinomialQMC.random
  s     1c$**o./qA++DMM:@@BJ==5ALrxx

%@,OmmDJJbgg>G
':1I  r9   )r  r'  r&  r4  )r5  r6  r7  r8  r9  r   r   r)   r   r
  r?   r:  rA   r;  r4   r9   r6   r.   r.   	  s    3j E2 $(GG G
 D G G 
G 3G<	 "**  r9   r.   r  r  r  c                     [         [        S.nU b#   U R                  5       n X    n[        U40 UD6nU$ SnU$ ! [         a#  nU < S[	        U5      < 3n[        U5      UeSnAff = f)z#A factory for optimization methods.r  Nz7 is not a valid optimization method. It must be one of )
_random_cd&_lloyd_centroidal_voronoi_tessellationrZ   rW  rn   rE   r   )r  r  r  
optimizer_rY  r*  	optimizers          r6   r  r  
  s    
  70 	/'--/L,:J J1&1	  	  	/&) *2368G W%3.		/s   8 
A%A  A%best_sampler  r  r  c                    AU R                   u  pVUS:X  d  US:X  a  [        R                  " XV45      $ US:X  d  US:X  a  U $ [        U 5      nSUS-
  /SUS-
  /SUS-
  /4nSn	Sn
X:  a~  X:  ay  U
S-  n
[	        U/US   Q7SS06n[	        U/US   Q7SS06n[	        U/US   Q7SS06n[        U XUU5      nX:  a  XU4   XU4   sXU4'   XU4'   UnSn	OU	S-  n	X:  a  X:  a  My  U $ )ax  Optimal LHS on CD.

Create a base LHS and do random permutations of coordinates to
lower the centered discrepancy.
Because it starts with a normal LHS, it also works with the
`scramble` keyword argument.

Two stopping criterion are used to stop the algorithm: at most,
`n_iters` iterations are performed; or if there is no improvement
for `n_nochange` consecutive iterations.
r   r   r%  TrL   )rV   r?   rn  r&   r   r   )r:  r  r  r   r  r   rO   	best_discboundsn_nochange_n_iters_colrow_1row_2r   s                  r6   r6  r6  ;
  sP    	DAAvaxxAvaK(I!a%j!a%j!a%jF KH

"x'9A3::T:S<6!9<t<S<6!9<t<#K$)#$-/ 3J'CZ)@ =Ks
#[%< IK1K! 
"x'9$ r9   c                 L    [         R                  " U S5      R                  5       $ )N	cityblock)r   rv   rX   rb   s    r6   _l1_normrE  r
  s    >>&+.2244r9   decayr  c                    [         R                  " U 5      n[        XS9n[        UR                  5       H`  u  pVUR
                  U    Vs/ s H  owS:w  d  M
  UPM     nnUR                  U   n	[         R                  " U	SS9n
X   XU   -
  U-  -   X5'   Mb     [         R                  " [         R                  " US:  US:*  5      SS9nX;   X'   U $ s  snf )u
  Lloyd-Max algorithm iteration.

Based on the implementation of Stéfan van der Walt:

https://github.com/stefanv/lloyd

which is:

    Copyright (c) 2021-04-21 Stéfan van der Walt
    https://github.com/stefanv/lloyd
    MIT License

Parameters
----------
sample : array_like (n, d)
    The sample to iterate on.
decay : float
    Relaxation decay. A positive value moves the samples toward their
    centroid, and a negative value moves them away.
    A value of 1 moves the samples exactly to their centroid.
qhull_options : str
    Additional options to pass to Qhull. See Qhull manual
    for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
    "Qbb Qc Qz Qj" otherwise.)

Returns
-------
sample : array_like (n, d)
    The sample after an iteration of Lloyd's algorithm.

)r  rh  r   r   r   )
r?   r-  r   rG  point_regionregionsverticesr|   rY   logical_and)rG   rF  r  
new_samplevoronoiiir{  r   regionvertscentroidis_valids               r6   _lloyd_iterationrS  v
  s    H v&Jf:GW112 %__S1=1"W!1=   ( 775q)x*'<&EE
 3$ vvbnnZ1_jAoFQOH!+FM% >s   	CCr  r  )r  r  r  r  r  c                   A[         R                  " U 5      R                  5       n U R                  S:X  d  [	        S5      eU R
                  S   S:  d  [	        S5      eU R                  5       S:  d  U R                  5       S:  a  [	        S5      eUc  SnU R
                  S   S	:  a  US
-  nU* [         R                  " S5      -  n[        U5       Vs/ s H   n[         R                  " U* U-  5      S-   PM"     nn[        U S9n[        U5       H/  n	[        XU	   US9n [        U S9n
[        X-
  5      U:  a    U $ U
nM1     U $ s  snf )a  Approximate Centroidal Voronoi Tessellation.

Perturb samples in N-dimensions using Lloyd-Max algorithm.

Parameters
----------
sample : array_like (n, d)
    The sample to iterate on. With ``n`` the number of samples and ``d``
    the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``.
tol : float, optional
    Tolerance for termination. If the min of the L1-norm over the samples
    changes less than `tol`, it stops the algorithm. Default is 1e-5.
maxiter : int, optional
    Maximum number of iterations. It will stop the algorithm even if
    `tol` is above the threshold.
    Too many iterations tend to cluster the samples as a hypersphere.
    Default is 10.
qhull_options : str, optional
    Additional options to pass to Qhull. See Qhull manual
    for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
    "Qbb Qc Qz Qj" otherwise.)

Returns
-------
sample : array_like (n, d)
    The sample after being processed by Lloyd-Max algorithm.

Notes
-----
The Lloyd-Max algorithm is an iterative process with the purpose of improving
the dispersion of samples. For a given sample: (i) compute a Voronoi
Tessellation; (ii) find the centroid of each Voronoi cell; (iii) move the
samples toward the centroid of their respective cell. See [1]_, [2]_.

A relaxation factor is used to control how fast samples can move at each
iteration. This factor starts at 2 and decays exponentially to 1 over
`maxiter` iterations.

The process converges to equally spaced samples. It implies that measures
like the discrepancy could suffer from too many iterations. On the other
hand, L1 and L2 distances should improve. This is especially true with
QMC methods which tend to favor the discrepancy over other criteria.

.. note::

    The current implementation does not intersect the Voronoi Tessellation
    with the boundaries. This implies that for a low number of samples,
    empirically below 20, no Voronoi cell is touching the boundaries.
    Hence, samples cannot be moved close to the boundaries.

    Further improvements could consider the samples at infinity so that
    all boundaries are segments of some Voronoi cells. This would fix
    the computation of the centroid position.

.. warning::

   The Voronoi Tessellation step is expensive and quickly becomes
   intractable with dimensions as low as 10 even for a sample
   of size as low as 1000.

.. versionadded:: 1.9.0

References
----------
.. [1] Lloyd. "Least Squares Quantization in PCM".
   IEEE Transactions on Information Theory, 1982.
.. [2] Max J. "Quantizing for minimum distortion".
   IEEE Transactions on Information Theory, 1960.

Examples
--------
>>> import numpy as np
>>> from scipy.spatial import distance
>>> from scipy.stats._qmc import _lloyd_centroidal_voronoi_tessellation
>>> rng = np.random.default_rng()
>>> sample = rng.random((128, 2))

.. note::

    The samples need to be in :math:`[0, 1]^d`. `scipy.stats.qmc.scale`
    can be used to scale the samples from their
    original bounds to :math:`[0, 1]^d`. And back to their original bounds.

Compute the quality of the sample using the L1 criterion.

>>> def l1_norm(sample):
...    return distance.pdist(sample, 'cityblock').min()

>>> l1_norm(sample)
0.00161...  # random

Now process the sample using Lloyd's algorithm and check the improvement
on the L1. The value should increase.

>>> sample = _lloyd_centroidal_voronoi_tessellation(sample)
>>> l1_norm(sample)
0.0278...  # random

rL   z`sample` is not a 2D arrayr   z`sample` dimension is not >= 2rP   rQ   z!`sample` is not in unit hypercubezQbb Qc Qz QJr   z Qxg?g?rb   )rG   rF  r  )r?   rS   r  rT   rE   rV   rW   rX   r  r   exprE  rS  r   )rG   r  r  r  r  rootr  rF  l1_oldr   l1_news              r6   r7  r7  
  sG   V 	ZZ$$&F;;!566<<?a9:: 	

rvzz|b0<==&<<?aU"M
 8bffSk!D,1'N;NqRVVQBIs"NE;V$F7^!1X+

 (v#% M F  M! <s   'Ec                     [        U 5      n U S:X  a&  [        R                  " 5       n U c  [        S5      e U $ U S::  a  [	        SU  S35      eU $ )a  Validate `workers` based on platform and value.

Parameters
----------
workers : int, optional
    Number of workers to use for parallel processing. If -1 is
    given all CPU threads are used. Default is 1.

Returns
-------
Workers : int
    Number of CPU used by the algorithm

rh  zaCannot determine the number of cpus using os.cpu_count(), cannot use -1 for the number of workersr   zInvalid number of workers: z, must be -1 or > 0)r   os	cpu_countNotImplementedErrorrE   rl   s    r6   rm   rm   L  sq     'lG"},,.?%:   N	 
A6wi @" " # 	# Nr9   rO   z7tuple[npt.NDArray[np.generic], npt.NDArray[np.generic]]c                      [         R                  " X5      n[         R                  " X5      n[         R                  " X4:  5      (       d  [        S5      eX44$ ! [         a  nSn[        U5      UeSnAff = f)zBounds input validation.

Parameters
----------
l_bounds, u_bounds : array_like (d,)
    Lower and upper bounds.
d : int
    Dimension to use for broadcasting.

Returns
-------
l_bounds, u_bounds : array_like (d,)
    Lower and upper bounds.

zP'l_bounds' and 'u_bounds' must be broadcastable and respect the sample dimensionNz1Bounds are not consistent 'l_bounds' < 'u_bounds')r?   broadcast_torE   rY   )rI   rJ   rO   rZ   r[   rY  r  s          r6   rU   rU   j  so    $',, 66%-  LMM<  ''o3&'s   ,A 
A6#A11A6).r3   )rp   	euclideanr  r4  )Zr9  r  r   r=   rZ  rx   abcr   r   	functoolsr   typingr   r   r   r	   collections.abcr
   numpyr?   scipy._lib._utilr   r   r   r   numpy.typingnptscipy.statsr  r   r   r   scipy.sparse.csgraphr   scipy.spatialr   r   scipy.specialr   _sobolr   r   r   r   r   r   r   _qmc_cyr   r   r    r!   r"   r#   r$   __all__rA   rD   r7   r   r:  r%   rc   r.  r&   strr'   r(   r   r   r   listr   r   r   r)   r+   r,   r*   r-   r/   r.   dictr  r6  rE  rS  r7  rm   rU   r4   r9   r6   <module>rr     s   ,    	  #   %  N N  I I 6 + "    6
 
Y- 		8K8K  
 
] }  
=> P2P2P2 P2
 P2 ZZP2fo "** B  7;D/D/ D/ 34	D/
 D/
 $)D/R -6!zDzD()zD zD ',zDz:G:G:G $:G ).:GzW W W# W# W$WtA A

 A@$	 $d3i $P )-,
,%,ZZ,b BA "#/3BABABA 	BA
 BA -BA BA BA $&::BAJx xv	d5Y d5N@)Y @)F
\I \~
]) ]@k0 k0\k k\./$6@D_844&)47:4AN44 ZZ4n5RZZ 5E 5=JJ== = ZZ	=F  $SS 
S 	S
 :S S ZZSly  <)8=@>r9   