
    (phB5                    `   S SK Jr  S SKJr  S SKJr  S SKrS SKrS SK	J
r
  S SKrS SKJr  SSKJr  SS	KJr  SS
KJr  S SKJrJrJr  S SKJr  SSKJr  SSKJrJr   SSK!J"r"  S SKJ#r#  / SQr$\" SS5      r%\"" \%SSS9S:S j5       r&S SS.S jr'S r( " S S5      r)S r*S r+S;S  jr,S! r-\"" \)SS\-S"9S<S# j5       r.S$ r/S% r0S& r1S=S' jr2\ " S( S)5      5       r3S>S* jr4S+ r5S, r6\ " S- S.5      5       r7S?S/ jr8\ " S0 S15      5       r9S@S2 jr:S3 r;S4 r<\"" \)SS\-S"9SAS5 j5       r= " S6 S75      r>S8 r?S9 r@g)B    )
namedtuple)	dataclass)combN)combinations)shgo   )distributions)ConfidenceInterval)norm)gammakvgammaln)ifft)_a_ij_Aij_Dij2)_concordant_pairs_discordant_pairs)_axis_nan_policy_factory)	_stats_py)epps_singleton_2sampcramervonmisessomersdbarnard_exactboschloo_exactcramervonmises_2samp	tukey_hsdpoisson_means_testEpps_Singleton_2sampResult	statisticpvalue      )	n_samples	too_smallc                    [         R                  " U5      n[        U 5      [        U5      pCUS:  d  US:  a  [        SU SU S35      e[         R                  " U 5      R                  5       (       d  [        S5      e[         R                  " U5      R                  5       (       d  [        S5      eX4-   nUR                  S:  a  [        SUR                   S35      e[         R                  " US	5      R                  5       (       a  [        S
5      eS	SK	J
n  U" [         R                  " X45      5      S-  n[         R                  " US5      U-  n[         R                  " [         R                  " X-  5      [         R                  " X-  5      45      R                   n	[         R                  " [         R                  " X-  5      [         R                  " X-  5      45      R                   n
[         R"                  " U	R                   SS9n[         R"                  " U
R                   SS9nXS-  U-  XT-  U-  -   n[         R$                  R'                  U5      n[         R$                  R)                  U5      nUS[        U5      -  :  a  [*        R,                  " SSS9  [         R.                  " U	S	S9[         R.                  " U
S	S9-
  nU[         R0                  " UR                   [         R0                  " UU5      5      -  n[3        X45      S:  a  SSUS-  -   SUS-  US-  -   -  -   -  nUU-  n[4        R6                  " U5      n[4        R8                  " UUSS[         S9n[;        UU5      $ )a.
  Compute the Epps-Singleton (ES) test statistic.

Test the null hypothesis that two samples have the same underlying
probability distribution.

Parameters
----------
x, y : array-like
    The two samples of observations to be tested. Input must not have more
    than one dimension. Samples can have different lengths, but both
    must have at least five observations.
t : array-like, optional
    The points (t1, ..., tn) where the empirical characteristic function is
    to be evaluated. It should be positive distinct numbers. The default
    value (0.4, 0.8) is proposed in [1]_. Input must not have more than
    one dimension.

Returns
-------
statistic : float
    The test statistic.
pvalue : float
    The associated p-value based on the asymptotic chi2-distribution.

See Also
--------
ks_2samp, anderson_ksamp

Notes
-----
Testing whether two samples are generated by the same underlying
distribution is a classical question in statistics. A widely used test is
the Kolmogorov-Smirnov (KS) test which relies on the empirical
distribution function. Epps and Singleton introduce a test based on the
empirical characteristic function in [1]_.

One advantage of the ES test compared to the KS test is that is does
not assume a continuous distribution. In [1]_, the authors conclude
that the test also has a higher power than the KS test in many
examples. They recommend the use of the ES test for discrete samples as
well as continuous samples with at least 25 observations each, whereas
`anderson_ksamp` is recommended for smaller sample sizes in the
continuous case.

The p-value is computed from the asymptotic distribution of the test
statistic which follows a `chi2` distribution. If the sample size of both
`x` and `y` is below 25, the small sample correction proposed in [1]_ is
applied to the test statistic.

The default values of `t` are determined in [1]_ by considering
various distributions and finding good values that lead to a high power
of the test in general. Table III in [1]_ gives the optimal values for
the distributions tested in that study. The values of `t` are scaled by
the semi-interquartile range in the implementation, see [1]_.

References
----------
.. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
   problem using the empirical characteristic function", Journal of
   Statistical Computation and Simulation 26, p. 177--203, 1986.

.. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
   - the Epps-Singleton two-sample test using the empirical characteristic
   function", The Stata Journal 9(3), p. 454--465, 2009.

   z6x and y should have at least 5 elements, but len(x) = z and len(y) = .z$x must not contain nonfinite values.z$y must not contain nonfinite values.r   z t must be 1d, but t.ndim equals r   z&t must contain positive elements only.)iqrr!   )r   T)biaszEstimated covariance matrix does not have full rank. This indicates a bad choice of the input t and the test might not be consistent.)
stacklevelaxis         ?gܿg333333$@g333333greaterF)alternative	symmetricxp)npasarraylen
ValueErrorisfiniteallndim
less_equalanyscipy.statsr(   hstackreshapevstackcossinTcovlinalgpinvmatrix_rankwarningswarnmeandotmaxr   _SimpleChi2_get_pvaluer   )xytnxnynr(   sigmatsgxgycov_xcov_yest_covest_cov_invrg_diffwcorrchi2ps                        I/var/www/html/venv/lib/python3.13/site-packages/scipy/stats/_hypotests.pyr   r      s   J 	

1AVSV
QBF  Tt16 7 	7;;q>?@@;;q>?@@
A 	vvz;AFF81EFF	}}Q  ABB  		1&!"Q&E	Aw	%	'B 
BFF24L"&&,/	0	2	2B	BFF24L"&&,/	0	2	2BFF244d#EFF244d#EtUladE\)G))..)K
		k*A1SV8| 6 "#	$ WWRa 2772A#66F	"&&266+v6
77A 	BbC!e*$tR$Z"t*-D'EEF1H  #Da9RTUA%a++    	two-sided)diffr1   c                .   [        XX#XE5        X-   X-   -  XA-  X-   -  -
  nUS::  a  [        R                  " SS5      $ XS-  -  X#S-  -  -   nX-  X#-  -
  U-
  [        R                  " U5      -  nXU-   -  n	X6-  n
[
        R                  R                  SS/U	5      u  p[
        R                  R                  SS/U
5      u  p[        R                  " XS-   5      n[        R                  " XS-   5      SS2S4   n[
        R                  R                  X5      n[
        R                  R                  UU
5      nX-  nUU-  nUU-
  U-
  nUU-  UU-  -   n[        R                  " SSS9   U[        R                  " U5      -  nSSS5        US	:X  a.  [        R                  " W5      [        R                  " U5      :  nOUS
:X  a  WU:*  nOWU:  n[        R                  " UU-  U   5      n[        R                  " UU5      $ ! , (       d  f       N= f)a  
Performs the Poisson means test, AKA the "E-test".

This is a test of the null hypothesis that the difference between means of
two Poisson distributions is `diff`. The samples are provided as the
number of events `k1` and `k2` observed within measurement intervals
(e.g. of time, space, number of observations) of sizes `n1` and `n2`.

Parameters
----------
k1 : int
    Number of events observed from distribution 1.
n1: float
    Size of sample from distribution 1.
k2 : int
    Number of events observed from distribution 2.
n2 : float
    Size of sample from distribution 2.
diff : float, default=0
    The hypothesized difference in means between the distributions
    underlying the samples.
alternative : {'two-sided', 'less', 'greater'}, optional
    Defines the alternative hypothesis.
    The following options are available (default is 'two-sided'):

      * 'two-sided': the difference between distribution means is not
        equal to `diff`
      * 'less': the difference between distribution means is less than
        `diff`
      * 'greater': the difference between distribution means is greater
        than `diff`

Returns
-------
statistic : float
    The test statistic (see [1]_ equation 3.3).
pvalue : float
    The probability of achieving such an extreme value of the test
    statistic under the null hypothesis.

Notes
-----

Let:

.. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)

be a random variable independent of

.. math:: X_2  \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)

and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
``n2``, respectively, to test the null hypothesis that

.. math::
   H_0: \lambda_1 - \lambda_2 = \mathtt{diff}

A benefit of the E-test is that it has good power for small sample sizes,
which can reduce sampling costs [1]_. It has been evaluated and determined
to be more powerful than the comparable C-test, sometimes referred to as
the Poisson exact test.

References
----------
.. [1]  Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
   comparing two Poisson means. Journal of Statistical Planning and
   Inference, 119(1), 23-35.

.. [2]  Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
   testing samples from Poisson series: With an application to testing
   clover seed for dodder. Biometrika, 31(3/4), 313-323.

Examples
--------

Suppose that a gardener wishes to test the number of dodder (weed) seeds
in a sack of clover seeds that they buy from a seed company. It has
previously been established that the number of dodder seeds in clover
follows the Poisson distribution.

A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.

>>> import scipy.stats as stats
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)

The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
r   r   r!   g|=g?Nignore)invaliddividere   less)_poisson_means_test_ivr   SignificanceResultr4   sqrtr	   poissonppfarangepmferrstateabssum)k1n1k2n2rf   r1   	lmbd_hat2vart_k1k2
nlmbd_hat1
nlmbd_hat2x1_lbx1_ubx2_lbx2_ubx1x2prob_x1prob_x2lmbd_x1lmbd_x2
lmbds_diffvar_x1x2t_x1x2	indicatorr    s                             rc   r   r      s   V 224= 'bg&bg)>>I
 A~++Aq11 a.2q>
)C
 g$&"''#,6F 4'(JJ
 !((,,eY-?LLE ((,,eY-?LLE
 
5!)	$B	5!)	$QW	-B ##''7G##''J7G gG2gG7"T)J|gl*H 
Xh	7bggh// 
8
 k!FF6NbffVn4			f$	f$	 VVWw&	23F''77 
8	7s   ,H
Hc                 &   U [        U 5      :w  d  U[        U5      :w  a  [        S5      eSnU S:  d  US:  a  [        U5      eUS::  d  US::  a  [        S5      eUS:  a  [        S5      e1 SknUR                  5       U;  a  [        SU S35      eg )	Nz`k1` and `k2` must be integers.z1`k1` and `k2` must be greater than or equal to 0.r   z%`n1` and `n2` must be greater than 0.z(diff must be greater than or equal to 0.>   rk   r0   re   zAlternative must be one of 'z'.)int	TypeErrorr7   lower)rv   rw   rx   ry   rf   r1   	count_erralternativess           rc   rl   rl   P  s    	SW}c"g9::CI	Ava##	Qw"'@AAaxCDD3L,.7~RHII /rd   c                        \ rS rSrS rS rSrg)CramerVonMisesResultid  c                     Xl         X l        g Nr   )selfr   r    s      rc   __init__CramerVonMisesResult.__init__e  s    "rd   c                 h    U R                   R                   SU R                   SU R                   S3$ )Nz(statistic=z	, pvalue=))	__class____name__r   r    )r   s    rc   __repr__CramerVonMisesResult.__repr__i  s8    >>**+;t~~6F G++a) 	*rd   )r    r   N)r   
__module____qualname____firstlineno__r   r   __static_attributes__ rd   rc   r   r   d  s    *rd   r   c                   ^^ S mS mUU4S jn[         R                  " U 5      n [         R                  " U SS9n[         R                  " U SS9nSn[         R                  " U5      (       ar  U" X@U   5      * [         R
                  [        US-   5      -  -  nX#   U-   X#'   [         R                  " U5      S	:  X3'   US-  n[         R                  " U5      (       a  Mr  U$ )
u  
psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
This implements a modified version by excluding the term V(x) / 12
(here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
twice in _cdf_cvm.

Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.
c                     U S-  S-  n[        SU5      [        SU5      -   n[        R                  " U* 5      U S-  S-  -  U-  [        R                  " [        R                  5      -  $ )Nr!   r"         ?      ?      ?)r   r4   exprn   pi)rP   zbs      rc   _ed2_psi1_mod.<locals>._ed2{  sX    qD1HsAJC#vvqbzQqSCL(1,rwwruu~==rd   c                     U S-  S-  n[         R                  " U* 5      [         R                  " [         R                  5      -  nX S-  S-  -  S[	        SU5      -  S[	        SU5      -  -   [	        SU5      -
  -  $ )Nr!   r"   g      @r      r         ?)r4   r   rn   r   r   )rP   r   cs      rc   _ed3_psi1_mod.<locals>._ed3  si    qD1HFFA2J'aC3<1RQZ<!BsAJ,#>C#KLLrd   c                    > SU -  S-   nS[         R                  " U5      -  nUS-  nUS-  nU[        U S-   5      -  T" SU -  S-   U-  5      -  SU-  -  n[        U S-   5      T" SU -  S-   U-  5      -  S	U-  -  nSUS-   -  [        U S
-   5      -  T" SU -  S-   U-  5      -  SU-  -  nSU-  [        U S-   5      -  T" SU -  S-   U-  5      -  SU-  -  n	SU-  [        U S-   5      -  T" SU -  S-   U-  5      -  SU-  -  n
Xg-   U-   U	-   U
-   $ )Nr!   r   r   r         ?r"   r   	   H   r   r&            )r4   rn   r   )krO   msxy1y2e1e2e3e4e5r   r   s              rc   _Ak_psi1_mod.<locals>._Ak  sM   aC!G^XXq3w$A	2~"66!b&A1s7^dAEAI#344R@!a%[5S>)D!a%!)r1A,BBb2gNUU1s7^#dAEAI+;&<<bIUU1s7^#dAEAI+;&<<bIw|b 2%%rd   floatdtypeboolr   r   Hz>)r4   r5   
zeros_like	ones_liker<   r   r   rt   )rO   r   totcondr   r   r   r   s         @@rc   	_psi1_modr   n  s    >
M
& 	

1A
--
)C<<(D	A
&&,,dG_a!e 45IM	VVAY$&
	Q	 &&,, Jrd   c                 h   [         R                  " U 5      n S n[         R                  " U SS9n[         R                  " U SS9nSn[         R                  " U5      (       aQ  U" X   U5      nX#   U-   X#'   [         R
                  " U5      S:  X3'   US-  n[         R                  " U5      (       a  MQ  U$ )u  
Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).

See equation 1.2 in Csörgő, S. and Faraway, J. (1996).

Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.

The function is not expected to be accurate for large values of x, say
x > 4, when the cdf is very close to 1.
c                 `   [         R                  " [        US-   5      [        US-   5      -
  5      [         R                  S-  [         R                  " U 5      -  -  nSU-  S-   nUS-  SU -  -  n[        SU5      nU[         R                  " U5      -  [         R                  " U* 5      -  U-  $ )Nr   r   r   r"   r!      r   )r4   r   r   r   rn   r   )rO   r   urP   qr   s         rc   term_cdf_cvm_inf.<locals>.term  s    FF71s7#gacl23ruuczBGGAJ7NOaC!GqDBqDMtQK2771:~r
*Q..rd   r   r   r   r   r   r   )r4   r5   r   r   r<   rt   )rO   r   r   r   r   r   s         rc   _cdf_cvm_infr     s     	

1A/ --
)C<<(D	A
&&,,!IM	VVAY$&
	Q	 &&,, Jrd   c                 6   [         R                  " U 5      n Uc  [        U 5      nO^[         R                  " U SS9nSSU-  -  U :  XS-  :  -  n[        X   5      SSSU-  -  -   -  [	        X   5      U-  -   X#'   SX US-  :  '   UR
                  S:X  a  US	   $ U$ )
u  
Calculate the cdf of the Cramér-von Mises statistic for a finite sample
size n. If N is None, use the asymptotic cdf (n=inf).

See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples,
1.2 for the asymptotic cdf.

The function is not expected to be accurate for large values of x, say
x > 2, when the cdf is very close to 1 and it might return values > 1
in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
is not accurate for small values of n, especially close to the bounds of
the distribution's domain, [1/(12*n), n/3], where the value jumps to 0
and 1, respectively. These are limitations of the approximation by Csörgő
and Faraway (1996) implemented in this function.
r   r   r/   r   g      @r   r   r   r   )r4   r5   r   r   r   r:   )rO   rT   rP   sups       rc   _cdf_cvmr     s      	

1AyO MM!7+2a4y1}rT* af%RAY7)AF:Ka:OOqs(vv{uHrd   c                 2    U R                   U R                  4$ r   r   )ress    rc   _cvm_result_to_tupler     s    ==#**$$rd   )r#   r$   result_to_tuplec                     [        U[        5      (       a  [        [        U5      R                  n[
        R                  " [
        R                  " U 5      5      nUR                  S::  a  [        S5      e[        U5      nU" U/UQ76 nS[
        R                  " SUS-   5      -  S-
  SU-  -  nSSU-  -  [
        R                  " Xe-
  S-  5      -   n[
        R                  " S[        Xt5      -
  SS5      n[        XxS9$ )	u  Perform the one-sample Cramér-von Mises test for goodness of fit.

This performs a test of the goodness of fit of a cumulative distribution
function (cdf) :math:`F` compared to the empirical distribution function
:math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
assumed to be independent and identically distributed ([1]_).
The null hypothesis is that the :math:`X_i` have cumulative distribution
:math:`F`.

Parameters
----------
rvs : array_like
    A 1-D array of observed values of the random variables :math:`X_i`.
    The sample must contain at least two observations.
cdf : str or callable
    The cumulative distribution function :math:`F` to test the
    observations against. If a string, it should be the name of a
    distribution in `scipy.stats`. If a callable, that callable is used
    to calculate the cdf: ``cdf(x, *args) -> float``.
args : tuple, optional
    Distribution parameters. These are assumed to be known; see Notes.

Returns
-------
res : object with attributes
    statistic : float
        Cramér-von Mises statistic.
    pvalue : float
        The p-value.

See Also
--------
kstest, cramervonmises_2samp

Notes
-----
.. versionadded:: 1.6.0

The p-value relies on the approximation given by equation 1.8 in [2]_.
It is important to keep in mind that the p-value is only accurate if
one tests a simple hypothesis, i.e. the parameters of the reference
distribution are known. If the parameters are estimated from the data
(composite hypothesis), the computed p-value is not reliable.

References
----------
.. [1] Cramér-von Mises criterion, Wikipedia,
       https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
.. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
       Distribution of Cramér-von Mises Statistics. Journal of the
       Royal Statistical Society, pp. 221-234.

Examples
--------

Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
were, in fact, drawn from the standard normal distribution. We choose a
significance level of ``alpha=0.05``.

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(165417232101553420507139617764912913465)
>>> x = stats.norm.rvs(size=500, random_state=rng)
>>> res = stats.cramervonmises(x, 'norm')
>>> res.statistic, res.pvalue
(0.1072085112565724, 0.5508482238203407)

The p-value exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed sample is drawn from the
standard normal distribution.

Now suppose we wish to check whether the same samples shifted by 2.1 is
consistent with being drawn from a normal distribution with a mean of 2.

>>> y = x + 2.1
>>> res = stats.cramervonmises(y, 'norm', args=(2,))
>>> res.statistic, res.pvalue
(0.8364446265294695, 0.00596286797008283)

Here we have used the `args` keyword to specify the mean (``loc``)
of the normal distribution to test the data against. This is equivalent
to the following, in which we create a frozen normal distribution with
mean 2.1, then pass its ``cdf`` method as an argument.

>>> frozen_dist = stats.norm(loc=2)
>>> res = stats.cramervonmises(y, frozen_dist.cdf)
>>> res.statistic, res.pvalue
(0.8364446265294695, 0.00596286797008283)

In either case, we would reject the null hypothesis that the observed
sample is drawn from a normal distribution with a mean of 2 (and default
variance of 1) because the p-value is less than our chosen
significance level.

r   z2The sample must contain at least two observations.r!   r   r/   g        Nr   )
isinstancestrgetattrr	   cdfr4   sortr5   sizer7   r6   rq   ru   clipr   r   )	rvsr   argsvalsrT   cdfvalsr   r_   rb   s	            rc   r   r     s    D #smS)--772::c?#DyyA~MNND	A$G	
299Q!	q	 1Q3'A	2a42661;*++A 	Xa^#R.A!66rd   c                     [         R                  " S[         R                  S9n[        SU S-   5       HW  nUn[         R                  " X"S-   -  S-  S-   [         R                  S9n[        U5      nUS-  USU& X* S=== US-  -  sss& MY     U$ )z
Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum
of ranks of positive differences).
Returns an array with the probabilities of all the possible ranks
r = 0, ..., n*(n+1)/2
r   r   r!   r   N)r4   onesfloat64rangezerosr6   )rT   r   r   prev_cr   s        rc   _get_wilcoxon_distrr   _  s     	$A1a!e_HHQa%[A%)<K"1	"#&3,  Hrd   c                    [         R                  " SU S-   5      SS2S4   nX S-   -  S-  nSU-  n[         R                  " U5      nS[         R                  -  U-  U-  n[         R                  " [         R                  " XQ-  5      SS9n[         R
                  " SU-  U-  5      U-  n[         R                  " [        U5      5      n[         R                  " [        U5      S-   5      n	USSS2   U	SS& U	S==   S-  ss'   U	S   U	S'   U	$ )a  
Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum
of ranks of positive differences).
Returns an array with the probabilities of all the possible ranks
r = 0, ..., n*(n+1)/2
This is a slower reference function
References
----------
.. [1] 1. Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
    Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
r   Nr!   r   r,   y              ?r)   )
r4   rq   r   prodrA   r   realr   r   r   )
rT   airQ   r   jthetaphi_spphi_srb   r   s
             rc   _get_wilcoxon_distr2r   p  s     
1ac	1d7	#B	Q3	A	!A
		!AbeeGAIaKEWWRVVEH%A.FFF2e8A:'E
UA
((3q6!8
C#A#CIFaKF!fCGJrd   c                 6   U R                   S   S:X  d  U R                   S   S:X  a   [        R                  [        R                  4$ U R                  5       n[	        U 5      n[        U 5      nU R                  SS9S-  R                  5       nU R                  SS9S-  R                  5       nUS-  U-
  US-  U-
  -  nX#-
  US-  -  nS[        U 5      X#-
  S-  U-  -
  -  nX-  n	U	S:X  a  US4$ XyS-  -  n
S[        R                  " [        U
5      5      -  nX{4$ )z=Calculate Kendall's tau-b and p-value from contingency table.r   r   r,   r!   r   r"   )
shaper4   nanru   _P_Qr   r   sfrt   )ANAPAQASri2Scj2denominatortau	numerator	s02_tau_bZrb   s               rc   _tau_br    s   
 	wwqzQ!''!*/vvrvv~	
B	AB	ABEEqEM1!!#DEEqEM1!!#Dq54<"a%$,/K5;$
$C>!$!|b'889I%IA~AvsNA	$''#a&/A6Mrd   c                 <   U R                   S   S::  d  U R                   S   S::  a   [        R                  [        R                  4$ U R                  5       nUS-  n[	        U 5      n[        U 5      nU R                  SS9S-  R                  5       nXE-
  X6-
  -  n[        U 5      XE-
  S-  U-  -
  n[        R                  " SS9   XE-
  SU-  S-  -  n	S	S	S	5        [        R                  " 5       n
[        R                  " W	X[        S
9nX{4$ ! , (       d  f       N@= f)z7Calculate Somers' D and p-value from contingency table.r   r   r!   r,   rh   )rj   r"   r   N)r3   )r   r4   r   ru   r  r  r   rs   r   _SimpleNormalrN   )r  r1   r  NA2r  r  r  dSr  r   rb   s               rc   	_somers_dr    s    
 	wwqzQ!''!*/vvrvv~	
B
a%C	AB	ABEEqEM1!!#D	3:AqRUQJrM)A	H	%Wq!usl" 
& ""$Dar:A4K 
&	%s   D
Dc                   H    \ rS rSr% \\S'   \\S'   \R                  \S'   Srg)SomersDResulti  r   r    tabler   N)	r   r   r   r   r   __annotations__r4   ndarrayr   r   rd   rc   r  r    s    M::rd   r  c                    [         R                  " U 5      [         R                  " U5      pU R                  S:X  aR  U R                  UR                  :w  a  [	        S5      e[
        R                  R                  R                  X5      S   nOU R                  S:X  a  [         R                  " U S:  5      (       a  [	        S5      e[         R                  " X R                  [        5      :g  5      (       a  [	        S5      eU R                  5       S   R                  S:  a  [	        S5      eU nO[	        S5      e[        UR                  [        5      U5      u  pE[        XEU5      nXFl        U$ )	a  Calculates Somers' D, an asymmetric measure of ordinal association.

Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
correspondence between two rankings. Both statistics consider the
difference between the number of concordant and discordant pairs in two
rankings :math:`X` and :math:`Y`, and both are normalized such that values
close  to 1 indicate strong agreement and values close to -1 indicate
strong disagreement. They differ in how they are normalized. To show the
relationship, Somers' :math:`D` can be defined in terms of Kendall's
:math:`\tau_a`:

.. math::
    D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}

Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
:math:`n` rankings can also be viewed as an :math:`r \times s` contingency
table in which element :math:`i, j` is the number of rank pairs with rank
:math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
Accordingly, `somersd` also allows the input data to be supplied as a
single, 2D contingency table instead of as two separate, 1D rankings.

Note that the definition of Somers' :math:`D` is asymmetric: in general,
:math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
:math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
variable, and the "column" variable :math:`Y` is dependent. For Somers'
:math:`D(X|Y)`, swap the input lists or transpose the input table.

Parameters
----------
x : array_like
    1D array of rankings, treated as the (row) independent variable.
    Alternatively, a 2D contingency table.
y : array_like, optional
    If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
    same length, treated as the (column) dependent variable.
    If `x` is 2D, `y` is ignored.
alternative : {'two-sided', 'less', 'greater'}, optional
    Defines the alternative hypothesis. Default is 'two-sided'.
    The following options are available:
    * 'two-sided': the rank correlation is nonzero
    * 'less': the rank correlation is negative (less than zero)
    * 'greater':  the rank correlation is positive (greater than zero)

Returns
-------
res : SomersDResult
    A `SomersDResult` object with the following fields:

        statistic : float
           The Somers' :math:`D` statistic.
        pvalue : float
           The p-value for a hypothesis test whose null
           hypothesis is an absence of association, :math:`D=0`.
           See notes for more information.
        table : 2D array
           The contingency table formed from rankings `x` and `y` (or the
           provided contingency table, if `x` is a 2D array)

See Also
--------
kendalltau : Calculates Kendall's tau, another correlation measure.
weightedtau : Computes a weighted version of Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
pearsonr : Calculates a Pearson correlation coefficient.

Notes
-----
This function follows the contingency table approach of [2]_ and
[3]_. *p*-values are computed based on an asymptotic approximation of
the test statistic distribution under the null hypothesis :math:`D=0`.

Theoretically, hypothesis tests based on Kendall's :math:`\tau` and Somers'
:math:`D` should be identical.
However, the *p*-values returned by `kendalltau` are based
on the null hypothesis of *independence* between :math:`X` and :math:`Y`
(i.e. the population from which pairs in :math:`X` and :math:`Y` are
sampled contains equal numbers of all possible pairs), which is more
specific than the null hypothesis :math:`D=0` used here. If the null
hypothesis of independence is desired, it is acceptable to use the
*p*-value returned by `kendalltau` with the statistic returned by
`somersd` and vice versa. For more information, see [2]_.

Contingency tables are formatted according to the convention used by
SAS and R: the first ranking supplied (``x``) is the "row" variable, and
the second ranking supplied (``y``) is the "column" variable. This is
opposite the convention of Somers' original paper [1]_.

References
----------
.. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
       Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
       pp. 799--811, 1962.

.. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
       Tests for Correlation in Two-Way Contingency Tables", *Journal of
       the American Statistical Association* Vol. 72, No. 358, pp.
       309--315, 1977.

.. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
       *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.

.. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
       Statistics Tutorials and Statistical Guides*,
       https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
       Accessed July 31, 2020.

Examples
--------
We calculate Somers' D for the example given in [4]_, in which a hotel
chain owner seeks to determine the association between hotel room
cleanliness and customer satisfaction. The independent variable, hotel
room cleanliness, is ranked on an ordinal scale: "below average (1)",
"average (2)", or "above average (3)". The dependent variable, customer
satisfaction, is ranked on a second scale: "very dissatisfied (1)",
"moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
"moderately satisfied (4)", or "very satisfied (5)". 189 customers
respond to the survey, and the results are cast into a contingency table
with the hotel room cleanliness as the "row" variable and customer
satisfaction as the "column" variable.

+-----+-----+-----+-----+-----+-----+
|     | (1) | (2) | (3) | (4) | (5) |
+=====+=====+=====+=====+=====+=====+
| (1) | 27  | 25  | 14  | 7   | 0   |
+-----+-----+-----+-----+-----+-----+
| (2) | 7   | 14  | 18  | 35  | 12  |
+-----+-----+-----+-----+-----+-----+
| (3) | 1   | 3   | 2   | 7   | 17  |
+-----+-----+-----+-----+-----+-----+

For example, 27 customers assigned their room a cleanliness ranking of
"below average (1)" and a corresponding satisfaction of "very
dissatisfied (1)". We perform the analysis as follows.

>>> from scipy.stats import somersd
>>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
>>> res = somersd(table)
>>> res.statistic
0.6032766111513396
>>> res.pvalue
1.0007091191074533e-27

The value of the Somers' D statistic is approximately 0.6, indicating
a positive correlation between room cleanliness and customer satisfaction
in the sample.
The *p*-value is very small, indicating a very small probability of
observing such an extreme value of the statistic under the null
hypothesis that the statistic of the entire population (from which
our sample of 189 customers is drawn) is zero. This supports the
alternative hypothesis that the true value of Somers' D for the population
is nonzero.

r   z!Rankings must be of equal length.r!   r   z;All elements of the contingency table must be non-negative.z6All elements of the contingency table must be integer.z?At least two elements of the contingency table must be nonzero.z!x must be either a 1D or 2D array)r4   arrayr:   r   r7   scipystatscontingencycrosstabr<   astyper   nonzeror  r   r  correlation)rO   rP   r1   r  r  rb   r   s          rc   r   r     s&   v 88A;qvv{66QVV@AA''006q9	
166!a%== - . .66!xx}$%% ( ) )99;q>" 0 1 1<==U\\%(+6DA e
$COJrd   c              #      #    [         R                  " X-   5      n[        X 5       HC  n[         R                  " U5      n[         R                  " X-   [
        5      nSXT'   X%   nXF4v   ME     g7f)z
Partition a set of indices into two fixed-length sets in all possible ways

Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
ny in all possible ways (ignoring order of elements).
FN)r4   rq   r   r  r   r   )rR   rS   r   r   rO   maskrP   s          rc   _all_partitionsr&    sZ      			"%A! HHQKwwrud#Gd
 !s   A,A.c                     [        [        R                  " U S-   5      S-   5      n[        U S-   5      U-
  USSS2   -
  $ )z'Compute all log combination of C(n, k).r   Nr)   )r   r4   rq   )rT   gammaln_arrs     rc   _compute_log_combinationsr)    s?    "))AE*Q./K1q5>K'+dd*;;;rd   c                   *    \ rS rSr% \\S'   \\S'   Srg)BarnardExactResulti  r   r    r   Nr   r   r   r   r   r  r   r   rd   rc   r+  r+        Mrd   r+  c                 F   US::  a  [        SU< 35      e[        R                  " U [        R                  S9n U R                  S:X  d  [        S5      e[        R
                  " U S:  5      (       a  [        S5      eSU R                  SS9;   a  [        [        R                  S5      $ U R                  SS9u  pE[        R                  " US	-   [        R                  S9R                  S
S	5      n[        R                  " US	-   [        R                  S9R                  S	S
5      nXd-  Xu-  pU(       a  Xg-   XE-   -  n
U
S	U
-
  -  S	U-  S	U-  -   -  nOUS	U-
  -  U-  U	S	U	-
  -  U-  -   n[        R                  " SSS9   [        R                  " X-
  [        R                  " U5      5      nSSS5        SWX:H  '   XS   U S   4   nUS:X  a#  [        R                  " U5      [        U5      :  nO'US:X  a  X:*  nOUS:X  a  X:  nOSU< 3n[        U5      eXg-   n[        U5      n[        U5      nUU   UU   -   n[!        ["        UUU4SUSS9n[        R$                  " [        R&                  " UR(                  * 5      SS	S9n[        UU5      $ ! , (       d  f       N= f)a  Perform a Barnard exact test on a 2x2 contingency table.

Parameters
----------
table : array_like of ints
    A 2x2 contingency table.  Elements should be non-negative integers.

alternative : {'two-sided', 'less', 'greater'}, optional
    Defines the null and alternative hypotheses. Default is 'two-sided'.
    Please see explanations in the Notes section below.

pooled : bool, optional
    Whether to compute score statistic with pooled variance (as in
    Student's t-test, for example) or unpooled variance (as in Welch's
    t-test). Default is ``True``.

n : int, optional
    Number of sampling points used in the construction of the sampling
    method. Note that this argument will automatically be converted to
    the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
    select sample points. Default is 32. Must be positive. In most cases,
    32 points is enough to reach good precision. More points comes at
    performance cost.

Returns
-------
ber : BarnardExactResult
    A result object with the following attributes.

    statistic : float
        The Wald statistic with pooled or unpooled variance, depending
        on the user choice of `pooled`.

    pvalue : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
    contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
    which is an uniformly more powerful alternative to Fisher's exact test.

Notes
-----
Barnard's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a more powerful alternative than Fisher's exact test
for 2x2 contingency tables.

Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for  :math:`x_{11}` and :math:`x_{12}`. When using
Barnard exact test, we can assert three different null hypotheses :

- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
  with `alternative` = "less"

- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
  with `alternative` = "greater"

- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
  with `alternative` = "two-sided" (default one)

In order to compute Barnard's exact test, we are using the Wald
statistic [3]_ with pooled or unpooled variance.
Under the default assumption that both variances are equal
(``pooled = True``), the statistic is computed as:

.. math::

    T(X) = \frac{
        \hat{p}_1 - \hat{p}_2
    }{
        \sqrt{
            \hat{p}(1 - \hat{p})
            (\frac{1}{c_1} +
            \frac{1}{c_2})
        }
    }

with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
:math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
given the assumption that :math:`p_1 = p_2`.

If this assumption is invalid (``pooled = False``), the statistic is:

.. math::

    T(X) = \frac{
        \hat{p}_1 - \hat{p}_2
    }{
        \sqrt{
            \frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
            \frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
        }
    }

The p-value is then computed as:

.. math::

    \sum
        \binom{c_1}{x_{11}}
        \binom{c_2}{x_{12}}
        \pi^{x_{11} + x_{12}}
        (1 - \pi)^{t - x_{11} - x_{12}}

where the sum is over all  2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sum of the columns 1 and 2,
and :math:`t` the total (sum of the 4 sample's element).

The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.

This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.

References
----------
.. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
       34.1/2 (1947): 123-138. :doi:`dpgkg3`

.. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
       unconditional exact tests for comparing two binomials."
       *Cytel Software Corporation* 675 (2003): 1-5.

.. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test

Examples
--------
An example use of Barnard's test is presented in [2]_.

    Consider the following example of a vaccine efficacy study
    (Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
    inoculated with a recombinant DNA influenza vaccine and the 15 were
    inoculated with a placebo. Twelve of the 15 subjects in the placebo
    group (80%) eventually became infected with influenza whereas for the
    vaccine group, only 7 of the 15 subjects (47%) became infected. The
    data are tabulated as a 2 x 2 table::

            Vaccine  Placebo
        Yes     7        12
        No      8        3

When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.

Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine.  Therefore, we call
`barnard_exact` with the ``alternative="less"`` option:

>>> import scipy.stats as stats
>>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
-1.894
>>> res.pvalue
0.03407

Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.

Suppose we had used Fisher's exact test instead:

>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640

With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.

r   6Number of points `n` must be strictly positive, found r   r!   r!   *The input `table` must be of shape (2, 2).*All values in `table` must be nonnegative.r,   r/   r   r)   rh   rj   ri   Nr   r   r   r   re   rk   r0   zG`alternative` should be one of {'two-sided', 'less', 'greater'}, found r5  sobolr   boundsrT   sampling_methoda_mina_max)r7   r4   r5   int64r   r<   ru   r+  r   rq   r?   rs   rj   rn   rt   r)  r   -_get_binomial_log_p_value_with_nuisance_paramr   r   fun)r  r1   pooledrT   total_col_1total_col_2r   r   p1p2rb   	varianceswald_statisticwald_stat_obs	index_arrmsg	x1_sum_x2x1_log_combx2_log_combx1_sum_x2_log_combresultp_values                         rc   r   r     s   | 	AvE
 	

 JJuBHH-E;;& EFF	vveaiEFFEII1I ""&&#..$yyay0K	;?"((	3	;	;B	BB	;?"((	3	;	;Ar	BB r/W23QK1{?Q_#DE	!b&MK/"B-+2MM	 
Hh	7BGbggi.@A 
8  !N28";d#;<Mk!FF>*c-.@@			"3				!"3	!_& 	 oI+K8K+K8K$R;r?:5+Y7
F ggbfffjj[)!<GmW55K 
8	7s   ?.J
J c                   *    \ rS rSr% \\S'   \\S'   Srg)BoschlooExactResulti  r   r    r   Nr,  r   rd   rc   rR  rR    r-  rd   rR  c                    [         R                  nUS::  a  [        SU< 35      e[        R                  " U [        R
                  S9n U R                  S:X  d  [        S5      e[        R                  " U S:  5      (       a  [        S5      eSU R                  SS9;   a(  [        [        R                  [        R                  5      $ U R                  SS9u  pEXE-   n[        R                  " US-   [        R
                  S9R                  SS	5      n[        R                  " US-   [        R
                  S9R                  S	S5      nXx-   n	US
:X  a  UR                  XvX5      R                  n
OUS:X  a  UR                  XX5      R                  n
OUS:X  am  [        U S
US9n[        U SUS9nUR                   UR                   :  a  UOUn[        R"                  " SUR                   -  SSS9n[        UR$                  U5      $ SS SU< 3n[        U5      eXS   U S   4   nU
US-  :*  nUR                  UR                  U	R                  pn['        U5      n['        U5      nUU   UU   -   n[)        [*        U	UU4SUSS9n[        R"                  " [        R,                  " UR.                  * 5      SSS9n[        UU5      $ )ah  Perform Boschloo's exact test on a 2x2 contingency table.

Parameters
----------
table : array_like of ints
    A 2x2 contingency table.  Elements should be non-negative integers.

alternative : {'two-sided', 'less', 'greater'}, optional
    Defines the null and alternative hypotheses. Default is 'two-sided'.
    Please see explanations in the Notes section below.

n : int, optional
    Number of sampling points used in the construction of the sampling
    method. Note that this argument will automatically be converted to
    the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
    select sample points. Default is 32. Must be positive. In most cases,
    32 points is enough to reach good precision. More points comes at
    performance cost.

Returns
-------
ber : BoschlooExactResult
    A result object with the following attributes.

    statistic : float
        The statistic used in Boschloo's test; that is, the p-value
        from Fisher's exact test.

    pvalue : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
    contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
barnard_exact : Barnard's exact test, which is a more powerful alternative
    than Fisher's exact test for 2x2 contingency tables.

Notes
-----
Boschloo's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a uniformly more powerful alternative to Fisher's exact test
for 2x2 contingency tables.

Boschloo's exact test uses the p-value of Fisher's exact test as a
statistic, and Boschloo's p-value is the probability under the null
hypothesis of observing such an extreme value of this statistic.

Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for  :math:`x_{11}` and :math:`x_{12}`. When using
Boschloo exact test, we can assert three different alternative hypotheses:

- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
  with `alternative` = "less"

- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
  with `alternative` = "greater"

- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
  with `alternative` = "two-sided" (default)

There are multiple conventions for computing a two-sided p-value when the
null distribution is asymmetric. Here, we apply the convention that the
p-value of a two-sided test is twice the minimum of the p-values of the
one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
different convention, so for a given `table`, the statistic reported by
`boschloo_exact` may differ from the p-value reported by `fisher_exact`
when ``alternative='two-sided'``.

.. versionadded:: 1.7.0

References
----------
.. [1] R.D. Boschloo. "Raised conditional level of significance for the
   2 x 2-table when testing the equality of two probabilities",
   Statistica Neerlandica, 24(1), 1970

.. [2] "Boschloo's test", Wikipedia,
   https://en.wikipedia.org/wiki/Boschloo%27s_test

.. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
   Human Resource Management, 43(4), 395-407, 2004,
   :doi:`10.1002/hrm.20032`.

Examples
--------
In the following example, we consider the article "Employee
attitudes and job satisfaction" [3]_
which reports the results of a survey from 63 scientists and 117 college
professors. Of the 63 scientists, 31 said they were very satisfied with
their jobs, whereas 74 of the college professors were very satisfied
with their work. Is this significant evidence that college
professors are happier with their work than scientists?
The following table summarizes the data mentioned above::

                     college professors   scientists
    Very Satisfied   74                     31
    Dissatisfied     43                     32

When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.

Our alternative hypothesis is that college professors are truly more
satisfied with their work than scientists. Therefore, we expect
:math:`p_1` the proportion of very satisfied college professors to be
greater than :math:`p_2`, the proportion of very satisfied scientists.
We thus call `boschloo_exact` with the ``alternative="greater"`` option:

>>> import scipy.stats as stats
>>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
>>> res.statistic
0.0483
>>> res.pvalue
0.0355

Under the null hypothesis that scientists are happier in their work than
college professors, the probability of obtaining test
results at least as extreme as the observed data is approximately 3.55%.
Since this p-value is less than our chosen significance level, we have
evidence to reject :math:`H_0` in favor of the alternative hypothesis.

r   r/  r   r0  r1  r2  r,   r   r)   rk   r0   re   )r1   rT   r!   r;  z`alternative` should be one of )re   rk   r0   z, found r4  r5  g    ?r6  r7  r8  )r	   	hypergeomr7   r4   r5   r>  r   r<   ru   rR  r   rq   r?   r   rC   r   r    r   r   r)  r   r?  r   r@  )r  r1   rT   rT  rB  rC  totalr   r   rK  pvaluesboschloo_lessboschloo_greaterr   r    rJ  fisher_statrI  rL  rM  rN  rO  rP  s                          rc   r   r     s   F ''IAvU
 	

 JJuBHH-E;;& EFF	vveaiEFFEII1I #26626622$yyay0K%E	;?"((	3	;	;Ar	BB	;?"((	3	;	;B	BBIf--9BDD			!--9BDD		#&u&AF)%Y!L +114D4K4KKM! 	 SZZq:"3==&99 ..L-M N!_& 	 o+uT{23K
 ;'22IbddIKKIB+K8K+K8K$R;r?:5+Y7
F ggbfffjj[)!<G{G44rd   c           	         UR                   u  pEXE-   S-
  n[        R                  " SSS9   [        R                  " U [        R                  " U 5      U S:  S9n[        R                  " SU -
  [        R                  " U 5      SU -
  S:  S9nXq-  n	SXS:H  SS2SS24   '   XU-
  -  n
SXU:H  SS2SS24   '   UU	-   U
-   nSSS5        WU   nUR                  5       n[        R                  " SSS9   [        R                  " X-
  5      R                  5       nU[        R                  " U[        R                  " U[        R                  * 5      US:  S9-   nSSS5        U* $ ! , (       d  f       N= f! , (       d  f       W* $ = f)a  
Compute the log pvalue in respect of a nuisance parameter considering
a 2x2 sample space.

Parameters
----------
nuisance_param : float
    nuisance parameter used in the computation of the maximisation of
    the p-value. Must be between 0 and 1

x1_sum_x2 : ndarray
    Sum of x1 and x2 inside barnard_exact

x1_sum_x2_log_comb : ndarray
    sum of the log combination of x1 and x2

index_arr : ndarray of boolean

Returns
-------
p_value : float
    Return the maximum p-value considering every nuisance parameter
    between 0 and 1

Notes
-----

Both Barnard's test and Boschloo's test iterate over a nuisance parameter
:math:`\pi \in [0, 1]` to find the maximum p-value. To search this
maxima, this function return the negative log pvalue with respect to the
nuisance parameter passed in params. This negative log p-value is then
used in `shgo` to find the minimum negative pvalue which is our maximum
pvalue.

Also, to compute the different combination used in the
p-values' computation formula, this function uses `gammaln` which is
more tolerant for large value than `scipy.special.comb`. `gammaln` gives
a log combination. For the little precision loss, performances are
improved a lot.
r!   rh   r3  r   )outwherer   N)
r   r4   rs   logr   rL   r   ru   	full_likeinf)nuisance_paramrK  rN  rI  t1t2rT   log_nuisancelog_1_minus_nuisancenuisance_power_x1_x2nuisance_power_n_minus_x1_x2tmp_log_values_arrtmp_values_from_index	max_value	log_probs
log_pvalues                   rc   r?  r?  {  s   V __FB
!A	Hh	7vvn- A%

  "vvn-n$) 
  ,7781nad34';9}'M$?@$1nad%;< "#*+ 	% 
80 /y9 &))+I 
Hh	7FF0<=AAC	Y0a-"
 

 
8 ;W 
8	7D 
8	7 ;s   BE#1A'E4#
E14
Fc                     [         R                  " X5      nX1-  nX2-  nX-  nUS-  X-   -  SU -  USU-  S-
  -  -
  -  SUS-  -  -  nUS-  X-   -  n[        X-   U5      n	[        X5      n
[         R                  " U
5      n[         R
                  " S/S//US9/[        U5       Vs/ s H  n[         R                  " SUS9PM     sn-   n[        US-   5       H  n/ n[         R                  " SUS9n[        U5       H  u  nn[         R                  " US   US   SS	9u  nnn[         R                  " [         R                  " UUSU4   USU4   -   /5      [         R                  " UUS5      [         R                  " UUS5      /S5      nUU-  X^-  -
  S-  nUS==   UR                  U5      -  ss'   UR                  U5        M     UnM     X   u  nn[         R                  " [         R                   " UUU:     5      U	-  5      $ s  snf )
u  
Compute the exact p-value of the Cramer-von Mises two-sample test
for a given value s of the test statistic.
m and n are the sizes of the samples.

[1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
    the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
    vol. 17, no. 8, pp. 1-15, Dec. 2006.
[2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
    Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
    33(3), 1148-1159, (September, 1962)
r!      r"   r   r   r   )r!   r   T)return_indices)r4   lcmr   rL   min_scalar_typer  r   empty	enumerateintersect1dconcatenatestackdeleter!  appendr   ru   )sr   rT   ro  ar   mnzeta
zeta_boundr   max_gsr   _gsr   next_gstmpvgvii0i1r   valuefreqs                            rc   _pval_cvm_2samp_exactr    s    &&,CAA 
B!8quQq2vz):!:;B!GLD a15!Jq>L*Fv&E 88aS1#Je,
-49!H=HqRXXfE*H=>B1q5\hhvU+bMDAq A!TJJBB.."c!R%j1QU8345		#r1%		!R#" 	C
 q515=Q&CFcjj''FNN3 "    %KE4::bffT%4-01L@AA% >s   'Hc           
      T   [         R                  " [         R                  " U 5      5      n[         R                  " [         R                  " U5      5      nUR                  S::  d  UR                  S::  a  [	        S5      eUS;  a  [	        S5      e[        U5      n[        U5      nUS:X  a  [        XV5      S:  a  SnOSn[         R                  " X4/5      n[        R                  R                  US	S
9nUSU n	XS n
U[         R                  " U	[         R                  " SUS-   5      -
  S-  5      -  nX[         R                  " U
[         R                  " SUS-   5      -
  S-  5      -  -  nXV-  XV-   pXU-  -  SU-  S-
  SU-  -  -
  nUS:X  a  [        XU5      nOSSU-  -   S-  nUS-   SU-  U-  SUS-  US-  -   -  -
  SU-  -
  -  nUSUS-  -  S-  U-  -  nSUU-
  [         R                  " SU-  5      -  -   nUS:  a  SnO[        SS[        U5      -
  5      n[!        XS9$ )u  Perform the two-sample Cramér-von Mises test for goodness of fit.

This is the two-sample version of the Cramér-von Mises test ([1]_):
for two independent samples :math:`X_1, ..., X_n` and
:math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
come from the same (unspecified) continuous distribution.

Parameters
----------
x : array_like
    A 1-D array of observed values of the random variables :math:`X_i`.
    Must contain at least two observations.
y : array_like
    A 1-D array of observed values of the random variables :math:`Y_i`.
    Must contain at least two observations.
method : {'auto', 'asymptotic', 'exact'}, optional
    The method used to compute the p-value, see Notes for details.
    The default is 'auto'.

Returns
-------
res : object with attributes
    statistic : float
        Cramér-von Mises statistic.
    pvalue : float
        The p-value.

See Also
--------
cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp

Notes
-----
.. versionadded:: 1.7.0

The statistic is computed according to equation 9 in [2]_. The
calculation of the p-value depends on the keyword `method`:

- ``asymptotic``: The p-value is approximated by using the limiting
  distribution of the test statistic.
- ``exact``: The exact p-value is computed by enumerating all
  possible combinations of the test statistic, see [2]_.

If ``method='auto'``, the exact approach is used
if both samples contain equal to or less than 20 observations,
otherwise the asymptotic distribution is used.

If the underlying distribution is not continuous, the p-value is likely to
be conservative (Section 6.2 in [3]_). When ranking the data to compute
the test statistic, midranks are used if there are ties.

References
----------
.. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
.. [2] Anderson, T.W. (1962). On the distribution of the two-sample
       Cramer-von-Mises criterion. The Annals of Mathematical
       Statistics, pp. 1148-1159.
.. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.

Examples
--------

Suppose we wish to test whether two samples generated by
``scipy.stats.norm.rvs`` have the same distribution. We choose a
significance level of alpha=0.05.

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = stats.norm.rvs(size=100, random_state=rng)
>>> y = stats.norm.rvs(size=70, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y)
>>> res.statistic, res.pvalue
(0.29376470588235293, 0.1412873014573014)

The p-value exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed samples are drawn from the
same distribution.

For small sample sizes, one can compute the exact p-values:

>>> x = stats.norm.rvs(size=7, random_state=rng)
>>> y = stats.t.rvs(df=2, size=6, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y, method='exact')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.31643356643356646)

The p-value based on the asymptotic distribution is a good approximation
even though the sample size is small.

>>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.2966041181527128)

Independent of the method, one would not reject the null hypothesis at the
chosen significance level in this example.

r   z/x and y must contain at least two observations.)autoexact
asymptoticz0method must be either auto, exact or asymptotic.r     r  r  average)methodNr!   r"   rm  r   -   gUUUUUU?g~jth?r/   r   r   )r4   r   r5   r   r7   r6   rL   rt  r  r  rankdataru   rq   r  rn   r   r   )rO   rP   r  xayarR   rS   r   r]   rxryr   r   NrQ   rb   etvttns                      rc   r   r     s(   J 
A	B	A	B	ww!|rww!|JKK44KLL	RB	RBr;!FF 	x AQy1A	
3BB	
3B 	RVVR"))Ar!t,,q011Abffb299Q1--12	22A 5"'q	qS	QqS1WqsO#A!!, !A#gq[cac!eaQQ//!A#56219q=1$% AFbggb2g...
 :AArL,,-A!66rd   c                   .    \ rS rSrSrS rS rSS jrSrg)	TukeyHSDResulti  a^  Result of `scipy.stats.tukey_hsd`.

Attributes
----------
statistic : float ndarray
    The computed statistic of the test for each comparison. The element
    at index ``(i, j)`` is the statistic for the comparison between groups
    ``i`` and ``j``.
pvalue : float ndarray
    The associated p-value from the studentized range distribution. The
    element at index ``(i, j)`` is the p-value for the comparison
    between groups ``i`` and ``j``.

Notes
-----
The string representation of this object displays the most recently
calculated confidence interval, and if none have been previously
calculated, it will evaluate ``confidence_interval()``.

References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
       Method."
       https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
       28 November 2020.
c                 \    Xl         X l        X@l        X0l        XPl        S U l        S U l        g r   )r   r    _ntreatments_nobs
_stand_err_ci_ci_cl)r   r   r    r  r  r  s         rc   r   TukeyHSDResult.__init__  s*    "(
$rd   c                    U R                   c  U R                  SS9  SU R                  S-  S S3nUS-  n[        U R                  R
                  S   5       H  n[        U R                  R
                  S   5       Ho  nX#:w  d  M
  US	U S
U SU R                  X#4   S U R                  X#4   S U R                   R                  X#4   S U R                   R                  X#4   S S3
-  nMq     M     U$ )Nffffff?)confidence_levelz(Tukey's HSD Pairwise Group Comparisons (d   z.1fz% Confidence Interval)
z3Comparison  Statistic  p-value  Lower CI  Upper CI
r   z (z - z) z>10.3f
)	r  confidence_intervalr  r   r    r   r   lowhigh)r   rx  ir   s       rc   __str__TukeyHSDResult.__str__  s    88$$c$:++c/#&&>@	CCt{{((+,A4;;,,Q/06BqcQCr$..*>v)F![[.v6!XX\\!$/7!XX]]1408< =A 1 - rd   c                    U R                   b(  U R                  b  XR                  :X  a  U R                   $ SUs=:  a  S:  d  O  [        S5      eXR                  U R                  U R                  -
  4n[
        R                  R                  " U6 nX0R                  -  nU R                  U-   nU R                  U-
  n[        XeS9U l         Xl        U R                   $ )a  Compute the confidence interval for the specified confidence level.

Parameters
----------
confidence_level : float, optional
    Confidence level for the computed confidence interval
    of the estimated proportion. Default is .95.

Returns
-------
ci : ``ConfidenceInterval`` object
    The object has attributes ``low`` and ``high`` that hold the
    lower and upper bounds of the confidence intervals for each
    comparison. The high and low values are accessible for each
    comparison at index ``(i, j)`` between groups ``i`` and ``j``.

References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
       Tukey's Method."
       https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
       28 November 2020.

Examples
--------
>>> from scipy.stats import tukey_hsd
>>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
>>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
>>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
>>> result = tukey_hsd(group0, group1, group2)
>>> ci = result.confidence_interval()
>>> ci.low
array([[-3.649159, -8.249159, -3.909159],
       [ 0.950841, -3.649159,  0.690841],
       [-3.389159, -7.989159, -3.649159]])
>>> ci.high
array([[ 3.649159, -0.950841,  3.389159],
       [ 8.249159,  3.649159,  7.989159],
       [ 3.909159, -0.690841,  3.649159]])
r   r   z)Confidence level must be between 0 and 1.)r  r  )r  r  r7   r  r  r	   studentized_rangerp   r  r   r
   )r   r  paramssrdtukey_criterion
upper_conf
lower_confs          rc   r  "TukeyHSDResult.confidence_interval  s    V HH T[[%< KK/88O#'a'HII #JJ0A0ADJJ0NO--116: / ^^o5
^^o5
%*F&xxrd   )r  r  r  r  r  r    r   N)r  )	r   r   r   r   __doc__r   r  r  r   r   rd   rc   r  r    s    6$Crd   r  c                 v   [        U 5      S:  a  [        S5      eU  Vs/ s H  n[        R                  " U5      PM     n nU  Hm  nUR                  S:w  a  [        S5      eUR
                  S::  a  [        S5      e[        R                  " U5      R                  5       (       d  Md  [        S5      e   U $ s  snf )Nr!   z$There must be more than 1 treatment.r   z&Input samples must be one-dimensional.z+Input sample size must be greater than one.zInput samples must be finite.)r6   r7   r4   r5   r:   r   isinfr<   )r   args     rc   _tukey_hsd_ivr  #  s    D	Q?@@'+,tBJJsOtD,88q=EFF88q=JKK88C=<==  K -s    B6c            
      N   [        U 5      n [        U 5      n[        R                  " U  Vs/ s H  n[        R                  " U5      PM     sn5      n[        R                  " U  Vs/ s H  oDR
                  PM     sn5      n[        R                  " U5      n[        R                  " U  Vs/ s H  n[        R                  " USS9PM     snUS-
  -  5      Xa-
  -  n[        R                  " U5      R
                  S:X  a	  SUS   -  nOSU-  SUS   R                  -  -   n[        R                  " X-  S-  5      n	US   R                  U-
  n
[        R                  " U
5      U	-  nXXa-
  4n[        R                  R                  " U6 n[        XUXi5      $ s  snf s  snf s  snf )a  Perform Tukey's HSD test for equality of means over multiple treatments.

Tukey's honestly significant difference (HSD) test performs pairwise
comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`)
assesses whether the true means underlying each sample are identical,
Tukey's HSD is a post hoc test used to compare the mean of each sample
to the mean of each other sample.

The null hypothesis is that the distributions underlying the samples all
have the same mean. The test statistic, which is computed for every
possible pairing of samples, is simply the difference between the sample
means. For each pair, the p-value is the probability under the null
hypothesis (and other assumptions; see notes) of observing such an extreme
value of the statistic, considering that many pairwise comparisons are
being performed. Confidence intervals for the difference between each pair
of means are also available.

Parameters
----------
sample1, sample2, ... : array_like
    The sample measurements for each group. There must be at least
    two arguments.

Returns
-------
result : `~scipy.stats._result_classes.TukeyHSDResult` instance
    The return value is an object with the following attributes:

    statistic : float ndarray
        The computed statistic of the test for each comparison. The element
        at index ``(i, j)`` is the statistic for the comparison between
        groups ``i`` and ``j``.
    pvalue : float ndarray
        The computed p-value of the test for each comparison. The element
        at index ``(i, j)`` is the p-value for the comparison between
        groups ``i`` and ``j``.

    The object has the following methods:

    confidence_interval(confidence_level=0.95):
        Compute the confidence interval for the specified confidence level.

See Also
--------
dunnett : performs comparison of means against a control group.

Notes
-----
The use of this test relies on several assumptions.

1. The observations are independent within and among groups.
2. The observations within each group are normally distributed.
3. The distributions from which the samples are drawn have the same finite
   variance.

The original formulation of the test was for samples of equal size [6]_.
In case of unequal sample sizes, the test uses the Tukey-Kramer method
[4]_.

References
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
       Method."
       https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
       28 November 2020.
.. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
       Difference (HSD) Test."
       https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
.. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
       Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
.. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
       Means with Unequal Numbers of Replications." Biometrics, vol. 12,
       no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
       Accessed 25 May 2021.
.. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
       The ANOVA table and tests of hypotheses about means"
       https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
       2 June 2021.
.. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
       Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
       www.jstor.org/stable/3001913. Accessed 14 June 2021.


Examples
--------
Here are some data comparing the time to relief of three brands of
headache medicine, reported in minutes. Data adapted from [3]_.

>>> import numpy as np
>>> from scipy.stats import tukey_hsd
>>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
>>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
>>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]

We would like to see if the means between any of the groups are
significantly different. First, visually examine a box and whisker plot.

>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.boxplot([group0, group1, group2])
>>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP
>>> ax.set_ylabel("mean") # doctest: +SKIP
>>> plt.show()

From the box and whisker plot, we can see overlap in the interquartile
ranges group 1 to group 2 and group 3, but we can apply the ``tukey_hsd``
test to determine if the difference between means is significant. We
set a significance level of .05 to reject the null hypothesis.

>>> res = tukey_hsd(group0, group1, group2)
>>> print(res)
Tukey's HSD Pairwise Group Comparisons (95.0% Confidence Interval)
Comparison  Statistic  p-value   Lower CI   Upper CI
(0 - 1)     -4.600      0.014     -8.249     -0.951
(0 - 2)     -0.260      0.980     -3.909      3.389
(1 - 0)      4.600      0.014      0.951      8.249
(1 - 2)      4.340      0.020      0.691      7.989
(2 - 0)      0.260      0.980     -3.389      3.909
(2 - 1)     -4.340      0.020     -7.989     -0.691

The null hypothesis is that each group has the same mean. The p-value for
comparisons between ``group0`` and ``group1`` as well as ``group1`` and
``group2`` do not exceed .05, so we reject the null hypothesis that they
have the same means. The p-value of the comparison between ``group0``
and ``group2`` exceeds .05, so we accept the null hypothesis that there
is not a significant difference between their means.

We can also compute the confidence interval associated with our chosen
confidence level.

>>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
>>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
>>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
>>> result = tukey_hsd(group0, group1, group2)
>>> conf = res.confidence_interval(confidence_level=.99)
>>> for ((i, j), l) in np.ndenumerate(conf.low):
...     # filter out self comparisons
...     if i != j:
...         h = conf.high[i,j]
...         print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}")
(0 - 1) -9.480  0.280
(0 - 2) -5.140  4.620
(1 - 0) -0.280  9.480
(1 - 2) -0.540  9.220
(2 - 0) -4.620  5.140
(2 - 1) -9.220  0.540
r   )ddofr!   r   N)r  r6   r4   r5   rJ   r   ru   r{   uniquerC   rn   rt   r	   r  r  r  )r   ntreatmentsr  meansry  nsamples_treatmentsnobsmse	normalize	stand_errmean_differencest_statr  rV  s                 rc   r   r   1  s   h Dd)KJJ556E**d%;dffd%;<66%&D 66$7$3266#A&$7&*, -040BDC
 
yy$%**a/ +A..	
 ++a2Ed2K2M2M.MM	 	!+,I T{}}u, VV$%	1F$"44F--00&9G*[+ +G 6%;
 8s    F&F,F"))g?g?r   )r   )re   )Nre   )re   T    )re   r  )r  )Acollectionsr   dataclassesr   mathr   numpyr4   rH   	itertoolsr   r=   r  scipy.optimizer    r	   _commonr
   _continuous_distnsr   scipy.specialr   r   r   	scipy.fftr   _stats_pythranr   r   r  r   r  _axis_nan_policyr   r   __all__r   r   r   rl   r   r   r   r   r   r   r   r   r  r  r  r   r&  r)  r+  r   rR  r   r?  r  r   r  r  r   r   rd   rc   <module>r     s   " !    "    ' $ , ,  * 7 !. ((D(?A  4QOv, Pv,r 01k v8rJ(* */d!HB% .!q*>@q7@q7h"666   sn <   
F6R   
N5bXv2Bj .!q*>@V7@V7rz zzz+rd   