
from __future__ import annotations

from statsmodels.compat.python import lzip

from functools import reduce
import warnings

import numpy as np
import pandas as pd
from scipy import stats

from statsmodels.base.data import handle_data
from statsmodels.base.optimizer import Optimizer
import statsmodels.base.wrapper as wrap
from statsmodels.formula.formulatools import handle_formula_data
from statsmodels.stats.contrast import (
    ContrastResults,
    WaldTestResults,
    t_test_pairwise,
)
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.decorators import (
    cache_readonly,
    cached_data,
    cached_value,
)
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.tools.sm_exceptions import (
    HessianInversionWarning,
    ValueWarning,
)
from statsmodels.tools.tools import nan_dot, recipr
from statsmodels.tools.validation import bool_like

ERROR_INIT_KWARGS = False

_model_params_doc = """Parameters
    ----------
    endog : array_like
        A 1-d endogenous response variable. The dependent variable.
    exog : array_like
        A nobs x k array where `nobs` is the number of observations and `k`
        is the number of regressors. An intercept is not included by default
        and should be added by the user. See
        :func:`statsmodels.tools.add_constant`."""

_missing_param_doc = """missing : str
        Available options are 'none', 'drop', and 'raise'. If 'none', no nan
        checking is done. If 'drop', any observations with nans are dropped.
        If 'raise', an error is raised. Default is 'none'."""

_extra_param_doc = """
    hasconst : None or bool
        Indicates whether the RHS includes a user-supplied constant. If True,
        a constant is not checked for and k_constant is set to 1 and all
        result statistics are calculated as if a constant is present. If
        False, a constant is not checked for and k_constant is set to 0.
    **kwargs
        Extra arguments that are used to set model properties when using the
        formula interface."""


class Model:
    __doc__ = """
     A (predictive) statistical model. Intended to be subclassed, not used directly.

    {params_doc}
    {extra_params_doc}

    Attributes
    ----------
    exog_names
    endog_names

    Notes
    -----
    `endog` and `exog` are references to any data provided.  So if the data is
    already stored in numpy arrays and it is changed then `endog` and `exog`
    will change as well.
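     Examples
     --------
     A small sketch of the reference behaviour described above (the arrays and
     the OLS subclass are only illustrative; no copy is made here because the
     inputs are already float ndarrays)::

         >>> import numpy as np
         >>> import statsmodels.api as sm
         >>> y = np.array([1., 2., 3., 4.])
         >>> x = sm.add_constant(np.array([1., 2., 3., 5.]))
         >>> mod = sm.OLS(y, x)
         >>> y[0] = 9.0
         >>> float(mod.endog[0])   # endog reflects the in-place change
         9.0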
    """.format(params_doc=_model_params_doc,
               extra_params_doc=_missing_param_doc + _extra_param_doc)

    _formula_max_endog = 1

    _kwargs_allowed = [
        "missing", "missing_idx", "formula", "design_info", "hasconst",
    ]

    def __init__(self, endog, exog=None, **kwargs):
        missing = kwargs.pop('missing', 'none')
        hasconst = kwargs.pop('hasconst', None)
        self.data = self._handle_data(endog, exog, missing, hasconst,
                                      **kwargs)
        self.k_constant = self.data.k_constant
        self.exog = self.data.exog
        self.endog = self.data.endog
        self._data_attr = []
        self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog'])
        if 'formula' not in kwargs:  # needed to unpickle the raw data
            self._data_attr.extend(['data.orig_endog', 'data.orig_exog'])
        # store keys for extracting subsets of data
        self._init_keys = list(kwargs.keys())
        if hasconst is not None:
            self._init_keys.append('hasconst')

    def _get_init_kwds(self):
        """return dictionary with extra keys used in model.__init__
        """
        kwds = {key: getattr(self, key, None) for key in self._init_keys}
        return kwds

    def _check_kwargs(self, kwargs, keys_extra=None, error=ERROR_INIT_KWARGS):
        # warn or raise on keyword arguments the model does not recognize
        kwargs_allowed = list(self._kwargs_allowed)
        if keys_extra:
            kwargs_allowed.extend(keys_extra)

        kwargs_invalid = [i for i in kwargs if i not in kwargs_allowed]
        if kwargs_invalid:
            msg = "unknown kwargs " + repr(kwargs_invalid)
            if error is False:
                warnings.warn(msg, ValueWarning)
            else:
                raise ValueError(msg)

    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        data = handle_data(endog, exog, missing, hasconst, **kwargs)
        # kwargs arrays could have changed, easier to just attach here
        for key in kwargs:
            if key in ['design_info', 'formula']:  # leave attached to data
                continue
            try:
                setattr(self, key, data.__dict__.pop(key))
            except KeyError:
                pass
        return data

    @classmethod
    def from_formula(cls, formula, data, subset=None, drop_cols=None,
                     *args, **kwargs):
        """
Create a Model from a formula and dataframe.

Parameters
----------
formula : str or generic Formula object
    The formula specifying the model.
data : array_like
    The data for the model. See Notes.
subset : array_like
    An array-like object of booleans, integers, or index values that
    indicate the subset of df to use in the model. Assumes df is a
    `pandas.DataFrame`.
drop_cols : array_like
    Columns to drop from the design matrix.  Cannot be used to
    drop terms involving categoricals.
*args
    Additional positional arguments that are passed to the model.
**kwargs
    These are passed to the model with one exception. The
    ``eval_env`` keyword is passed to patsy. It can be either a
    :class:`patsy:patsy.EvalEnvironment` object or an integer
    indicating the depth of the namespace to use. For example, the
    default ``eval_env=0`` uses the calling namespace. If you wish
    to use a "clean" environment set ``eval_env=-1``.

Returns
-------
model
    The model instance.

Notes
-----
data must define __getitem__ with the keys in the formula terms; e.g., it can
be a numpy structured or rec array, a dictionary, or a pandas DataFrame.
args and kwargs are passed on to the model instantiation.
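Examples
--------
A small usage sketch on a concrete subclass (the data and formula are only
illustrative)::

    >>> import pandas as pd
    >>> import statsmodels.api as sm
    >>> df = pd.DataFrame({"y": [1., 2., 3., 4.], "x": [1., 2., 3., 5.]})
    >>> mod = sm.OLS.from_formula("y ~ x", data=df)
    >>> res = mod.fit()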
Neval_env   r   )EvalEnvironmentr   r   dropr$   raise)depthr   zendog has evaluated to an array with multiple columns that has shape {}. This occurs when the variable converted to endog is non-numeric (e.g., bool or str).)r   r   r    r!   )locr'   patsyrS   
isinstanceintgetr	   _formula_max_endogndimshaperA   formatlencolumnsr-   
term_namesremovesubsetupdater    r)   frame)clsr    r)   rd   	drop_colsargsr2   rP   rS   r   tmpr&   r%   r   r!   	max_endogxcolscolmods                       r3   from_formulaModel.from_formula   s   T 88F#D::j$/H^-&r*H#&&MH**Y/fG!$g*13471%**	!

Q5;;q>I#= 5 6<VEKK5HJ J  S^a%7#||B|!q	/AA|DB4y3t||,,DzK223$CC( %
 *006k")")&13 	4 %///
' C & s   =
F7F7F<<
G
	G
c                .    U R                   R                  $ )z 
Names of endogenous variables.
        """
        return self.data.ynames

    @property
    def exog_names(self) -> list[str] | None:
        """
Names of exogenous variables.
        """
        return self.data.xnames

    def fit(self):
        """
Fit a model to data.
        """
        raise NotImplementedError

    def predict(self, params, exog=None, *args, **kwargs):
        """
After a model has been fit, predict returns the fitted values.

This is a placeholder intended to be overridden by individual models.
r}   )r1   paramsr%   ri   r2   s        r3   predictModel.predict   
     "!r6   )r+   r/   r)   r&   r%   r*   NNN)returnzlist[str] | None)__name__
__module____qualname____firstlineno__r_   _model_params_doc_missing_param_doc_extra_param_doc__doc__r\   _kwargs_allowedr4   r;   ERROR_INIT_KWARGSrH   r(   classmethodrp   propertyru   rz   r   r   __static_attributes__ r6   r3   r   r   ?   s      	+.1AA 	 	C! , 
O/$ 04;L &  U Un        ""r6   r   c                  v   ^  \ rS rSrSrSU 4S jjrS rS rS rS r	S r
   SS
 jr  SS jrSS jrS	rU =r$ )LikelihoodModeli  z*
Likelihood model is a subclass of Model.
c                H   > [         TU ]  " X40 UD6  U R                  5         g r   )superr4   
initialize)r1   r&   r%   r2   	__class__s       r3   r4   LikelihoodModel.__init__  s    //r6   c                    g)z
Initialize (possibly re-initialize) a Model instance.

For example, if the design matrix of a linear model changes then
initialize can be used to recompute values using the modified design
matrix.
Nr   rt   s    r3   r   LikelihoodModel.initialize  s     	r6   c                    [         e)z
Log-likelihood of model.

Parameters
----------
params : ndarray
    The model parameters used to compute the log-likelihood.

Notes
-----
Must be overridden by subclasses.
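Examples
--------
A minimal sketch of a subclass supplying its own log-likelihood (the
Poisson parameterization below is only illustrative)::

    >>> import numpy as np
    >>> from scipy.special import gammaln
    >>> from statsmodels.base.model import GenericLikelihoodModel
    >>> class PoissonMLE(GenericLikelihoodModel):
    ...     def loglike(self, params):
    ...         mu = np.exp(self.exog @ params)
    ...         return np.sum(self.endog * np.log(mu) - mu
    ...                       - gammaln(self.endog + 1))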
r}   r1   r   s     r3   loglikeLikelihoodModel.loglike  s
     "!r6   c                    [         e)z
Score vector of model.

The gradient of logL with respect to each parameter.

Parameters
----------
params : ndarray
    The parameters at which to evaluate the score.

Returns
-------
ndarray
    The score vector evaluated at the parameters.
r}   r   s     r3   scoreLikelihoodModel.score-  s
      "!r6   c                    [         e)z
Fisher information matrix of model.

Returns -1 * Hessian of the log-likelihood evaluated at params.

Parameters
----------
params : ndarray
    The model parameters.
r}   r   s     r3   informationLikelihoodModel.information?  s
     "!r6   c                    [         e)z
The Hessian matrix of the model.

Parameters
----------
params : ndarray
    The parameters to use when evaluating the Hessian.

Returns
-------
ndarray
    The hessian evaluated at the parameters.
r}   r   s     r3   hessianLikelihoodModel.hessianL  s
     "!r6   r   c
                  ^ ^ SnUcT  [        T S5      (       a  T R                  nO6T R                  b  S/T R                  R                  S   -  nO[	        S5      eT R
                  R                  S   mUU 4S jnUS:X  a  UU 4S	 jnUU 4S
 jnOUU 4S jnUU 4S jnU
R                  SS5      nSU
;   a(  U
R                  S0 5      nU
S   US.nU(       a  U
S	 U
S	 O0 nSU
;   a  U
S   US'   U
S	 [        5       nUR                  XUXjUUUUUUUS9u  nnnUR                  U5        U
R                  SS5      nU(       a  U" T UU5      nGOKUS:X  a/  U(       a(  [        R                  R                  US   * 5      T-  nGOU	(       Gd  ST R                  U5      -  nSn[        R                   " [        R"                  " U5      5      (       a>  [        R                  R%                  U5      u  nn[        R&                  " U5      S:  a  SnU(       ag  WR)                  [        R*                  " SW-  5      5      R)                  UR,                  5      n[        R.                  " XR,                  -   S-  5      nO[0        R2                  " S[4        5        Sn[7        T UU4SS0UD6nUUl        [;        U[<        5      (       a.  U(       a'  US   (       d  SSKJ n  [0        R2                  " SU5        UUl!        U$ )a  
Fit method for likelihood based models

Parameters
----------
start_params : array_like, optional
    Initial guess of the solution for the loglikelihood maximization.
    The default is an array of zeros.
method : str, optional
    The `method` determines which solver from `scipy.optimize`
    is used, and it can be chosen from among the following strings:

    - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
    - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
    - 'lbfgs' for limited-memory BFGS with optional box constraints
    - 'powell' for modified Powell's method
    - 'cg' for conjugate gradient
    - 'ncg' for Newton-conjugate gradient
    - 'basinhopping' for global basin-hopping solver
    - 'minimize' for generic wrapper of scipy minimize (BFGS by default)

    The explicit arguments in `fit` are passed to the solver,
    with the exception of the basin-hopping solver. Each
    solver has several optional arguments that are not the same across
    solvers. See the notes section below (or scipy.optimize) for the
    available arguments and for the list of explicit arguments that the
    basin-hopping solver supports.
maxiter : int, optional
    The maximum number of iterations to perform.
full_output : bool, optional
    Set to True to have all available output in the Results object's
    mle_retvals attribute. The output is dependent on the solver.
    See LikelihoodModelResults notes section for more information.
disp : bool, optional
    Set to True to print convergence messages.
fargs : tuple, optional
    Extra arguments passed to the likelihood function, i.e.,
    loglike(x,*args)
callback : callable callback(xk), optional
    Called after each iteration, as callback(xk), where xk is the
    current parameter vector.
retall : bool, optional
    Set to True to return list of solutions at each iteration.
    Available in Results object's mle_retvals attribute.
skip_hessian : bool, optional
    If False (default), then the negative inverse hessian is calculated
    after the optimization. If True, then the hessian will not be
    calculated. However, it will be available in methods that use the
    hessian in the optimization (currently only with `"newton"`).
kwargs : keywords
    All kwargs are passed to the chosen solver with one exception. The
    following keyword controls what happens after the fit::

        warn_convergence : bool, optional
            If True, checks the model for the converged flag. If the
            converged flag is False, a ConvergenceWarning is issued.

Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.

Optional arguments for solvers (see returned Results.mle_settings)::

    'newton'
        tol : float
            Relative error in params acceptable for convergence.
    'nm' -- Nelder Mead
        xtol : float
            Relative error in params acceptable for convergence
        ftol : float
            Relative error in loglike(params) acceptable for
            convergence
        maxfun : int
            Maximum number of function evaluations to make.
    'bfgs'
        gtol : float
            Stop when norm of gradient is less than gtol.
        norm : float
            Order of norm (np.inf is max, -np.inf is min)
        epsilon
            If fprime is approximated, use this value for the step
            size. Only relevant if LikelihoodModel.score is None.
    'lbfgs'
        m : int
            This many terms are used for the Hessian approximation.
        factr : float
            A stop condition that is a variant of relative error.
        pgtol : float
            A stop condition that uses the projected gradient.
        epsilon
            If fprime is approximated, use this value for the step
            size. Only relevant if LikelihoodModel.score is None.
        maxfun : int
            Maximum number of function evaluations to make.
        bounds : sequence
            (min, max) pairs for each element in x,
            defining the bounds on that parameter.
            Use None for one of min or max when there is no bound
            in that direction.
    'cg'
        gtol : float
            Stop when norm of gradient is less than gtol.
        norm : float
            Order of norm (np.inf is max, -np.inf is min)
        epsilon : float
            If fprime is approximated, use this value for the step
            size. Can be scalar or vector.  Only relevant if
            Likelihoodmodel.score is None.
    'ncg'
        fhess_p : callable f'(x,*args)
            Function which computes the Hessian of f times an arbitrary
            vector, p.  Should only be supplied if
            LikelihoodModel.hessian is None.
        avextol : float
            Stop when the average relative error in the minimizer
            falls below this amount.
        epsilon : float or ndarray
            If fhess is approximated, use this value for the step size.
            Only relevant if Likelihoodmodel.hessian is None.
    'powell'
        xtol : float
            Line-search error tolerance
        ftol : float
            Relative error in loglike(params) for acceptable for
            convergence.
        maxfun : int
            Maximum number of function evaluations to make.
        start_direc : ndarray
            Initial direction set.
    'basinhopping'
        niter : int
            The number of basin hopping iterations.
        niter_success : int
            Stop the run if the global minimum candidate remains the
            same for this number of iterations.
        T : float
            The "temperature" parameter for the accept or reject
            criterion. Higher "temperatures" mean that larger jumps
            in function value will be accepted. For best results
            `T` should be comparable to the separation (in function
            value) between local minima.
        stepsize : float
            Initial step size for use in the random displacement.
        interval : int
            The interval for how often to update the `stepsize`.
        minimizer : dict
            Extra keyword arguments to be passed to the minimizer
            `scipy.optimize.minimize()`, for example 'method' - the
            minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
            tolerance for termination. Other arguments are mapped from
            explicit argument of `fit`:
              - `args` <- `fargs`
              - `jac` <- `score`
              - `hess` <- `hess`
    'minimize'
        min_method : str, optional
            Name of minimization method to use.
            Any method specific arguments can be passed directly.
            For a list of methods and their arguments, see
            documentation of `scipy.optimize.minimize`.
            If no method is specified, then BFGS is used.
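Examples
--------
A small usage sketch on a concrete subclass (the dataset and solver choice
are only illustrative)::

    >>> import statsmodels.api as sm
    >>> spector = sm.datasets.spector.load_pandas()
    >>> exog = sm.add_constant(spector.exog)
    >>> mod = sm.Logit(spector.endog, exog)
    >>> res = mod.fit(method="bfgs", maxiter=200, disp=False)
    >>> res.mle_retvals["converged"]
    True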
Nstart_paramsg        r   z6If exog is None, then start_params should be specifiedr   c                2   > TR                   " U /UQ76 * T-  $ r   )r   r   ri   nobsr1   s     r3   fLikelihoodModel.fit.<locals>.f  s    LL/$//$66r6   newtonc                0   > TR                   " U /UQ76 T-  $ r   r   r   s     r3   r   "LikelihoodModel.fit.<locals>.score  s    zz&040477r6   c                0   > TR                   " U /UQ76 T-  $ r   r   r   s     r3   hess!LikelihoodModel.fit.<locals>.hess  s    ||F2T2T99r6   c                2   > TR                   " U /UQ76 * T-  $ r   r   r   s     r3   r   r      s    

61D11D88r6   c                2   > TR                   " U /UQ76 * T-  $ r   r   r   s     r3   r   r   #  s    V3d33d::r6   warn_convergenceTcov_typecov_kwds)r   r   use_t)r   methoddispmaxitercallbackretallfull_outputcov_params_funcHessianrR   F      ?g       @z8Inverting hessian failed, no bse or cov_params availablescale	converged)ConvergenceWarningzEMaximum Likelihood optimization failed to converge. Check mle_retvals)"hasattrr   r%   r^   rA   r&   r'   r[   r   _fitre   
setdefaultnplinalginvr   allisfiniteeighmindotdiagTasfortranarrayr?   r@   r   LikelihoodModelResultsmle_retvalsrY   dictstatsmodels.tools.sm_exceptionsr   mle_settings)r1   r   r   r   r   r   fargsr   r   skip_hessianr2   Hinvr   r   r   r   r   r:   	optimizerxoptretvalsoptim_settingsr   H
invertibleeigvalseigvecsmlefitr   r   s   `                            @r3   r   LikelihoodModel.fit\  s   L t^,,#00& #utyyq'99  "0 1 1 zz"	7 X8:9; "::&8$? zz*b1H &z 2ID:&z"Df"7ODMwK	(1q7<?C>D<@?F@H>DCN )7 )P%g~ 	d# ++,=tD"4w7DxK99==')"4!45<DT\\$''AJvvbkk!n%%#%99>>!#4 66'?Q&!%J{{2773=#9:>>wyyI(($-3)>? *+BD (dDKKdK %gt$$(<N <02 -r6   c           	     	   [        U S5      (       ay  U R                  S:  ai  [        R                  " USS9nU R                  R
                  S   n[        R                  " XfU R                  -   5      n[        R                  " X45      nOUnUb  X(   US'   [        U5      nU R                  5       n	U R                  " U R                  U R                  SS2U4   40 U	D6n
U
R                  " S0 UD6nUnUc)  U R                  R
                  S   nU[        U SS5      -  n[        R                  " U5      nUR                  X'    U R                  SSSSS	US
9n[        UR$                  S5      (       a6  UR$                  R&                  =UR$                  l        UR(                  l        [        US5      (       a  UR*                  UR(                  l        [        US5      (       a  UR,                  UR(                  l        XR(                  l        [        UR(                  S5      (       a  UR(                  R.                  c'  [        R                  " XD45      UR(                  l        OSUR(                  R.                  S'   [        R                  " U5      nUR.                  UR(                  R.                  USS2S4   U4'   UR0                  UR(                  R0                  -
  n[        US5      (       aR  [        R                  " XD45      UR(                  l        UR2                  UR(                  R2                  USS2S4   U4'   [        US5      (       a6  UR4                  UR(                  l        UR6                  UR(                  l        XR(                  l        UR0                  UR(                  l        UR:                  UR(                  l        XR(                  l        XR(                  l        [        UR$                  S5      (       a  UR(                  R@                  S	 UR(                  R@                  S	 UR(                  R@                  S	 UR(                  R@                  S   nSUS'   URB                  XSS2S4   U4'   XR(                  l        U$ ! [         ["        4 a    U R                  5       n GNjf = f)a"  experimental, fit the model subject to zero constraints

Intended for internal use cases until we know what we need.
API will need to change to handle models with two exog.
This is not yet supported by all model subclasses.

This is essentially a simplified version of `fit_constrained`, and
does not need to use `offset`.

The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.

Some subclasses could use a more efficient calculation than using a
new model.

Parameters
----------
keep_index : array_like (int or bool) or slice
    variables that should be kept; all other parameters are constrained to zero.
start_params : None or array_like
    starting values for the optimization. `start_params` needs to be
    given in the original parameter space and are internally
    transformed.
k_params : int or None
    If None, then we try to infer from start_params or model.
**fit_kwds : keyword arguments
    fit_kwds are used in the optimization of the transformed model.

Returns
-------
results : Results instance
k_extrar   T)copyr   Nr   nmF)r   r   r   r   r   r   r   r   r   normalized_cov_params.cov_params_defaultr   Mresidfittedvaluessresidbcov_scaledr   )"r   r   r   arrayr%   r^   arangeconcatenater`   r;   r   r&   r   r8   zerosr   	TypeErrorrA   modelr   _resultsr   r   r   df_residr   r   r   
keep_indexdf_modelk_constrresults_constrained_cacher   )r1   r  r   return_auxiliaryk_paramsfit_kwdskextra_indexkeep_index_p	init_kwds
mod_constr
res_constrparams_fullresr  covs                   r3   
_fit_zerosLikelihoodModel._fit_zerosf  s   H 4##q(8*48J		"A))A4<<'78K>>:*CDL%L #'3'AH^$<(H '')	^^DJJ		!Z-0H 1&/1
^^/h/
!
yyq)Hi33Hhhx(","3"3	((11T,1  MC :##W-- 4>3C3C3I3IICIIOcll0:}--'1'='=CLL$ :~..(2(?(?CLL%)&=>>22:138:N1OCLL.67CLL..s3 XXj)
,, 	**:ag+>
+JK&&)>)>>:344.0hh7K.LCLL+-- LL++Jq$w,?,KL:z**$.$7$7CLL!$.$7$7CLL!", * 3 3 * 3 3 (+5( 399c""##G,##N3##H-,,%%m4CCH3=3I3IC1d7#Z/0.1LL+
 :& 	((*C	s   R! ! SSc                T   U R                   nXUR                  S5      -  -   n[        R                  R	                  USS9n[        R
                  " UR                  5       5      [        R                  " U5      :  n[        R                  " U) 5      S   nU R                  " SSU0UD6$ )zexperimental, fit of the model without collinear variables

This currently uses QR to drop variables based on the given
sequence.
Options will be added in future, when the supporting functions
to identify collinear variables become available.
r   r)moder  r   )
r%   varr   r   qrabsdiagonalsqrtwherer  )	r1   atolrtolr:   rl   tolr  maskidx_keeps	            r3   _fit_collinearLikelihoodModel._fit_collinear  s     IIAEE!H_$IILLL%vvajjl#bggcl2 88TE?1%;(;d;;r6   r   )	Nr   d   TTr   NFF)NNFN)g+=gvIh%<=)r   r   r   r   r   r4   r   r   r   r   r   r   r  r"  r   __classcell__r   s   @r3   r   r     sR    ""$""  ?BINHT 8<48KZ< <r6   r   c                     ^  \ rS rSrSr  SU 4S jjrS rU 4S jrS rS r	S r
S	 rS
 rS rS rS rSS jr  SU 4S jjrSrU =r$ )GenericLikelihoodModeli  a8  
Allows the fitting of any likelihood function via maximum likelihood.

A subclass needs to specify at least the log-likelihood
If the log-likelihood is specified for each observation, then results that
require the Jacobian will be available. (The other case is not tested yet.)

Notes
-----
Optimization methods that require only a likelihood function are 'nm' and
'powell'

Optimization methods that require a likelihood function and a
score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
Hessian is optional for 'ncg'.

Optimization method that require a likelihood function, a score/gradient,
and a Hessian is 'newton'

If they are not overwritten by a subclass, then numerical gradient,
Jacobian and Hessian of the log-likelihood are calculated by numerical
forward differentiation. This might results in some cases in precision
problems, and the Hessian might not be positive definite. Even if the
Hessian is not positive definite the covariance matrix of the parameter
estimates based on the outer product of the Jacobian might still be valid.


Examples
--------
see also subclasses in directory miscmodels

import statsmodels.api as sm
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog)
# in this dir
from model import GenericLikelihoodModel
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
import numpy as np
np.allclose(res.params, probit_res.params)
c                L  > Ub  X0l         Ub  X@l        Ub  XPl        UR                  SS 5      n	U R                  R                  U5        [        T
U ]  " X4XiS.UD6  Ub0  [        R                  " U5      S:X  a  UR                  S   OSU l        Ub  U R                  U5        g g )Nr"   )r   r"   rQ   r   )r   r   r   r'   rL   re   r   r4   r   r]   r^   nparams_set_extra_params_names)r1   r&   r%   r   r   r   r   extra_params_namesr:   r"   r   s             r3   r4   GenericLikelihoodModel.__init__6  s     "LJ"L88J-T" 		
!(	
?C	

 -/WWT]a-?DJJqMQDL)(();< *r6   c                0   Uby  U R                   b  U R                  R                  U5        OXR                  l        [        U5      U l        [        U S5      (       a  U =R                  U R                  -  sl        [        U R                  5      U l	        g )Nr   )
r%   rz   r,   r)   ry   r`   r   r   r   r*  )r1   r,  s     r3   r+  .GenericLikelihoodModel._set_extra_params_namesT  sk    )yy$&&'9:#5		 12DLtZ((-4??+r6   c                  >^  T R                   (       d  U 4S jT l         T R                  (       d   OT R                  (       d   T R                  bg  [        R                  R                  T R                  5      n[        US-
  5      T l        [        T R                  R                  S   U-
  5      T l	        O*[        R                  T l        [        R                  T l	        [        TT ]1  5         g)z
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
c                0   > [        U TR                  5      $ r   )r   r   )rl   r1   s    r3   <lambda>3GenericLikelihoodModel.initialize.<locals>.<lambda>j  s    =DLL#Ar6   Nr   r   )r   r   r%   r   r   matrix_rankfloatr  r^   r   nanr   r   )r1   err   s   ` r3   r   !GenericLikelihoodModel.initializec  s     zzADJ<<<< 99 &&tyy1B!"q&MDM!$))//!"4r"9:DMFFDMFFDMr6   c                V    U R                   R                  5       nXU R                  '   U$ )a  
expand to full parameter array when some parameters are fixed

Parameters
----------
params : ndarray
    reduced parameter array

Returns
-------
paramsfull : ndarray
    expanded parameter array where fixed parameters are included

Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.

*developer notes:*

This can be used in the log-likelihood to ...

this could also be replaced by a more general parameter
transformation.
)fixed_paramsr   fixed_paramsmask)r1   r   
paramsfulls      r3   expandparams#GenericLikelihoodModel.expandparams}  s,    4 &&++-
,24(()r6   c                    XR                      $ )zReduce parameters)r;  r   s     r3   reduceparams#GenericLikelihoodModel.reduceparams  s    ++,,r6   c                B    U R                  U5      R                  S5      $ )z!Log-likelihood of model at paramsr   
loglikeobssumr   s     r3   r   GenericLikelihoodModel.loglike  s    v&**1--r6   c                D    U R                  U5      R                  S5      * $ )z*Negative log-likelihood of model at paramsr   rC  r   s     r3   nloglikeGenericLikelihoodModel.nloglike  s    '++A...r6   c                &    U R                  U5      * $ )z
Log-likelihood of the model for all observations at params.

Parameters
----------
params : array_like
    The parameters of the model.

Returns
-------
loglike : array_like
    The log likelihood of the model evaluated at `params`.
)nloglikeobsr   s     r3   rD  !GenericLikelihoodModel.loglikeobs  s       (((r6   c                r    0 nUR                  SS5        [        XR                  40 UD6R                  5       $ )z0
Gradient of log-likelihood evaluated at params
centeredT)r   r   r   ravelr1   r   r:   s      r3   r   GenericLikelihoodModel.score  s4     
D)V\\:T:@@BBr6   c                R    UR                  SS5        [        XR                  40 UD6$ )zO
Jacobian/Gradient of log-likelihood evaluated at params for each
observation.
rN  T)r   r   rD  rP  s      r3   	score_obs GenericLikelihoodModel.score_obs  s&     	
D)V__===r6   c                2    SSK Jn  U" XR                  5      $ )z/
Hessian of log-likelihood evaluated at params
r   )approx_hess)statsmodels.tools.numdiffrV  r   )r1   r   rV  s      r3   r   GenericLikelihoodModel.hessian  s     	: 6<<00r6   c                    [         e)a}  Weights for calculating Hessian

Parameters
----------
params : ndarray
    parameter at which Hessian is evaluated
scale : None or float
    If scale is None, then the default scale will be calculated.
    Default scale is defined by `self.scaletype` and set in fit.
    If scale is not None, then it is used as a fixed scale.
observed : bool
    If True, then the observed Hessian is returned. If false then the
    expected information matrix is returned.

Returns
-------
hessian_factor : ndarray, 1d
    A 1d weight vector used in the calculation of the Hessian.
    The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
r}   )r1   r   r   observeds       r3   hessian_factor%GenericLikelihoodModel.hessian_factor  s
    , "!r6   c           
     8  > UcA  [        U S5      (       a  U R                  nO#S[        R                  " U R                  5      -  nSU;  a  SUS'   [
        TU ]  n	U	" S
UX#UXVS.UD6n
[        U S[        5      nU" X
5      nU R                  c  / OU R                  n[        U5      [        U
R                  5      -
  nUS:X  dR  US:  a1  U R                  [        U* 5       Vs/ s H  nSU-  PM
     sn5        U$ [        R                  " S	[         5        U$ s  snf )Nr   g?r   	nonrobust)r   r   r   r   r   r   results_classr   zpar%dzmore exog_names than parametersr   )r   r   r   onesr*  r   r   r8   GenericLikelihoodModelResultsrz   r`   r   r+  ranger?   r@   r   )r1   r   r   r   r   r   r   r   r2   
fit_methodr   r_  genericmlefitrz   k_missrE   r   s                   r3   r   GenericLikelihoodModel.fit  s0    t^,,#00"RWWT\\%::V#!,F:W[
 D#)(3!%D =CD
  o =?%d3 !OO3R$//
Z3v}}#55{z,,7<fW~.G7E! /6k7E.G H  ?N.Gs   "D)r  r   r   r   r   r*  r   )NNNNr$   NNT)Nr   i  r   r   Nr   )r   r   r   r   r   r4   r+  r   r=  r@  r   rH  rD  r   rS  r   r[  r   r   r%  r&  s   @r3   r(  r(    sc    ,Z >BBF=<,4<-./) C>1"0 LM*+" "r6   r(  c                  >    \ rS rSrSrS rS rS
S jrSS jrS r	S	r
g)Resultsi  z
Class to contain model results

Parameters
----------
model : class instance
    the previously specified model instance
params : ndarray
    parameter estimates from the fit model
c                    U R                   R                  U5        U R                  " X40 UD6  / U l        / SQU l        g )N)r   r   wresid)rL   re   r   r+   _data_in_cache)r1   r   r   kwds       r3   r4   Results.__init__  s4    S!--Ar6   c                b    X l         Xl        [        US5      (       a  UR                  U l        gg)z
Initialize (possibly re-initialize) a Results instance.

Parameters
----------
model : Model
    The model instance.
params : ndarray
    The model parameters.
**kwargs
    Any additional keyword arguments required to initialize the model.
r*   N)r   r   r   r*   )r1   r   r   r2   s       r3   r   Results.initialize   s-     
5,''#..DO (r6   c                   [        US 5      nS nU(       aN  UR                  S:X  d  U R                  R                  S:X  a  UR                  nOUR                  R
                  /nU(       Ga  [        U R                  S5      (       Gam  UGbi  [        U R                  SS 5      =(       d     U R                  R                  R                  nSSKJn  [        U[        R                  5      (       a  [        US5      (       aT  [        UR
                  [         5      (       a5  UR
                  UR#                  5       ;   a  [        R$                  " U5      nO [        R$                  " U5      R&                  nUR                  n[)        U5      n[        U[*        5      n U" XQSS	9nU[)        U5      :  a7  U(       d0  Uc  [2        R4                  " S[6        5        OUR9                  U5      nUR                  nUb  [:        R<                  " U5      nUR                  S:X  aT  U R                  R>                  R                  S:X  d'  U R                  R>                  R@                  S   S:X  a	  US S 2S 4   n[:        RB                  " U5      nX4$ ! [,         a9  n	S
R/                  [!        [!        U	5      5      5      n
U	R1                  U
5      eS n	A	ff = f)NrQ   r   r    r!   r   )dmatrixname	dataframe)return_typezpredict requires that you use a DataFrame when predicting from a model
that was created using the formula api.

The original error message returned by patsy is:
{}znan values have been dropped)"r   r]   r   sizeindexrs  r   r   r8   r)   r!   rX   rr  rY   pdSeriesstrdescribe	DataFramer   r`   r   	Exceptionr_   r   r?   r@   r   reindexr   asarrayr%   r^   
atleast_2d)r1   r%   	transform	is_pandas
exog_indexr!   rr  orig_exog_lenis_dictexcrG   s              r3   _transform_predict_exogResults._transform_predict_exog2  s   $T40	
yyA~!1!1Q!6!ZZ
"jjoo.
Y77T=M"4::}dC 7::??66 %$		**D&))jC.H.H		[%9%9%;;<<-D <<-//D!ZZ
IM t,G){kJ s4y(%MM"@,O<<
3DJ::d#DyyA~4::??#7#71#<#'::??#8#8#;q#@AtG}==&D+  ) #F3s3x=1	 
 mmC(()s   J 
K4K		KNc                   U R                  UUS9u  pU R                  R                  " U R                  U/UQ70 UD6nUbI  [	        US5      (       d8  UR
                  S:X  a  [        R                  " XeS9$ [        R                  " XeS9$ U$ )a  
Call self.model.predict with self.params as the first argument.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict. see Notes below.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
*args
    Additional arguments to pass to the model, see the
    predict method of the model for the details.
**kwargs
    Additional keywords arguments to pass to the model, see the
    predict method of the model for the details.

Returns
-------
array_like
    See self.model.predict.

Notes
-----
The types of exog that are supported depends on whether a formula
was used in the specification of the model.

If a formula was used, then exog is processed in the same way as
the original data. This transformation needs to have key access to the
same variable names, and can be a pandas DataFrame or a dict like
object that contains numpy arrays.

If no formula was used, then the provided exog needs to have the
same number of columns as the original exog in the model. No
transformation of the data is performed except converting it to
a numpy array.

Row indices as in pandas data frames are supported, and added to the
returned prediction.
)r  predicted_valuesr   )rw  )	r  r   r   r   r   r]   rx  ry  r|  )r1   r%   r  ri   r2   r  predict_resultss          r3   r   Results.predictf  s    Z  77BK 8 M **,,T[[$ 7 7/57 !'/2D+F +F##q(yyCC||OFF""r6   c                    [         e)z
Summary

Not implemented
r}   rt   s    r3   summaryResults.summary  r   r6   )r+   rl  r*   r   r   )Trg  )r   r   r   r   r   r4   r   r  r   r  r   r   r6   r3   ri  ri    s#    	B/$2 h:#x"r6   ri  c                  8  ^  \ rS rSrSrSU 4S jjrS r  SS jr\S 5       r	\	R                  S 5       r	\S 5       r\S	 5       r\S
 5       r\S 5       r  SS jrSS jrSS jr  SS jr  SS jr  SS jrSS jrSS jrS S jr\S 5       rS rSrU =r$ )!r   i  a  
Class to contain results from likelihood models

Parameters
----------
model : LikelihoodModel instance or subclass instance
    LikelihoodModelResults holds a reference to the model that is fit.
params : 1d array_like
    parameter estimates from estimated model
normalized_cov_params : 2d array
   Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
scale : float
    For (some subset of models) scale will typically be the
    mean square error from the estimated model (sigma^2)

Attributes
----------
mle_retvals : dict
    Contains the values returned from the chosen optimization method if
    full_output is True during the fit.  Available only if the model
    is fit by maximum likelihood.  See notes below for the output from
    the different methods.
mle_settings : dict
    Contains the arguments passed to the chosen optimization method.
    Available if the model is fit by maximum likelihood.  See
    LikelihoodModel.fit for more information.
model : model instance
    LikelihoodResults contains a reference to the model that is fit.
params : ndarray
    The parameters estimated for the model.
scale : float
    The scaling factor of the model given during instantiation.
tvalues : ndarray
    The t-values of the standard errors.


Notes
-----
The covariance of params is given by scale times normalized_cov_params.

Return values by solver if full_output is True during fit:

    'newton'
        fopt : float
            The value of the (negative) loglikelihood at its
            minimum.
        iterations : int
            Number of iterations performed.
        score : ndarray
            The score vector at the optimum.
        Hessian : ndarray
            The Hessian at the optimum.
        warnflag : int
            1 if maxiter is exceeded. 0 if successful convergence.
        converged : bool
            True: converged. False: did not converge.
        allvecs : list
            List of solutions at each iteration.
    'nm'
        fopt : float
            The value of the (negative) loglikelihood at its
            minimum.
        iterations : int
            Number of iterations performed.
        warnflag : int
            1: Maximum number of function evaluations made.
            2: Maximum number of iterations reached.
        converged : bool
            True: converged. False: did not converge.
        allvecs : list
            List of solutions at each iteration.
    'bfgs'
        fopt : float
            Value of the (negative) loglikelihood at its minimum.
        gopt : float
            Value of gradient at minimum, which should be near 0.
        Hinv : ndarray
            value of the inverse Hessian matrix at minimum.  Note
            that this is just an approximation and will often be
            different from the value of the analytic Hessian.
        fcalls : int
            Number of calls to loglike.
        gcalls : int
            Number of calls to gradient/score.
        warnflag : int
            1: Maximum number of iterations exceeded. 2: Gradient
            and/or function calls are not changing.
        converged : bool
            True: converged.  False: did not converge.
        allvecs : list
            Results at each iteration.
    'lbfgs'
        fopt : float
            Value of the (negative) loglikelihood at its minimum.
        gopt : float
            Value of gradient at minimum, which should be near 0.
        fcalls : int
            Number of calls to loglike.
        warnflag : int
            Warning flag:

            - 0 if converged
            - 1 if too many function evaluations or too many iterations
            - 2 if stopped for another reason

        converged : bool
            True: converged.  False: did not converge.
    'powell'
        fopt : float
            Value of the (negative) loglikelihood at its minimum.
        direc : ndarray
            Current direction set.
        iterations : int
            Number of iterations performed.
        fcalls : int
            Number of calls to loglike.
        warnflag : int
            1: Maximum number of function evaluations. 2: Maximum number
            of iterations.
        converged : bool
            True : converged. False: did not converge.
        allvecs : list
            Results at each iteration.
    'cg'
        fopt : float
            Value of the (negative) loglikelihood at its minimum.
        fcalls : int
            Number of calls to loglike.
        gcalls : int
            Number of calls to gradient/score.
        warnflag : int
            1: Maximum number of iterations exceeded. 2: Gradient and/
            or function calls not changing.
        converged : bool
            True: converged. False: did not converge.
        allvecs : list
            Results at each iteration.
    'ncg'
        fopt : float
            Value of the (negative) loglikelihood at its minimum.
        fcalls : int
            Number of calls to loglike.
        gcalls : int
            Number of calls to gradient/score.
        hcalls : int
            Number of calls to hessian.
        warnflag : int
            1: Maximum number of iterations exceeded.
        converged : bool
            True: converged. False: did not converge.
        allvecs : list
            Results at each iteration.
    c                D  > [         T
U ]  X5        X0l        X@l        SU l        SU;   a  US   nUb  UOSU l        SU;   a`  UR                  SS5      nUR                  S0 5      nUS:X  a  SU l        SS0U l        g SS	K	J
n	  Uc  0 nU R
                  nU	" U 4US
US.UD6  g g )NFr   r   r^  r   descriptionWStandard Errors assume that the covariance matrix of the errors is correctly specified.r   get_robustcov_resultsTr   use_selfr   )r   r4   r   r   _use_tr   r[   r   r   statsmodels.base.covtyper  )r1   r   r   r   r   r2   r   r   r   r  r   s             r3   r4   LikelihoodModelResults.__init__J  s    '%:"
 f7OE"'"3DJzz*k:Hzz*b1H;& +!. 1. !/ K#!H

%d ?X,1?5=?  r6   c                    [         e)z"See specific model class docstringr}   rt   s    r3   r   ,LikelihoodModelResults.normalized_cov_paramsh  s    !!r6   c                    USL a  [        S5      eSSKJn  Uc  0 nUS:X  a  SU l        SS0U l        g U" U 4USUS	.UD6  g )
NFz8use_self should have been removed long ago.  See GH#4401r   r  r^  r  r  Tr  )rA   r  r  r   r   )r1   r   r  r   r   r  s         r3   _get_robustcov_results-LikelihoodModelResults._get_robustcov_resultsl  sj    u + , ,BH{"'DM* -* +DM
 "$ ;D(-;19;r6   c                    U R                   $ )z?Flag indicating to use the Student's distribution in inference.)r  rt   s    r3   r   LikelihoodModelResults.use_t~  s     {{r6   c                $    [        U5      U l        g r   )boolr  )r1   values     r3   r   r    s    5kr6   c                L    U R                   R                  U R                  5      $ )zLog-likelihood of model)r   r   r   rt   s    r3   llfLikelihoodModelResults.llf  s     zz!!$++..r6   c                   [        U S5      (       dK  U R                  c>  [        R                  " [	        U R
                  5      5      n[        R                  USS& U$ [        R                  " 5          [        R                  " S[        5        [        R                  " [        R                  " U R                  5       5      5      nSSS5        U$ ! , (       d  f       W$ = f)z/The standard errors of the parameter estimates.r   Nignore)r   r   r   emptyr`   r   r6  r?   catch_warningssimplefilterRuntimeWarningr  r   
cov_params)r1   bse_s     r3   bseLikelihoodModelResults.bse  s     344++388C,-DffDG
  ((*%%h?wwrwwt'89: +  +* s   2AC
Cc                    [         R                  " 5          [         R                  " S[        5        U R                  U R
                  -  sSSS5        $ ! , (       d  f       g= f)z8
Return the t-statistic for a given parameter estimate.
r  N)r?   r  r  r  r   r  rt   s    r3   tvaluesLikelihoodModelResults.tvalues  s<    
 $$&!!(N;;;) '&&s   4A
A"c                   [         R                  " 5          [         R                  " S[        5        U R                  (       aa  [        U SU R                  5      n[        R                  R                  [        R                  " U R                  5      U5      S-  sSSS5        $ [        R                  R                  [        R                  " U R                  5      5      S-  sSSS5        $ ! , (       d  f       g= f)z6The two-tailed p values for the t-stats of the params.r  df_resid_inferencerQ   N)r?   r  r  r  r   r8   r   r   tsfr   r  r  norm)r1   r   s     r3   pvaluesLikelihoodModelResults.pvalues  s     $$&!!(N;zz"4)=t}}Mwwzz"&&"6AAE	 '& zz}}RVVDLL%9:Q> '&&s   BC-$?C--
C;c           	        [        U S5      (       a  U R                  S   S;   a  [        nO[        R                  nUc)  U R
                  c  [        U S5      (       d  [        S5      eUb  Uc  Ub  [        S5      eUb  Uc  [        S5      eUc<  [        U S5      (       a  U R                  nOUc  U R                  nU R
                  U-  nUb9  [        R                  " U5      nUR                  S	:X  a  XBU4   $ XBSS2S4   U4   $ Ubr  [        R                  " U5      nUR                  S	:X  a  [        S
5      eUc  UnO[        R                  " U5      nU" X" U[        R                  " U5      5      5      nU$ U$ )a  
Compute the variance/covariance matrix.

The variance/covariance matrix can be of a linear contrast of the
estimated parameters or all params multiplied by scale which will
usually be an estimate of sigma^2.  Scale is assumed to be a scalar.

Parameters
----------
r_matrix : array_like
    Can be 1d, or 2d.  Can be used alone or with other.
column : array_like, optional
    Must be used on its own.  Can be 0d or 1d see below.
scale : float, optional
    Can be specified or not.  Default is None, which means that
    the scale argument is taken from the model.
cov_p : ndarray, optional
    The covariance of the parameters. If not provided, this value is
    read from `self.normalized_cov_params` or
    `self.cov_params_default`.
other : array_like, optional
    Can be used when r_matrix is specified.

Returns
-------
ndarray
    The covariance matrix of the parameter estimates or of linear
    combination of parameter estimates. See Notes.

Notes
-----
(The below are assumed to be in matrix notation.)

If no argument is specified returns the covariance matrix of a model
``(scale)*(X.T X)^(-1)``

If contrast is specified it pre and post-multiplies as follows
``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``

If contrast and other are specified returns
``(scale) * r_matrix (X.T X)^(-1) other.T``

If column is specified returns
``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d

OR

``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
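Examples
--------
A small usage sketch (the model and the contrast vector are only
illustrative)::

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load_pandas()
    >>> exog = sm.add_constant(data.exog)
    >>> res = sm.OLS(data.endog, exog).fit()
    >>> cov = res.cov_params()                  # full k x k covariance
    >>> r = np.zeros(len(res.params))
    >>> r[-2:] = [1, -1]
    >>> var_diff = res.cov_params(r_matrix=r)   # variance of a contrast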
r   r   l1l1_cvxopt_cpNr   zFneed covariance of parameters for computing (unnormalized) covariancesz3Column should be specified without other arguments.z)other can only be specified with r_matrixr   zr_matrix should be 1d or 2d)r   r   r   r   r   r   rA   r   r   r  r^   	transpose)r1   r_matrixcolumnr   cov_potherdot_funrj   s           r3   r  !LikelihoodModelResults.cov_params  s   f D.))!!+.2HHGffGMd88@D"677 : ; ;8#75;L * + +!1HII=t122//= JJE22U:ZZ'F||r!V^,,AtG_f455!zz(+H~~# !>??} 

5)(GE2<<3F$GHCJLr6   c           	        SSK Jn  [        USSSS9nU R                  R                  S:X  a=  U R
                  R                  R                   Vs/ s H  nSUS    SUS	    3PM     nnO U R
                  R                  R                  nU" U5      R                  U5      nUR                  UR                  pUR                  S   n	UR                  S	   n
Uc)  U R                  c  [        U S
5      (       d  [        S5      eU R                  R                  SS9nXR                  S   :w  a  [        S5      eUc  [         R"                  " U	5      nO&[         R$                  " U5      nUR'                  5       nUR(                  S	:  a  UR                  S   U	:w  a  [        S5      eUc  [        U S5      =(       a    U R*                  n[         R,                  " X5      nU	S	:  a8  [         R.                  " [         R0                  " U R3                  XS95      5      nO#[         R.                  " U R3                  XS95      nX-
  [5        U5      -  n[7        U SU R8                  5      nU(       a  [;        XUUS9$ [;        XUUSS9$ s  snf )a  
Compute a t-test for a each linear hypothesis of the form Rb = q.

Parameters
----------
r_matrix : {array_like, str, tuple}
    One of:

    - array : If an array is given, a p x k 2d array or length k 1d
      array specifying the linear restrictions. It is assumed
      that the linear combination is equal to zero.
    - str : The full hypotheses to test can be given as a string.
      See the examples.
    - tuple : A tuple of arrays in the form (R, q). If q is given,
      can be either a scalar or a length p row vector.

cov_p : array_like, optional
    An alternative estimate for the parameter covariance matrix.
    If None is given, self.normalized_cov_params is used.
use_t : bool, optional
    If use_t is None, then the default of the model is used. If use_t
    is True, then the p-values are based on the t distribution. If
    use_t is False, then the p-values are based on the normal
    distribution.

Returns
-------
ContrastResults
    The results for the test are attributes of this results instance.
    The available results have the same elements as the parameter table
    in `summary()`.

See Also
--------
tvalues : Individual t statistics for the estimated parameters.
f_test : Perform an F test on model parameters.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.

Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> r = np.zeros_like(results.params)
>>> r[5:] = [1,-1]
>>> print(r)
[ 0.  0.  0.  0.  0.  1. -1.]

r tests that the coefficients on the 5th and 6th independent
variables are the same.

>>> T_test = results.t_test(r)
>>> print(T_test)
                             Test for Constraints
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
c0         -1829.2026    455.391     -4.017      0.003   -2859.368    -799.037
==============================================================================
>>> T_test.effect
-1829.2025687192481
>>> T_test.sd
455.39079425193762
>>> T_test.tvalue
-4.0167754636411717
>>> T_test.pvalue
0.0015163772380899498

Alternatively, you can specify the hypothesis tests using a string

>>> from statsmodels.formula.api import ols
>>> dta = sm.datasets.longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
>>> t_test = results.t_test(hypotheses)
>>> print(t_test)
                             Test for Constraints
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
c0            15.0977     84.937      0.178      0.863    -177.042     207.238
c1            -2.0202      0.488     -8.231      0.000      -3.125      -0.915
c2             1.0001      0.249      0.000      1.000       0.437       1.563
==============================================================================
r   
DesignInfor   TstrictoptionalrQ   y_r   r   z8Need covariance of parameters for computing T statisticsForderz#r_matrix and params are not aligned7r_matrix and q_matrix must have the same number of rowsr  r  r  )effectr  sddf_denomr  )r  	statisticr  r  distribution)rX   r  r   r   r]   r   r)   	cov_nameslinear_constraintcoefs	constantsr^   r   r   rA   rO  r   r   r  squeezerv  r   r   r  r   r  r   r8   r   r
   )r1   r  r  r   r  rE   namesLCq_matrix
num_ttests
num_paramsr   _effect_sd_tr   s                   r3   t_testLikelihoodModelResults.t_test  sX   r 	%%E;;q "jjoo7797 1a!v&7  9E JJOO--E00:XXr||(^^A&
^^A&
Md88@D"677 , - -"""-a(BCCxx
+Hzz(+H'')H==1~~a J.  "2 3 3 =T7+:

E&&* >''"''$//! #2 #0 1 2C ''$//8/IJC F3K/4!5t}}E"'C,46 6 #'C,4068 8]9s   Jc                *    U R                  XUSSS9nU$ )aT  
Compute the F-test for a joint linear hypothesis.

This is a special case of `wald_test` that always uses the F
distribution.

Parameters
----------
r_matrix : {array_like, str, tuple}
    One of:

    - array : An r x k array where r is the number of restrictions to
      test and k is the number of regressors. It is assumed
      that the linear combination is equal to zero.
    - str : The full hypotheses to test can be given as a string.
      See the examples.
    - tuple : A tuple of arrays in the form (R, q), ``q`` can be
      either a scalar or a length k row vector.

cov_p : array_like, optional
    An alternative estimate for the parameter covariance matrix.
    If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
    A q x q array to specify an inverse covariance matrix based on a
    restrictions matrix.

Returns
-------
ContrastResults
    The results for the test are attributes of this results instance.

See Also
--------
t_test : Perform a single hypothesis test.
wald_test : Perform a Wald-test using a quadratic form.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.

Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,

r_matrix (pX pX.T) r_matrix.T

is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.

Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]

This tests that each coefficient is jointly statistically
significantly different from zero.

>>> print(results.f_test(A))
<F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10, df_denom=9, df_num=6>

Compare this to

>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10

>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))

This tests that the coefficients on the 2nd and 3rd regressors are
equal, and jointly that the coefficients on the 5th and 6th regressors
are equal.

>>> print(results.f_test(B))
<F test: F=array([[ 9.74046187]]), p=0.005605288531708235, df_denom=9, df_num=2>

Alternatively, you can specify the hypothesis tests using a string

>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
<F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3>
T)r  invcovuse_fscalar)	wald_test)r1   r  r  r  r  s        r3   f_testLikelihoodModelResults.f_test  s!    z nnX6VZn[
r6   c                "   [        USSSS9n[        USSSS9nUc  [        U S5      =(       a    U R                  nSSKJn  U R
                  R                  S	:X  a=  U R                  R                  R                   Vs/ s H  nS
US    SUS    3PM     n	nO U R                  R                  R                  n	U R
                  R                  SS9n
U" U	5      R                  U5      nUR                  UR                  pU R                  c"  Uc  Uc  [        U S5      (       d  [        S5      e[         R"                  " XSS2S4   5      n[%        UR&                  S   5      nUc  [         R(                  " U5      nO[         R*                  " U5      nUR                  S:X  a'  USS2S4   nUR&                  S   U:w  a  [        S5      eX-
  nUc  U R-                  XS9n[         R.                  " U5      R1                  5       (       a  [        S5      e[         R2                  R5                  U5      n[         R2                  R7                  U5      nUU:  a"  [8        R:                  " SUU4-  [<        5        UnUb  Un[        U S5      (       a4  U R>                  S   S;   a!  [A        [A        URB                  U5      U5      nO6[         R"                  " [         R"                  " URB                  U5      U5      n[E        U SU RF                  5      nUc  [8        R:                  " S[H        5        SnU(       a/  URJ                  S:X  a  [%        [         RL                  " U5      5      nU(       a  UU-  n[O        UUUS9$ [O        UUUSU4S9$ s  snf )a  
Compute a Wald-test for a joint linear hypothesis.

Parameters
----------
r_matrix : {array_like, str, tuple}
    One of:

    - array : An r x k array where r is the number of restrictions to
      test and k is the number of regressors. It is assumed that the
      linear combination is equal to zero.
    - str : The full hypotheses to test can be given as a string.
      See the examples.
    - tuple : A tuple of arrays in the form (R, q), ``q`` can be
      either a scalar or a length p row vector.

cov_p : array_like, optional
    An alternative estimate for the parameter covariance matrix.
    If None is given, self.normalized_cov_params is used.
invcov : array_like, optional
    A q x q array to specify an inverse covariance matrix based on a
    restrictions matrix.
use_f : bool
    If True, then the F-distribution is used. If False, then the
    asymptotic chi-squared distribution is used. If use_f is None, then
    the F distribution is used if the model specifies that use_t is True.
    The test statistic is proportionally adjusted for the distribution
    by the number of constraints in the hypothesis.
df_constraints : int, optional
    The number of constraints. If not provided the number of
    constraints is determined from r_matrix.
scalar : bool, optional
    Flag indicating whether the Wald test statistic should be returned
    as a scalar float. The current behavior is to return an array.
    This will switch to a scalar float after 0.14 is released. To
    get the future behavior now, set scalar to True. To silence
    the warning and retain the legacy behavior, set scalar to
    False.

Returns
-------
ContrastResults
    The results for the test are attributes of this results instance.

See Also
--------
f_test : Perform an F test on model parameters.
t_test : Perform a single hypothesis test.
statsmodels.stats.contrast.ContrastResults : Test results.
patsy.DesignInfo.linear_constraint : Specify a linear constraint.

Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,

r_matrix (pX pX.T) r_matrix.T

is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
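
Examples
--------
A short sketch; the two-row restriction matrix below tests the same
joint hypothesis as the ``B`` matrix in the ``f_test`` examples, and
outputs are omitted because they depend on the data. With ``use_f=None``
the distribution follows the model's ``use_t`` setting.

>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> B = np.array([[0, 0, 1, -1, 0, 0, 0], [0, 0, 0, 0, 0, 1, -1]])
>>> wald_res = results.wald_test(B, scalar=True)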
r  Tr  r  Nr   r   r  rQ   r  r  r   r  r  r   z8need covariance of parameters for computing F statisticsr  r  zPr_matrix performs f_test for using dimensions that are asymptotically non-normalzbcovariance of constraints does not have full rank. The number of constraints is %d, but rank is %dr   r   r  r  zThe behavior of wald_test will change after 0.14 to returning scalar test statistic values. To get the future behavior now, set scalar to True. To silence this message while retaining the legacy behavior, set scalar to False.F)r  r  df_numchi2)r  r  r  r  distargs)(r   r   r   rX   r  r   r]   r   r)   r  rO  r  r  r  r   rA   r   r   r5  r^   r   r  r  isnanmaxr   pinvr4  r?   r@   r   r   r   r   r8   r   FutureWarningrv  r  r
   )r1   r  r  r  r  df_constraintsr  r  rE   r  r   r  r  cparamsJRbqJ_r  r   s                      r3   r   LikelihoodModelResults.wald_test  s?   ~ %E68D4H=T7+:

E$;;q "jjoo7797 1a!v&7  9E JJOO--E"""-00:XXr||(&&.5=wt5I'J'J , - - &&!T'?3(..#$xx{Hzz(+H==A4(H~~a A%  "2 3 3 >OOXOCExx""$$  ". / / YY^^E*F&&u-BAv +./W56BD  %AD.))!!+.2HHv.4ArvvceeV,c2A4!5t}}E>MM<  Faffkbjjm$AFA"Q*+- - #A06!G GI9s   =Nc                   SSK Jn  U nUc  / nUc  / n[        UR                  R                  SS5      nUc  Uc  [        S5      e[        R                  " [        UR                  5      5      n/ n	U" [        5      n
Ub  UR                   H{  nUR                  U5      nUR                  5       nX   nU H  nX;   d  M
  X   R                  U5        M     UR                  S   nU(       a  US:X  a  Mi  U	R                  X45        M}     / nU H,  nUR                  U[        R                   " X   5      45        M.     O[#        UR                  R$                  5       H]  u  nn[        R&                  " UU   5      nU H  nX;   d  M
  X   R                  U5        M     U(       a  MK  U	R                  X45        M_     / nU H,  nUR                  U[        R                   " X   5      45        M.     UR(                  nSS/U   n/ n/ nU	U-   U-    H  u  nnUR+                  UUS	9nUR,                  UR.                  UR                  S   /nU(       a  UR                  UR0                  5        UR                  U5        UR                  U5        M     / S
QnU(       a  UR                  S5        SSKJn  U" UUUS9n[7        SUSUS9nU	U-   U-   Ul        U$ )a
  
Compute a sequence of Wald tests for terms over multiple columns.

This computes joint Wald tests for the hypothesis that all
coefficients corresponding to a `term` are zero.
`Terms` are defined by the underlying formula or by string matching.

Parameters
----------
skip_single : bool
    If true, then terms that consist of only a single column and,
    therefore, refer to only a single parameter are skipped.
    If false, then all terms are included.
extra_constraints : ndarray
    Additional constraints to test. Note that this input has not been
    tested.
combine_terms : {list[str], None}
    Each string in this list is matched to the name of the terms or
    the name of the exogenous variables. All columns whose name
    includes that string are combined in one joint test.
scalar : bool, optional
    Flag indicating whether the Wald test statistic should be returned
    as a scalar float. The current behavior is to return an array.
    This will switch to a scalar float after 0.14 is released. To
    get the future behavior now, set scalar to True. To silence
    the warning and retain the legacy behavior, set scalar to
    False.

Returns
-------
WaldTestResults
    The result instance contains `table` which is a pandas DataFrame
    with the test results: test statistic, degrees of freedom and
    pvalues.

Examples
--------
>>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit()
>>> res_ols.wald_test_terms()
<class 'statsmodels.stats.contrast.WaldTestResults'>
                                          F                P>F  df constraint  df denom
Intercept                        279.754525  2.37985521351e-22              1        51
C(Duration, Sum)                   5.367071    0.0245738436636              1        51
C(Weight, Sum)                    12.432445  3.99943118767e-05              2        51
C(Duration, Sum):C(Weight, Sum)    0.176002      0.83912310946              2        51

>>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)",                                            data).fit(cov_type='HC0')
>>> wt = res_poi.wald_test_terms(skip_single=False,                                          combine_terms=['Duration', 'Weight'])
>>> print(wt)
                            chi2             P>chi2  df constraint
Intercept              15.695625  7.43960374424e-05              1
C(Weight)              16.132616  0.000313940174705              2
C(Duration)             1.009147     0.315107378931              1
C(Weight):C(Duration)   0.216694     0.897315972824              2
Duration               11.187849     0.010752286833              3
Weight                 30.263368  4.32586407145e-06              4
r   )defaultdictNr!   zno constraints, nothing to dor   r  r  )r  )r  pvaluedf_constraintr  )r|  )rw  ra   )table)collectionsr  r8   r   r)   rA   r   eyer`   r   r-   termsslicers  r0   r^   vstack	enumeraterz   r  r   r  r  r  r  pandasr|  r   temp)r1   skip_singleextra_constraintscombine_termsr  r  resultr!   identityconstraintscombinedtermrm   rs  constraint_matrixcnamek_constraintcombined_constraintsrn   r   r  res_waldrw  
constraintwtrow	col_namesr|  r  r  s                                 r3   wald_test_terms&LikelihoodModelResults.wald_test_terms  s   | 	,$ " Mfll//E#4#<<==66#fmm,-t$"#))"((.yy{$,N! +E} ../@A +  166q9#q( ""D#<= *" $& &$++UBIIho4N,OP ' 'v||'>'>?	T$&MM(3-$@! +E} ../@A + ""D#<= @ $& &$++UBIIho4N,OP ' }U+ +.B BEV VD*!!*V!<B<<J,<,<Q,?@C

2;;'OOC LL !W =	Z($(%CdL$eD!558II
r6   c                    [        XX#US9nU$ )a  
Perform pairwise t_test with multiple testing corrected p-values.

This uses the formula design_info encoding contrast matrix and should
work for all encodings of a main effect.

Parameters
----------
term_name : str
    The name of the term for which pairwise comparisons are computed.
    Term names for categorical effects are created by patsy and
    correspond to the main part of the exog names.
method : {str, list[str]}
    The multiple testing p-value correction to apply. The default is
    'hs'. See stats.multipletesting.
alpha : float
    The significance level for multiple testing reject decision.
factor_labels : {list[str], None}
    Labels for the factor levels used for pairwise labels. If not
    provided, then the labels from the formula design_info are used.

Returns
-------
MultiCompResult
    The results are stored as attributes; the main attributes are the
    following two. Other attributes are added for debugging purposes
    or as background information.

    - result_frame : pandas DataFrame with t_test results and multiple
      testing corrected p-values.
    - contrasts : matrix of constraints of the null hypothesis in the
      t_test.

Notes
-----
Status: experimental. Currently only checked for treatment coding with
and without specified reference level.

Currently there are no multiple testing corrected confidence intervals
available.

Examples
--------
>>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit()
>>> pw = res.t_test_pairwise("C(Weight)")
>>> pw.result_frame
         coef   std err         t         P>|t|  Conf. Int. Low
2-1  0.632315  0.230003  2.749157  8.028083e-03        0.171563
3-1  1.302555  0.230003  5.663201  5.331513e-07        0.841803
3-2  0.670240  0.230003  2.914044  5.119126e-03        0.209488
     Conf. Int. Upp.  pvalue-hs reject-hs
2-1         1.093067   0.010212      True
3-1         1.763307   0.000002      True
3-2         1.130992   0.010212      True
)r   alphafactor_labels)r   )r1   	term_namer   r  r  r  s         r3   r   &LikelihoodModelResults.t_test_pairwise  s    r df,9;
r6   c                V    SSK Jn  SnU" XR                  U R                  5       X$S9nU$ )ao  Experimental method for nonlinear prediction and tests

Parameters
----------
func : callable, f(params)
    nonlinear function of the estimation parameters. The return of
    the function can be vector valued, i.e. a 1-D array
deriv : function or None
    first derivative or Jacobian of func. If deriv is None, then a
    numerical derivative will be used. If func returns a 1-D array,
    then the `deriv` should have rows corresponding to the elements
    of the return of func.

Returns
-------
nl : instance of `NonlinearDeltaCov` with attributes and methods to
    calculate the results for the prediction or tests
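
Examples
--------
A hedged sketch: ``results`` stands in for any fitted results instance,
``fun`` is a made-up nonlinear function of the parameters, and the
returned object is assumed to expose the ``NonlinearDeltaCov`` methods
such as ``summary``.

>>> fun = lambda params: params[1] / params[2]
>>> nl = results._get_wald_nonlinear(fun)
>>> smry = nl.summary()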

r   )NonlinearDeltaCovN)deriv	func_args)statsmodels.stats._delta_methodr#  r   r  )r1   funcr$  r#  r%  nls         r3   _get_wald_nonlinear*LikelihoodModelResults._get_wald_nonlinearU  s1    ( 	F	t[[$//2C%*A 	r6   c                   U R                   nU R                  (       a@  [        R                  n[	        U SU R
                  5      nUR                  SUS-  -
  U5      nO'[        R                  nUR                  SUS-  -
  5      nU R                  nXvU-  -
  nXvU-  -   n	Ub9  [        R                  " S[        5        [        R                  " U5      nX   nX   n	[        R                  " [        X5      5      $ )a  
Construct confidence interval for the fitted parameters.

Parameters
----------
alpha : float, optional
    The significance level for the confidence interval. The default
    `alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
    Specifies which confidence intervals to return.

.. deprecated:: 0.13

   cols is deprecated and will be removed after 0.14 is released.
   cols only works when inputs are NumPy arrays and will fail
   when using pandas Series or DataFrames as input. You can
   subset the confidence intervals using slices.

Returns
-------
array_like
    Each row contains [lower, upper] limits of the confidence interval
    for the corresponding parameter. The first column contains all
    lower, the second column contains all upper limits.

Notes
-----
The confidence interval is based on the standard normal distribution
if self.use_t is False. If self.use_t is True, then a Student's t
distribution with self.df_resid_inference (or self.df_resid if
df_resid_inference is not defined) degrees of freedom is used.

Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
       [    -177.02903529,      207.15277984],
       [      -0.1115811 ,        0.03994274],
       [      -3.12506664,       -0.91539297],
       [      -1.5179487 ,       -0.54850503],
       [      -0.56251721,        0.460309  ],
       [     798.7875153 ,     2859.51541392]])

>>> results.conf_int(cols=(2,3))
array([[-0.1115811 ,  0.03994274],
       [-3.12506664, -0.91539297]])
r  r   rQ   a  cols is deprecated and will be removed after 0.14 is released. cols only works when inputs are NumPy arrays and will fail when using pandas Series or DataFrames as input. Subsets of confidence intervals can be selected using slices of the full confidence interval array.)r  r   r   r  r8   r   ppfr  r   r?   r@   r  r   r  r   )
r1   r  rm   r  distr   qr   loweruppers
             r3   conf_intLikelihoodModelResults.conf_intp  s    h hh::77Dt%94==IHUQY1A::DUQY'AS S MM9
  ::d#DKEKEzz$u,--r6   c                N    SSK Jn  U(       a  U R                  5         U" X5        g)a  
Save a pickle of this instance.

Parameters
----------
fname : {str, handle}
    A string filename or a file handle.
remove_data : bool
    If False (default), then the instance is pickled without changes.
    If True, then all arrays with length nobs are set to None before
    pickling. See the remove_data method.
    In some cases not all arrays will be set to None.

Notes
-----
If remove_data is True and the model result does not implement a
remove_data method, then this will raise an exception.
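
Examples
--------
A minimal sketch of a save/load round trip; the file name is arbitrary
and ``results`` stands in for any fitted results instance.

>>> results.save("example_results.pickle", remove_data=True)
>>> restored = results.__class__.load("example_results.pickle")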
r   )save_pickleN)statsmodels.iolib.smpickler4  remove_data)r1   fnamer6  r4  s       r3   saveLikelihoodModelResults.save  s    ( 	;D r6   c                    SSK Jn  U" U5      $ )a{  
Load a pickled results instance

.. warning::

   Loading pickled models is not secure against erroneous or
   maliciously constructed data. Never unpickle data received from
   an untrusted or unauthenticated source.

Parameters
----------
fname : {str, handle, pathlib.Path}
    A string filename or a file handle.

Returns
-------
Results
    The unpickled results instance.
r   )load_pickle)r5  r;  )rg   r7  r;  s      r3   loadLikelihoodModelResults.load  s    , 	;5!!r6   c                   U R                   n0 n[        U5       H  n [        R                  X5      nXBU'   M     U Vs/ s H  n[        X%   [        5      (       d  M  UPM      nnU H  nSU R                  U'   M     S n[        U S/ 5       Vs/ s H  nSU-   PM
     n	nU R                  R                   Vs/ s H  nSU-   PM
     n
nU R                  U
-   U	-    H  nX;   a  M
  U" X5        M     U R                   H  n SU R                  U'   M     g! [         a     GM
  f = fs  snf s  snf s  snf ! [        [        4 a     ML  f = f)aV  
Remove data arrays, all nobs arrays from result and model.

This reduces the size of the instance, so it can be pickled with less
memory. Currently tested for use with predict from an unpickled
results and model instance.

.. warning::

   Since data and some intermediate results have been removed,
   calculating new statistics that require them will raise exceptions.
   The exception will occur the first time an attribute is accessed
   that has been set to None.

Not fully tested for time series models (tsa); it might delete too much
for prediction, or not delete everything that could be removed.

The lists of arrays to delete are maintained as attributes of
the result and model instance, except for cached values. These
lists could be changed before calling remove_data.

The attributes to remove are named in:

model._data_attr : arrays attached to both the model instance
    and the results instance with the same attribute name.

result._data_in_cache : arrays that may exist as values in
    result._cache

result._data_attr_model : arrays attached to the model
    instance but not to the results instance
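
Examples
--------
A brief sketch; ``results`` stands in for any fitted results instance
and ``new_exog`` for hypothetical data with the same columns as the
original exog.

>>> results.remove_data()
>>> predicted = results.predict(new_exog)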
Nc                    UR                  S5      nUR                  S5      n [        [        U /U-   5      n[	        XC5      (       a  [        XCS 5        g g ! [         a     g f = f)N.rR   )splitr'   r   r8   r   rK   AttributeError)objattpatt_obj_s        r3   wipe0LikelihoodModelResults.remove_data.<locals>.wipe$	  s_    		#A559Dguqy14&&D- '! s   0A 
A#"A#_data_attr_modelzmodel.)r   dirobject__getattribute__rB  rY   r   r  r8   r   r+   rl  rM   )r1   rg   	cls_attrsrs  attrrl   
data_attrsrH  rE   
model_only
model_attrrD  r9   s                r3   r6  "LikelihoodModelResults.remove_data  s]   B nn 	HD'..s9 #'$  "+ @A#IL+> 
 @D $DKK 		 -4D:Lb,QR,Qqhl,Q
R,0JJ,A,AB,Aqhl,A
B??Z/*<C  O = &&C#'C  '; " @  SB #H- s:   DD(D(D-9D2D7
D%$D%7E
E)r  r   r   r   r   r   )Nr   )r^  TN)NNNNNr   )FNNN)hs皙?Nr   )rU  N)F)r   r   r   r   r   r4   r   r  r   r   setterr   r  r  r  r  r  r  r  r  r  r   r)  r1  r8  r   r<  r6  r   r%  r&  s   @r3   r   r     s   Xz?<" EI%);$   \\" " / /   * * ? ? HL\~L8\^B 6::>LG\ DH37L\ =A&*;z6M.^!6 " "0I Ir6   r   c                  6    \ rS rSrSSSSSSSS.r\rSSS.rSrg)	LikelihoodResultsWrapperi?	  ra   rowsr  )r   r  r  r  r   r   r   )r  r1  r   N)r   r   r   r   _attrs_wrap_attrs_wrap_methodsr   r   r6   r3   rX  rX  ?	  s2    !&F KMr6   rX  c                      \ rS rSr\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r	\S 5       r
\S 5       r\S	 5       r\S
 5       rSS jrS rSrg)ResultMixiniT	  c                   [        U R                  SS5      n[        U S5      (       aP  [        U S5      (       a  U R                  nO [        U S5      (       a  U R                  nOSnU R
                  U-   U-   $ U R                  R                  $ )zModel WCr   r   r  r*   r"   r   )r8   r   r   r*   r"   r  r   rv  )r1   r   r"   s      r3   
df_modelwcResultMixin.df_modelwcV	  sz    
 $**i34$$t\**??z**== ==8+g55;;###r6   c                @    SU R                   -  SU R                  -  -   $ )zAkaike information criterionrQ   )r  r`  rt   s    r3   aicResultMixin.aich	  s      DHH}qDOO444r6   c                |    SU R                   -  [        R                  " U R                  5      U R                  -  -   $ )zBayesian information criterionrc  )r  r   logr   r`  rt   s    r3   bicResultMixin.bicm	  s-     DHH}rvvdii0DOODDDr6   c                L    U R                   R                  U R                  5      $ )z*cached Jacobian of log-likelihood
        )r   rS  r   rt   s    r3   
score_obsvResultMixin.score_obsvr	  s     zz##DKK00r6   c                L    U R                   R                  U R                  5      $ )z)cached Hessian of log-likelihood
        )r   r   r   rt   s    r3   hessvResultMixin.hessvx	  s     zz!!$++..r6   c                    U R                   n[        R                  R                  [        R                  " UR
                  U5      5      $ )zO
covariance of parameters based on outer product of jacobian of
log-likelihood
)rk  r   r   r   r   r   )r1   jacvs     r3   covjacResultMixin.covjac~	  s/     yy}}RVVDFFD122r6   c           	        U R                   nU R                  n[        R                  R	                  U5      n[        R
                  " U[        R
                  " [        R
                  " UR                  U5      U5      5      $ )zcovariance of parameters based on HJJH

dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood

name should be covhjh
)rk  rn  r   r   r   r   r   )r1   rq  rn  hessinvs       r3   covjhjResultMixin.covjhj	  sS     

))--&vvgrvvbffTVVT&:GDEEr6   c                j    [         R                  " [         R                  " U R                  5      5      $ )zBstandard deviation of parameter estimates based on covHJH
        )r   r  r   rv  rt   s    r3   bsejhjResultMixin.bsejhj	        wwrwwt{{+,,r6   c                j    [         R                  " [         R                  " U R                  5      5      $ )zBstandard deviation of parameter estimates based on covjac
        )r   r  r   rr  rt   s    r3   bsejacResultMixin.bsejac	  r{  r6   c           
     
   / n[        U R                  S5      (       a  SOSn[        U5       GH  n[        R                  R                  U R                  U R                  S9nU R                  b  U R                  USS24   n	OSn	U R                  R                  5       n
U R                  R                  " U R                  U   4SU	0U
D6nU(       a=  U R                  R                   H#  n[        X[        U R                  U5      5        M%     UR                  X#S9nUR                  UR                   5        GM     [        R"                  " U5      nU(       a  XPl        UR'                  S5      UR)                  S5      U4$ )	a}  simple bootstrap to get mean and variance of estimator

see notes

Parameters
----------
nrep : int
    number of bootstrap replications
method : str
    optimization method to use
disp : bool
    If true, then optimization prints results
store : bool
    If true, then parameter estimates for all bootstrap iterations
    are attached in self.bootstrap_results

Returns
-------
mean : ndarray
    mean of parameter estimates over bootstrap replications
std : ndarray
    standard deviation of parameter estimates over bootstrap
    replications

Notes
-----
This was mainly written to compare estimators of the standard errors of
the parameter estimates.  It uses independent random sampling from the
original endog and exog, and therefore is only correct if observations
are independently distributed.

This will be moved to apply only to models with independently
distributed observations.
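
Examples
--------
A hedged sketch; ``res`` is assumed to be a results instance of a model
using this mixin (e.g. a GenericLikelihoodModel fit), and only the
documented mean and std return values are unpacked here.

>>> boot_mean, boot_std = res.bootstrap(nrep=100, method='nm', disp=0, store=False)[:2]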
	cloneattrTF)rv  Nr%   )r   r   r   )r   r   rb  r   randomrandintr   r%   r;   r   r&   r  rK   r8   r   r0   r   r   bootstrap_resultsmeanstd)r1   nrepr   r   storeresultshascloneattrrE   rvsindexog_resampr  fitmodrO  fitress                 r3   	bootstrapResultMixin.bootstrap	  s;   F &tzz;??tUtAYY&&tyytyy&AF yy$"ii	2"

113IZZ))$**V*< I/:I>GIF JJ00DF'$**d*CD 1 ZZvZ9FNN6==)# $ ((7#%,"||AA77r6   c                    [         e)z$
get_nlfun

This is not implemented.
r}   )r1   funs     r3   	get_nlfunResultMixin.get_nlfun	  s
     "!r6   )r  N)r$  r   r   r   )r   r   r   r   r   r`  rd  rh  rk  rn  rr  rv  ry  r}  r  r  r   r   r6   r3   r^  r^  T	  s    $ $" 5 5 E E 1 1
 / /
 
3 
3 F F - -
 - -
:8x"r6   r^  c                  \    \ rS rSrSrS
S jr\S 5       r\S 5       rSS jr	\S 5       r
S	rg)	_LLRMixini	  z4Mixin class for Null model and likelihood ratio
    c                \   UR                  5       nUR                  S5      (       a  SU R                  U R                  -  -
  nU$ UR                  S5      (       d  US;   aB  S[        R
                  " U R                  U R                  -
  SU R                  -  -  5      -
  nU$ [        S5      e)z3
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
mcfr   cox)cslrrQ   z)only McFadden and Cox-Snell are available)r/  
startswithr  llnullr   expr   rA   )r1   kindprsqs      r3   pseudo_rsquared_LLRMixin.pseudo_rsquared	  s     zz|??5!!txx$++--D
 	 __U##t|';rvvt{{TXX5!dii-HIID  HIIr6   c                :    SU R                   U R                  -
  -  $ )z=
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
rc  )r  r  rt   s    r3   llr_LLRMixin.llr	  s    
 4;;)**r6   c                    U R                   nU R                  nU R                  nX2-
  nX@l        [        R
                  R                  R                  X5      $ )z
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr.  llr has a chi-squared distribution
with degrees of freedom `df_model`.
)r  r   df_resid_null
df_lr_nullr   distributionsr  r  )r1   r  df_fulldf_restrlrdfs        r3   
llr_pvalue_LLRMixin.llr_pvalue
  sK     hh--%%"""''**355r6   Nc                D   U R                   R                  SS5        U R                   R                  SS5        U R                   R                  SS5        U R                   R                  SS5        [        U S5      (       a  U ?Ub  XR                   S'   X l        X0l        g)a]  
Set the fit options for the Null (constant-only) model.

This resets the cache for related attributes, which is potentially
fragile. This only sets the option; the null model is estimated
when llnull is accessed, if llnull is not yet in the cache.

Parameters
----------
llnull : {None, float}
    If llnull is not None, then the value will be directly assigned to
    the cached attribute "llnull".
attach_results : bool
    Sets an internal flag whether the results instance of the null
    model should be attached. By default without calling this method,
    the null model results are not attached and only the loglikelihood
    value llnull is stored.
**kwargs
    Additional keyword arguments used as fit keyword arguments for the
    null model. They override the model default values.

Notes
-----
Modifies attributes of this instance, and so has no return.
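
Examples
--------
A hedged sketch: refit the null model with explicit optimizer settings
and keep its results instance attached. The keyword arguments shown
besides ``attach_results`` are ordinary ``fit`` options and are only
illustrative.

>>> res.set_null_options(attach_results=True, method="bfgs", maxiter=500)
>>> ll0 = res.llnull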
r  Nr  r  	prsquaredres_null)r  r'   r   r  _attach_nullmodel_optim_kwds_null)r1   r  attach_resultsr2   s       r3   set_null_options_LLRMixin.set_null_options
  s|    : 	$'t$d+T*4$$$*KK!!/ &r6   c                   U R                   nUR                  5       R                  5       n[        US/ 5       H  nX#	 M     UR                  " UR
                  [        R                  " U R                  5      40 UD6n[        U S0 5      R                  5       nSU;   a  UR                  S5      nO$[        US5      (       a  UR                  5       nOSn[        SSSS	S
9nUR                  U5        U(       a  UR                  " SSU0UD6nO0UR                  USSSS	S9nUR                  UR                  SSSS	S9n[        U SS5      SLa  Xl        [#        UR                  5      U l        UR&                  U l        UR*                  $ )z*
Value of the constant-only loglikelihood
_null_drop_keysr  r   _get_start_params_nullNbfgsFi'  r   )r   r   r   r   r   )r   r   r   r   r   r  r   )r   r;   r   r8   r   r&   r   r`  r   r'   r   r  r   re   r   r   r  r`   k_nullr   r  r  )	r1   r   r:   r9   mod_null
optim_kwdssp_nullopt_kwdsr  s	            r3   r  _LLRMixin.llnull=
  su   
 

##%**,5"3R8C	 9 ??5;;		0BKdK
 T#5r:??A
Z' nn^4GU455224GGvu 
#||EEHEH  ||5:,1 $ ;H  ||5:,1 $ ;H 4,e4EA$M(//*%..||r6   )r  r  r  r  r  r  )r  rg  )r   r   r   r   r   r  r   r  r  r  r  r   r   r6   r3   r  r  	  sN     + + 6 6''R - -r6   r  c                  >    \ rS rSrSrS r      SS jrS	S jrSrg)
ra  in
  a^  
A results class for the discrete dependent variable models.

.. warning::

   The following description has not been updated to this version/class.
   Where are AIC, BIC, ...? The docstring looks like a copy from discretemod.

Parameters
----------
model : A DiscreteModel instance
mlefit : instance of LikelihoodResults
    This contains the numerical optimization results as returned by
    LikelihoodModel.fit(), in a superclass of GenericLikelihoodModel.


Attributes
----------
aic : float
    Akaike information criterion.  -2*(`llf` - p) where p is the number
    of regressors including the intercept.
bic : float
    Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
    number of regressors including the intercept.
bse : ndarray
    The standard errors of the coefficients.
df_resid : float
    See model definition.
df_model : float
    See model definition.
fitted_values : ndarray
    Linear predictor XB.
llf : float
    Value of the loglikelihood
llnull : float
    Value of the constant-only loglikelihood
llr : float
    Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
llr_pvalue : float
    The chi-squared probability of getting a log-likelihood ratio
    statistic greater than llr.  llr has a chi-squared distribution
    with degrees of freedom `df_model`.
prsquared : float
    McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
c                n   Xl         UR                  U l        UR                  U l        UR                  R                  S   U l        [        U R                   SS5      n[        US5      (       a7  [        R                  " UR                  5      (       d  UR                  U l	        OE[        UR                  5      U R                   R                  -
  U-
  nX@l	        X@R                   l	        [        US5      (       a7  [        R                  " UR                  5      (       d  UR                  U l        OIU R                  R                  S   U R                  -
  U-
  U l        U R                  U R                   l        0 U l        U R                  R!                  UR                  5        [        UR                  5      nU R                  U R                   R                  -   U-   U:w  a  ["        R$                  " S[&        5        U R                  U R                  U-
  :w  a  ["        R$                  " S5        g g )Nr   r   r  r   z5df_model + k_constant + k_extra differs from k_paramsz%df_resid differs from nobs - k_params)r   r&   r%   r^   r   r8   r   r   r  r  r`   r   r*   r   r  rL   re   r?   r@   UserWarning)r1   r   r   r   r  r  s         r3   r4   &GenericLikelihoodModelResults.__init__
  s   
[[
JJ	KK%%a(	 $**i35*%%bhhu~~.F.F!NNDM6==)DJJ,A,AAGKH$M"*JJ5*%%bhhu~~.F.F!NNDM JJ,,Q/$--?'IDM"&--DJJV__-v}}% ==4::0007:hFMM 23>@ ==DII00MMAB 1r6   Nc                0    SSK Jn  Un	U" U UUUUUUU	S9n
U
$ )a  
Compute prediction results when endpoint transformation is valid.

Parameters
----------
exog : array_like, optional
    The values for which you want to predict.
transform : bool, optional
    If the model was fit via a formula, do you want to pass
    exog through the formula. Default is True. E.g., if you fit
    a model y ~ log(x1) + log(x2), and transform is True, then
    you can pass a data structure that contains x1 and x2 in
    their original form. Otherwise, you'd need to log the data
    first.
which : str
    Which statistic is to be predicted. Default is "mean".
    The available statistics and options depend on the model.
    See the model.predict docstring.
row_labels : list of str or None
    If row_labels are provided, then they will replace the generated
    labels.
average : bool
    If average is True, then the mean prediction is computed, that is,
    predictions are computed for individual exog and then the average
    over observation is used.
    If average is False, then the results are the predictions for all
    observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
    Aggregation weights, only used if average is True.
    The weights are not normalized.
**kwargs :
    Some models can take additional keyword arguments, such as offset,
    exposure or additional exog in multi-part models like zero inflated
    models.
    See the predict method of the model for the details.

Returns
-------
prediction_results : PredictionResults
    The prediction results instance contains prediction and prediction
    variance and can on demand calculate confidence intervals and
    summary dataframe for the prediction.

Notes
-----
Status: new in 0.14, experimental
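
Examples
--------
A hedged sketch; ``res`` stands in for a fitted results instance and
``exog_new`` for hypothetical data with the columns the model expects.

>>> pred = res.get_prediction(exog_new)
>>> frame = pred.summary_frame(alpha=0.05)
>>> lower_upper = pred.conf_int()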
r   )get_prediction)r%   whichr  
row_labelsaverageagg_weights	pred_kwds)&statsmodels.base._prediction_inferencer  )r1   r%   r  r  r  r  r  r2   r  r  r  s              r3   r  ,GenericLikelihoodModelResults.get_prediction
  s7    r 	J	!#	 
r6   c           	     0   SSSS/4SSSSS	/nS
SSU R                   -  /4SSU R                  -  /4/nUc&  U R                  R                  R                  S-   S-   nSSKJn  U" 5       nUR                  XUXUS9  UR                  XX$U R                  S9  U$ )a  Summarize the Regression Results

Parameters
----------
yname : str, optional
    Default is `y`
xname : list[str], optional
    Names for the exogenous variables, default is "var_xx".
    Must match the number of parameters in the model
title : str, optional
    Title for the top table. If not None, then this replaces the
    default title
alpha : float
    significance level for the confidence intervals

Returns
-------
smry : Summary instance
    This holds the summary tables and text, which can be printed or
    converted to various output formats.

See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
)zDep. Variable:N)zModel:NzMethod:zMaximum Likelihood)zDate:N)zTime:N)zNo. Observations:N)zDf Residuals:N)z	Df Model:N)zLog-Likelihood:NzAIC:z%#8.4gzBIC: ri  r   )Summary)gleftgrightynamexnametitle)r  r  r  r   )
rd  rh  r   r   r   statsmodels.iolib.summaryr  add_table_2colsadd_table_paramsr   )	r1   r  r  r  r  top_left	top_rightr  smrys	            r3   r  %GenericLikelihoodModelResults.summary  s    6 -$!5 67##/+' /x$((234x$((234	
 =JJ((11C7)CE 	6yT)#(U 	 	Ddu$(JJ 	 	0 r6   )r  r  r   r&   r%   r   r   )Nr  TNFN)NNNrU  )	r   r   r   r   r   r4   r  r  r   r   r6   r3   ra  ra  n
  s/    ,\#CN GR5r6   ra  ):
__future__r   statsmodels.compat.pythonr   	functoolsr   r?   numpyr   r  rx  scipyr   statsmodels.base.datar   statsmodels.base.optimizerr   statsmodels.base.wrapperbasewrapperwrapstatsmodels.formular	   statsmodels.stats.contrastr
   r   r   statsmodels.tools.datar   statsmodels.tools.decoratorsr   r   r   rW  r   r   r   r   statsmodels.tools.toolsr   r   statsmodels.tools.validationr   r   r   r   r   r   r   r(  ri  r   ResultsWrapperrX  populate_wrapperr^  r  ra  r   r6   r3   <module>r     s   " *      - 0 ' ' 3 
 4 
 4 4 2 3 > 
 F" F"R|<e |<@C_ CLZ" Z"|PW Pf$t22 "   .,.U" U"p DR$:K Rr6   