from __future__ import annotations

import contextlib
import math
import operator
import sys
import warnings
from collections.abc import Collection, Mapping, Sequence
from datetime import timedelta
from functools import reduce
from io import BytesIO, StringIO
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ClassVar, NoReturn, TypeVar

from polars import functions as F
from polars._utils.convert import negate_duration_string, parse_as_duration_string
from polars._utils.deprecation import (
    deprecate_renamed_parameter,
    deprecated,
    issue_deprecation_warning,
)
from polars._utils.parse import (
    parse_into_expression,
    parse_into_list_of_expressions,
    parse_predicates_constraints_into_expression,
)
from polars._utils.unstable import issue_unstable_warning, unstable
from polars._utils.various import (
    BUILDING_SPHINX_DOCS,
    extend_bool,
    find_stacklevel,
    no_default,
    normalize_filepath,
    sphinx_accessor,
    warn_null_comparison,
)
from polars._utils.wrap import wrap_expr, wrap_s
from polars.datatypes import Int64, parse_into_datatype_expr
from polars.dependencies import _check_for_numpy
from polars.dependencies import numpy as np
from polars.exceptions import (
    CustomUFuncWarning,
    OutOfBoundsError,
    PolarsInefficientMapWarning,
)
from polars.expr.array import ExprArrayNameSpace
from polars.expr.binary import ExprBinaryNameSpace
from polars.expr.categorical import ExprCatNameSpace
from polars.expr.datetime import ExprDateTimeNameSpace
from polars.expr.list import ExprListNameSpace
from polars.expr.meta import ExprMetaNameSpace
from polars.expr.name import ExprNameNameSpace
from polars.expr.string import ExprStringNameSpace
from polars.expr.struct import ExprStructNameSpace
from polars.meta import thread_pool_size

with contextlib.suppress(ImportError):  # module not available when building docs
    from polars.polars import arg_where as py_arg_where

with contextlib.suppress(ImportError):  # module not available when building docs
    from polars.polars import PyExpr

if TYPE_CHECKING:
    with contextlib.suppress(ImportError):
        from polars.polars import PySeries

    from collections.abc import Iterable
    from io import IOBase

    from polars import DataFrame, LazyFrame, Series
    from polars._typing import (
        ClosedInterval,
        FillNullStrategy,
        InterpolationMethod,
        IntoExpr,
        IntoExprColumn,
        MapElementsStrategy,
        NullBehavior,
        NumericLiteral,
        PolarsDataType,
        QuantileMethod,
        RankMethod,
        RoundMode,
        SchemaDict,
        SearchSortedSide,
        SerializationFormat,
        TemporalLiteral,
        WindowMappingStrategy,
    )
    from polars._utils.various import NoDefault

    if sys.version_info >= (3, 11):
        from typing import Concatenate, ParamSpec
    else:
        from typing_extensions import Concatenate, ParamSpec

    if sys.version_info >= (3, 13):
        from warnings import deprecated
    else:
        from typing_extensions import deprecated  # noqa: TC004

    T = TypeVar("T")
    P = ParamSpec("P")

elif BUILDING_SPHINX_DOCS:
    # redefine `property` as our accessor decorator so that Sphinx
    # documents the namespace accessors correctly
    current_module = sys.modules[__name__]
    current_module.property = sphinx_accessor


class Expr:
    """Expressions that can be used in various contexts."""

    _pyexpr: PyExpr = None  # type: ignore[assignment]
    _accessors: ClassVar[set[str]] = {
        "arr",
        "bin",
        "cat",
        "dt",
        "list",
        "meta",
        "name",
        "str",
        "struct",
    }

    @classmethod
    def _from_pyexpr(cls, pyexpr: PyExpr) -> Expr:
        expr = cls.__new__(cls)
        expr._pyexpr = pyexpr
        return expr

    def _repr_html_(self) -> str:
        return self._pyexpr.to_str()

    def __repr__(self) -> str:
        if len(expr_str := self._pyexpr.to_str()) > 30:
            expr_str = f"{expr_str[:30]}…"
        return f"<{self.__class__.__name__} [{expr_str!r}] at 0x{id(self):X}>"

    def __str__(self) -> str:
        return self._pyexpr.to_str()

    def __bool__(self) -> NoReturn:
        msg = (
            "the truth value of an Expr is ambiguous"
            "\n\n"
            "You probably got here by using a Python standard library function instead "
            "of the native expressions API.\n"
            "Here are some things you might want to try:\n"
            "- instead of `pl.col('a') and pl.col('b')`, use `pl.col('a') & pl.col('b')`\n"
            "- instead of `pl.col('a') in [y, z]`, use `pl.col('a').is_in([y, z])`\n"
            "- instead of `max(pl.col('a'), pl.col('b'))`, use "
            "`pl.max_horizontal(pl.col('a'), pl.col('b'))`\n"
        )
        raise TypeError(msg)

    def __abs__(self) -> Expr:
        return self.abs()
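    # A minimal sketch (not part of the original file) of the rewrites suggested
    # by the `__bool__` error message above; `df` and its columns are assumed:
    #
    #     df.filter(pl.col("a") & pl.col("b"))      # not: pl.col("a") and pl.col("b")
    #     df.filter(pl.col("a").is_in([1, 2, 3]))   # not: pl.col("a") in [1, 2, 3]
    #     df.select(pl.max_horizontal("a", "b"))    # not: max(pl.col("a"), pl.col("b"))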
    def __add__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr + other_pyexpr)

    def __radd__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(other_pyexpr + self._pyexpr)

    def __and__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr.and_(other_pyexpr))

    def __rand__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_expr = parse_into_expression(other)
        return wrap_expr(other_expr.and_(self._pyexpr))

    def __eq__(self, other: IntoExpr) -> Expr:  # type: ignore[override]
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.eq(other_pyexpr))

    def __floordiv__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr // other_pyexpr)

    def __rfloordiv__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(other_pyexpr // self._pyexpr)

    def __ge__(self, other: IntoExpr) -> Expr:
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.gt_eq(other_pyexpr))

    def __gt__(self, other: IntoExpr) -> Expr:
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.gt(other_pyexpr))

    def __invert__(self) -> Expr:
        return self.not_()

    def __le__(self, other: IntoExpr) -> Expr:
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.lt_eq(other_pyexpr))

    def __lt__(self, other: IntoExpr) -> Expr:
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.lt(other_pyexpr))

    def __mod__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr % other_pyexpr)

    def __rmod__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(other_pyexpr % self._pyexpr)

    def __mul__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr * other_pyexpr)

    def __rmul__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(other_pyexpr * self._pyexpr)

    def __ne__(self, other: IntoExpr) -> Expr:  # type: ignore[override]
        warn_null_comparison(other)
        other_pyexpr = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.neq(other_pyexpr))

    def __neg__(self) -> Expr:
        return wrap_expr(-self._pyexpr)

    def __or__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr.or_(other_pyexpr))

    def __ror__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_expr = parse_into_expression(other)
        return wrap_expr(other_expr.or_(self._pyexpr))

    def __pos__(self) -> Expr:
        return self

    def __pow__(self, exponent: IntoExprColumn | int | float) -> Expr:
        exponent_pyexpr = parse_into_expression(exponent)
        return wrap_expr(self._pyexpr.pow(exponent_pyexpr))

    def __rpow__(self, base: IntoExprColumn | int | float) -> Expr:
        base_pyexpr = parse_into_expression(base)
        return wrap_expr(base_pyexpr) ** self

    def __sub__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr - other_pyexpr)

    def __rsub__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(other_pyexpr - self._pyexpr)

    def __truediv__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr / other_pyexpr)

    def __rtruediv__(self, other: IntoExpr) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(other_pyexpr / self._pyexpr)

    def __xor__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr.xor_(other_pyexpr))

    def __rxor__(self, other: IntoExprColumn | int | bool) -> Expr:
        other_expr = parse_into_expression(other)
        return wrap_expr(other_expr.xor_(self._pyexpr))

    def __getstate__(self) -> bytes:
        return self._pyexpr.__getstate__()

    def __setstate__(self, state: bytes) -> None:
        self._pyexpr = F.lit(0)._pyexpr  # initialize with a dummy
        self._pyexpr.__setstate__(state)

    def __array_ufunc__(
        self, ufunc: Callable[..., Any], method: str, *inputs: Any, **kwargs: Any
    ) -> Expr:
        """Numpy universal functions."""
        if method != "__call__":
            msg = f"Only call is implemented not {method}"
            raise NotImplementedError(msg)
        # custom ufuncs (e.g. generalized ufuncs) carry a `signature` attribute
        is_custom_ufunc = getattr(ufunc, "signature", None) is not None
        if is_custom_ufunc is True:
            msg = (
                "Native numpy ufuncs are dispatched using"
                " `map_batches(ufunc, is_elementwise=True)` which is safe for native"
                " Numpy and Scipy ufuncs but custom ufuncs in a group_by context won't"
                " be properly grouped. Custom ufuncs are dispatched with"
                f" is_elementwise=False. If {ufunc.__name__} needs elementwise then"
                " please use map_batches directly."
            )
            warnings.warn(msg, CustomUFuncWarning, stacklevel=find_stacklevel())

        if len(inputs) == 1 and len(kwargs) == 0:
            if not isinstance(inputs[0], Expr):
                msg = "Input must be expression."
                raise TypeError(msg)
            return inputs[0].map_batches(ufunc, is_elementwise=not is_custom_ufunc)

        num_expr = sum(isinstance(inp, Expr) for inp in inputs)
        exprs = [
            (inp, True, i) if isinstance(inp, Expr) else (inp, False, i)
            for i, inp in enumerate(inputs)
        ]
        if num_expr == 1:
            root_expr = next(expr[0] for expr in exprs if expr[1])
        else:
            # pack all expression inputs into a struct, renaming all but the
            # first one to avoid duplicate field names
            first_renameable_expr = True
            actual_exprs = []
            for inp, is_actual_expr, index in exprs:
                if is_actual_expr:
                    if first_renameable_expr:
                        first_renameable_expr = False
                    else:
                        inp = inp.alias(f"argument_{index}")
                    actual_exprs.append(inp)
            root_expr = F.struct(actual_exprs)

        def function(s: Series) -> Series:  # pragma: no cover
            args = []
            for i, expr in enumerate(exprs):
                if expr[1] and num_expr > 1:
                    args.append(s.struct[i])
                elif expr[1]:
                    args.append(s)
                else:
                    args.append(expr[0])
            return ufunc(*args, **kwargs)

        return root_expr.map_batches(function, is_elementwise=not is_custom_ufunc)
    @classmethod
    def deserialize(
        cls,
        source: str | Path | IOBase | bytes,
        *,
        format: SerializationFormat = "binary",
    ) -> Expr:
        """
        Read a serialized expression from a file.

        Parameters
        ----------
        source
            Path to a file or a file-like object (by file-like object, we refer to
            objects that have a `read()` method, such as a file handler (e.g.
            via builtin `open` function) or `BytesIO`).
        format
            The format with which the Expr was serialized. Options:

            - `"binary"`: Deserialize from binary format (bytes). This is the default.
            - `"json"`: Deserialize from JSON format (string).

        Warnings
        --------
        This function uses :mod:`pickle` if the logical plan contains Python UDFs,
        and as such inherits the security implications. Deserializing can execute
        arbitrary code, so it should only be attempted on trusted data.

        See Also
        --------
        Expr.meta.serialize

        Notes
        -----
        Serialization is not stable across Polars versions: a LazyFrame serialized
        in one Polars version may not be deserializable in another Polars version.

        Examples
        --------
        >>> import io
        >>> expr = pl.col("foo").sum().over("bar")
        >>> bytes = expr.meta.serialize()
        >>> pl.Expr.deserialize(io.BytesIO(bytes))
        <Expr ['col("foo").sum().over([col("ba…'] at ...>
        """
        if isinstance(source, StringIO):
            source = BytesIO(source.getvalue().encode())
        elif isinstance(source, (str, Path)):
            source = normalize_filepath(source)
        elif isinstance(source, bytes):
            source = BytesIO(source)

        if format == "binary":
            deserializer = PyExpr.deserialize_binary
        elif format == "json":
            deserializer = PyExpr.deserialize_json
        else:
            msg = f"`format` must be one of {{'binary', 'json'}}, got {format!r}"
            raise ValueError(msg)

        return cls._from_pyexpr(deserializer(source))
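    # A hedged sketch (not part of the original file) of a JSON round-trip; it
    # assumes `Expr.meta.serialize` accepts the same `format` options documented
    # above for `deserialize`:
    #
    #     import io
    #     import polars as pl
    #
    #     expr = pl.col("foo").sum().over("bar")
    #     json_str = expr.meta.serialize(format="json")
    #     expr2 = pl.Expr.deserialize(io.StringIO(json_str), format="json")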
    def to_physical(self) -> Expr:
        """
        Cast to physical representation of the logical dtype.

        - :func:`polars.datatypes.Date` -> :func:`polars.datatypes.Int32`
        - :func:`polars.datatypes.Datetime` -> :func:`polars.datatypes.Int64`
        - :func:`polars.datatypes.Time` -> :func:`polars.datatypes.Int64`
        - :func:`polars.datatypes.Duration` -> :func:`polars.datatypes.Int64`
        - :func:`polars.datatypes.Categorical` -> :func:`polars.datatypes.UInt32`
        - `List(inner)` -> `List(physical of inner)`
        - `Array(inner)` -> `Array(physical of inner)`
        - `Struct(fields)` -> `Struct(physical of fields)`

        Other data types will be left unchanged.

        Warnings
        --------
        The physical representations are an implementation detail
        and not guaranteed to be stable.

        Examples
        --------
        Replicating the pandas
        `pd.factorize
        <https://pandas.pydata.org/docs/reference/api/pandas.factorize.html>`_
        function.

        >>> pl.DataFrame({"vals": ["a", "x", None, "a"]}).with_columns(
        ...     pl.col("vals").cast(pl.Categorical),
        ...     pl.col("vals")
        ...     .cast(pl.Categorical)
        ...     .to_physical()
        ...     .alias("vals_physical"),
        ... )
        shape: (4, 2)
        ┌──────┬───────────────┐
        │ vals ┆ vals_physical │
        │ ---  ┆ ---           │
        │ cat  ┆ u32           │
        ╞══════╪═══════════════╡
        │ a    ┆ 0             │
        │ x    ┆ 1             │
        │ null ┆ null          │
        │ a    ┆ 0             │
        └──────┴───────────────┘
        """
        return wrap_expr(self._pyexpr.to_physical())
    def any(self, *, ignore_nulls: bool = True) -> Expr:
        """
        Return whether any of the values in the column are `True`.

        Only works on columns of data type :class:`Boolean`.

        Parameters
        ----------
        ignore_nulls

            * If set to `True` (default), null values are ignored. If there
              are no non-null values, the output is `False`.
            * If set to `False`, `Kleene logic`_ is used to deal with nulls:
              if the column contains any null values and no `True` values,
              the output is null.

            .. _Kleene logic: https://en.wikipedia.org/wiki/Three-valued_logic

        Returns
        -------
        Expr
            Expression of data type :class:`Boolean`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [True, False],
        ...         "b": [False, False],
        ...         "c": [None, False],
        ...     }
        ... )
        >>> df.select(pl.col("*").any())
        shape: (1, 3)
        ┌──────┬───────┬───────┐
        │ a    ┆ b     ┆ c     │
        │ ---  ┆ ---   ┆ ---   │
        │ bool ┆ bool  ┆ bool  │
        ╞══════╪═══════╪═══════╡
        │ true ┆ false ┆ false │
        └──────┴───────┴───────┘

        Enable Kleene logic by setting `ignore_nulls=False`.

        >>> df.select(pl.col("*").any(ignore_nulls=False))
        shape: (1, 3)
        ┌──────┬───────┬──────┐
        │ a    ┆ b     ┆ c    │
        │ ---  ┆ ---   ┆ ---  │
        │ bool ┆ bool  ┆ bool │
        ╞══════╪═══════╪══════╡
        │ true ┆ false ┆ null │
        └──────┴───────┴──────┘
        """
        return wrap_expr(self._pyexpr.any(ignore_nulls))
    def all(self, *, ignore_nulls: bool = True) -> Expr:
        """
        Return whether all values in the column are `True`.

        Only works on columns of data type :class:`Boolean`.

        .. note::
            This method is not to be confused with the function :func:`polars.all`,
            which can be used to select all columns.

        Parameters
        ----------
        ignore_nulls

            * If set to `True` (default), null values are ignored. If there
              are no non-null values, the output is `True`.
            * If set to `False`, `Kleene logic`_ is used to deal with nulls:
              if the column contains any null values and no `False` values,
              the output is null.

            .. _Kleene logic: https://en.wikipedia.org/wiki/Three-valued_logic

        Returns
        -------
        Expr
            Expression of data type :class:`Boolean`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [True, True],
        ...         "b": [False, True],
        ...         "c": [None, True],
        ...     }
        ... )
        >>> df.select(pl.col("*").all())
        shape: (1, 3)
        ┌──────┬───────┬──────┐
        │ a    ┆ b     ┆ c    │
        │ ---  ┆ ---   ┆ ---  │
        │ bool ┆ bool  ┆ bool │
        ╞══════╪═══════╪══════╡
        │ true ┆ false ┆ true │
        └──────┴───────┴──────┘

        Enable Kleene logic by setting `ignore_nulls=False`.

        >>> df.select(pl.col("*").all(ignore_nulls=False))
        shape: (1, 3)
        ┌──────┬───────┬──────┐
        │ a    ┆ b     ┆ c    │
        │ ---  ┆ ---   ┆ ---  │
        │ bool ┆ bool  ┆ bool │
        ╞══════╪═══════╪══════╡
        │ true ┆ false ┆ null │
        └──────┴───────┴──────┘
        """
        return wrap_expr(self._pyexpr.all(ignore_nulls))
    def arg_true(self) -> Expr:
        """
        Return indices where expression evaluates `True`.

        .. warning::
            Modifies number of rows returned, so will fail in combination with other
            expressions. Use as only expression in `select` / `with_columns`.

        See Also
        --------
        Series.arg_true : Return indices where Series is True
        polars.arg_where

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 1, 2, 1]})
        >>> df.select((pl.col("a") == 1).arg_true())
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ u32 │
        ╞═════╡
        │ 0   │
        │ 1   │
        │ 3   │
        └─────┘
        """
        return wrap_expr(py_arg_where(self._pyexpr))
    def sqrt(self) -> Expr:
        """
        Compute the square root of the elements.

        Examples
        --------
        >>> df = pl.DataFrame({"values": [1.0, 2.0, 4.0]})
        >>> df.select(pl.col("values").sqrt())
        shape: (3, 1)
        ┌──────────┐
        │ values   │
        │ ---      │
        │ f64      │
        ╞══════════╡
        │ 1.0      │
        │ 1.414214 │
        │ 2.0      │
        └──────────┘
        """
        return wrap_expr(self._pyexpr.sqrt())
    def cbrt(self) -> Expr:
        """
        Compute the cube root of the elements.

        Examples
        --------
        >>> df = pl.DataFrame({"values": [1.0, 2.0, 4.0]})
        >>> df.select(pl.col("values").cbrt())
        shape: (3, 1)
        ┌──────────┐
        │ values   │
        │ ---      │
        │ f64      │
        ╞══════════╡
        │ 1.0      │
        │ 1.259921 │
        │ 1.587401 │
        └──────────┘
        """
        return wrap_expr(self._pyexpr.cbrt())
    def log10(self) -> Expr:
        """
        Compute the base 10 logarithm of the input array, element-wise.

        Examples
        --------
        >>> df = pl.DataFrame({"values": [1.0, 2.0, 4.0]})
        >>> df.select(pl.col("values").log10())
        shape: (3, 1)
        ┌─────────┐
        │ values  │
        │ ---     │
        │ f64     │
        ╞═════════╡
        │ 0.0     │
        │ 0.30103 │
        │ 0.60206 │
        └─────────┘
        """
        return self.log(10.0)
    def exp(self) -> Expr:
        """
        Compute the exponential, element-wise.

        Examples
        --------
        >>> df = pl.DataFrame({"values": [1.0, 2.0, 4.0]})
        >>> df.select(pl.col("values").exp())
        shape: (3, 1)
        ┌──────────┐
        │ values   │
        │ ---      │
        │ f64      │
        ╞══════════╡
        │ 2.718282 │
        │ 7.389056 │
        │ 54.59815 │
        └──────────┘
        """
        return wrap_expr(self._pyexpr.exp())
    def alias(self, name: str) -> Expr:
        """
        Rename the expression.

        Parameters
        ----------
        name
            The new name.

        See Also
        --------
        name.map
        name.prefix
        name.suffix

        Examples
        --------
        Rename an expression to avoid overwriting an existing column.

        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [1, 2, 3],
        ...         "b": ["x", "y", "z"],
        ...     }
        ... )
        >>> df.with_columns(
        ...     pl.col("a") + 10,
        ...     pl.col("b").str.to_uppercase().alias("c"),
        ... )
        shape: (3, 3)
        ┌─────┬─────┬─────┐
        │ a   ┆ b   ┆ c   │
        │ --- ┆ --- ┆ --- │
        │ i64 ┆ str ┆ str │
        ╞═════╪═════╪═════╡
        │ 11  ┆ x   ┆ X   │
        │ 12  ┆ y   ┆ Y   │
        │ 13  ┆ z   ┆ Z   │
        └─────┴─────┴─────┘

        Overwrite the default name of literal columns to prevent errors due to duplicate
        column names.

        >>> df.with_columns(
        ...     pl.lit(True).alias("c"),
        ...     pl.lit(4.0).alias("d"),
        ... )
        shape: (3, 4)
        ┌─────┬─────┬──────┬─────┐
        │ a   ┆ b   ┆ c    ┆ d   │
        │ --- ┆ --- ┆ ---  ┆ --- │
        │ i64 ┆ str ┆ bool ┆ f64 │
        ╞═════╪═════╪══════╪═════╡
        │ 1   ┆ x   ┆ true ┆ 4.0 │
        │ 2   ┆ y   ┆ true ┆ 4.0 │
        │ 3   ┆ z   ┆ true ┆ 4.0 │
        └─────┴─────┴──────┴─────┘
        """
        return wrap_expr(self._pyexpr.alias(name))
    def exclude(
        self,
        columns: str | PolarsDataType | Collection[str] | Collection[PolarsDataType],
        *more_columns: str | PolarsDataType,
    ) -> Expr:
        """
        Exclude columns from a multi-column expression.

        Only works after a wildcard or regex column selection, and you cannot provide
        both string column names *and* dtypes (you may prefer to use selectors instead).

        Parameters
        ----------
        columns
            The name or datatype of the column(s) to exclude. Accepts regular expression
            input. Regular expressions should start with `^` and end with `$`.
        *more_columns
            Additional names or datatypes of columns to exclude, specified as positional
            arguments.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "aa": [1, 2, 3],
        ...         "ba": ["a", "b", None],
        ...         "cc": [None, 2.5, 1.5],
        ...     }
        ... )
        >>> df
        shape: (3, 3)
        ┌─────┬──────┬──────┐
        │ aa  ┆ ba   ┆ cc   │
        │ --- ┆ ---  ┆ ---  │
        │ i64 ┆ str  ┆ f64  │
        ╞═════╪══════╪══════╡
        │ 1   ┆ a    ┆ null │
        │ 2   ┆ b    ┆ 2.5  │
        │ 3   ┆ null ┆ 1.5  │
        └─────┴──────┴──────┘

        Exclude by column name(s):

        >>> df.select(pl.all().exclude("ba"))
        shape: (3, 2)
        ┌─────┬──────┐
        │ aa  ┆ cc   │
        │ --- ┆ ---  │
        │ i64 ┆ f64  │
        ╞═════╪══════╡
        │ 1   ┆ null │
        │ 2   ┆ 2.5  │
        │ 3   ┆ 1.5  │
        └─────┴──────┘

        Exclude by regex, e.g. removing all columns whose names end with the letter "a":

        >>> df.select(pl.all().exclude("^.*a$"))
        shape: (3, 1)
        ┌──────┐
        │ cc   │
        │ ---  │
        │ f64  │
        ╞══════╡
        │ null │
        │ 2.5  │
        │ 1.5  │
        └──────┘

        Exclude by dtype(s), e.g. removing all columns of type Int64 or Float64:

        >>> df.select(pl.all().exclude([pl.Int64, pl.Float64]))
        shape: (3, 1)
        ┌──────┐
        │ ba   │
        │ ---  │
        │ str  │
        ╞══════╡
        │ a    │
        │ b    │
        │ null │
        └──────┘
        """
        return self.meta.as_selector().exclude(columns, *more_columns).as_expr()
    def pipe(
        self,
        function: Callable[Concatenate[Expr, P], T],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> T:
        r"""
        Offers a structured way to apply a sequence of user-defined functions (UDFs).

        Parameters
        ----------
        function
            Callable; will receive the expression as the first parameter,
            followed by any given args/kwargs.
        *args
            Arguments to pass to the UDF.
        **kwargs
            Keyword arguments to pass to the UDF.

        Examples
        --------
        >>> def extract_number(expr: pl.Expr) -> pl.Expr:
        ...     '''Extract the digits from a string.'''
        ...     return expr.str.extract(r"\d+", 0).cast(pl.Int64)
        >>>
        >>> def scale_negative_even(expr: pl.Expr, *, n: int = 1) -> pl.Expr:
        ...     '''Set even numbers negative, and scale by a user-supplied value.'''
        ...     expr = pl.when(expr % 2 == 0).then(-expr).otherwise(expr)
        ...     return expr * n
        >>>
        >>> df = pl.DataFrame({"val": ["a: 1", "b: 2", "c: 3", "d: 4"]})
        >>> df.with_columns(
        ...     udfs=(
        ...         pl.col("val").pipe(extract_number).pipe(scale_negative_even, n=5)
        ...     ),
        ... )
        shape: (4, 2)
        ┌──────┬──────┐
        │ val  ┆ udfs │
        │ ---  ┆ ---  │
        │ str  ┆ i64  │
        ╞══════╪══════╡
        │ a: 1 ┆ 5    │
        │ b: 2 ┆ -10  │
        │ c: 3 ┆ 15   │
        │ d: 4 ┆ -20  │
        └──────┴──────┘
        """
        return function(self, *args, **kwargs)
    def not_(self) -> Expr:
        """
        Negate a boolean expression.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [True, False, False],
        ...         "b": ["a", "b", None],
        ...     }
        ... )
        >>> df
        shape: (3, 2)
        ┌───────┬──────┐
        │ a     ┆ b    │
        │ ---   ┆ ---  │
        │ bool  ┆ str  │
        ╞═══════╪══════╡
        │ true  ┆ a    │
        │ false ┆ b    │
        │ false ┆ null │
        └───────┴──────┘
        >>> df.select(pl.col("a").not_())
        shape: (3, 1)
        ┌───────┐
        │ a     │
        │ ---   │
        │ bool  │
        ╞═══════╡
        │ false │
        │ true  │
        │ true  │
        └───────┘
        """
        return wrap_expr(self._pyexpr.not_())
    def is_null(self) -> Expr:
        """
        Returns a boolean Series indicating which values are null.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [1, 2, None, 1, 5],
        ...         "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
        ...     }
        ... )
        >>> df.with_columns(pl.all().is_null().name.suffix("_isnull"))  # nan != null
        shape: (5, 4)
        ┌──────┬─────┬──────────┬──────────┐
        │ a    ┆ b   ┆ a_isnull ┆ b_isnull │
        │ ---  ┆ --- ┆ ---      ┆ ---      │
        │ i64  ┆ f64 ┆ bool     ┆ bool     │
        ╞══════╪═════╪══════════╪══════════╡
        │ 1    ┆ 1.0 ┆ false    ┆ false    │
        │ 2    ┆ 2.0 ┆ false    ┆ false    │
        │ null ┆ NaN ┆ true     ┆ false    │
        │ 1    ┆ 1.0 ┆ false    ┆ false    │
        │ 5    ┆ 5.0 ┆ false    ┆ false    │
        └──────┴─────┴──────────┴──────────┘
        """
        return wrap_expr(self._pyexpr.is_null())
    def is_not_null(self) -> Expr:
        """
        Returns a boolean Series indicating which values are not null.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [1, 2, None, 1, 5],
        ...         "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
        ...     }
        ... )
        >>> df.with_columns(
        ...     pl.all().is_not_null().name.suffix("_not_null")  # nan != null
        ... )
        shape: (5, 4)
        ┌──────┬─────┬────────────┬────────────┐
        │ a    ┆ b   ┆ a_not_null ┆ b_not_null │
        │ ---  ┆ --- ┆ ---        ┆ ---        │
        │ i64  ┆ f64 ┆ bool       ┆ bool       │
        ╞══════╪═════╪════════════╪════════════╡
        │ 1    ┆ 1.0 ┆ true       ┆ true       │
        │ 2    ┆ 2.0 ┆ true       ┆ true       │
        │ null ┆ NaN ┆ false      ┆ true       │
        │ 1    ┆ 1.0 ┆ true       ┆ true       │
        │ 5    ┆ 5.0 ┆ true       ┆ true       │
        └──────┴─────┴────────────┴────────────┘
        """
        return wrap_expr(self._pyexpr.is_not_null())
    def is_finite(self) -> Expr:
        """
        Returns a boolean Series indicating which values are finite.

        Returns
        -------
        Expr
            Expression of data type :class:`Boolean`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "A": [1.0, 2],
        ...         "B": [3.0, float("inf")],
        ...     }
        ... )
        >>> df.select(pl.all().is_finite())
        shape: (2, 2)
        ┌──────┬───────┐
        │ A    ┆ B     │
        │ ---  ┆ ---   │
        │ bool ┆ bool  │
        ╞══════╪═══════╡
        │ true ┆ true  │
        │ true ┆ false │
        └──────┴───────┘
        """
        return wrap_expr(self._pyexpr.is_finite())
    def is_infinite(self) -> Expr:
        """
        Returns a boolean Series indicating which values are infinite.

        Returns
        -------
        Expr
            Expression of data type :class:`Boolean`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "A": [1.0, 2],
        ...         "B": [3.0, float("inf")],
        ...     }
        ... )
        >>> df.select(pl.all().is_infinite())
        shape: (2, 2)
        ┌───────┬───────┐
        │ A     ┆ B     │
        │ ---   ┆ ---   │
        │ bool  ┆ bool  │
        ╞═══════╪═══════╡
        │ false ┆ false │
        │ false ┆ true  │
        └───────┴───────┘
        """
        return wrap_expr(self._pyexpr.is_infinite())
    def is_nan(self) -> Expr:
        """
        Returns a boolean Series indicating which values are NaN.

        Notes
        -----
        Floating point `NaN` (Not A Number) should not be confused
        with missing data represented as `Null/None`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [1, 2, None, 1, 5],
        ...         "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
        ...     }
        ... )
        >>> df.with_columns(pl.col(pl.Float64).is_nan().name.suffix("_isnan"))
        shape: (5, 3)
        ┌──────┬─────┬─────────┐
        │ a    ┆ b   ┆ b_isnan │
        │ ---  ┆ --- ┆ ---     │
        │ i64  ┆ f64 ┆ bool    │
        ╞══════╪═════╪═════════╡
        │ 1    ┆ 1.0 ┆ false   │
        │ 2    ┆ 2.0 ┆ false   │
        │ null ┆ NaN ┆ true    │
        │ 1    ┆ 1.0 ┆ false   │
        │ 5    ┆ 5.0 ┆ false   │
        └──────┴─────┴─────────┘
        """
        return wrap_expr(self._pyexpr.is_nan())
    def is_not_nan(self) -> Expr:
        """
        Returns a boolean Series indicating which values are not NaN.

        Notes
        -----
        Floating point `NaN` (Not A Number) should not be confused
        with missing data represented as `Null/None`.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [1, 2, None, 1, 5],
        ...         "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
        ...     }
        ... )
        >>> df.with_columns(pl.col(pl.Float64).is_not_nan().name.suffix("_is_not_nan"))
        shape: (5, 3)
        ┌──────┬─────┬──────────────┐
        │ a    ┆ b   ┆ b_is_not_nan │
        │ ---  ┆ --- ┆ ---          │
        │ i64  ┆ f64 ┆ bool         │
        ╞══════╪═════╪══════════════╡
        │ 1    ┆ 1.0 ┆ true         │
        │ 2    ┆ 2.0 ┆ true         │
        │ null ┆ NaN ┆ false        │
        │ 1    ┆ 1.0 ┆ true         │
        │ 5    ┆ 5.0 ┆ true         │
        └──────┴─────┴──────────────┘
        """
        return wrap_expr(self._pyexpr.is_not_nan())
    def agg_groups(self) -> Expr:
        """
        Get the group indexes of the group by operation.

        Should be used in aggregation context only.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "group": [
        ...             "one",
        ...             "one",
        ...             "one",
        ...             "two",
        ...             "two",
        ...             "two",
        ...         ],
        ...         "value": [94, 95, 96, 97, 97, 99],
        ...     }
        ... )
        >>> df.group_by("group", maintain_order=True).agg(pl.col("value").agg_groups())
        shape: (2, 2)
        ┌───────┬───────────┐
        │ group ┆ value     │
        │ ---   ┆ ---       │
        │ str   ┆ list[u32] │
        ╞═══════╪═══════════╡
        │ one   ┆ [0, 1, 2] │
        │ two   ┆ [3, 4, 5] │
        └───────┴───────────┘
        """
        return wrap_expr(self._pyexpr.agg_groups())
    def count(self) -> Expr:
        """
        Return the number of non-null elements in the column.

        Returns
        -------
        Expr
            Expression of data type :class:`UInt32`.

        See Also
        --------
        len

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 2, 3], "b": [None, 4, 4]})
        >>> df.select(pl.all().count())
        shape: (1, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ u32 ┆ u32 │
        ╞═════╪═════╡
        │ 3   ┆ 2   │
        └─────┴─────┘
        """
        return wrap_expr(self._pyexpr.count())
    def len(self) -> Expr:
        """
        Return the number of elements in the column.

        Null values count towards the total.

        Returns
        -------
        Expr
            Expression of data type :class:`UInt32`.

        See Also
        --------
        count

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 2, 3], "b": [None, 4, 4]})
        >>> df.select(pl.all().len())
        shape: (1, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ u32 ┆ u32 │
        ╞═════╪═════╡
        │ 3   ┆ 3   │
        └─────┴─────┘
        """
        return wrap_expr(self._pyexpr.len())
    def slice(self, offset: int | Expr, length: int | Expr | None = None) -> Expr:
        """
        Get a slice of this expression.

        Parameters
        ----------
        offset
            Start index. Negative indexing is supported.
        length
            Length of the slice. If set to `None`, all rows starting at the offset
            will be selected.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [8, 9, 10, 11],
        ...         "b": [None, 4, 4, 4],
        ...     }
        ... )
        >>> df.select(pl.all().slice(1, 2))
        shape: (2, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 9   ┆ 4   │
        │ 10  ┆ 4   │
        └─────┴─────┘
        """
        if not isinstance(offset, Expr):
            offset = F.lit(offset)
        if not isinstance(length, Expr):
            length = F.lit(length)
        return wrap_expr(self._pyexpr.slice(offset._pyexpr, length._pyexpr))
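    # A small sketch (not part of the original file) of the negative indexing
    # mentioned in the `slice` docstring: a negative offset counts from the end,
    # and `length=None` selects everything from the offset onward:
    #
    #     df = pl.DataFrame({"a": [8, 9, 10, 11]})
    #     df.select(pl.col("a").slice(-2))  # selects [10, 11]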
    def append(self, other: IntoExpr, *, upcast: bool = True) -> Expr:
        """
        Append expressions.

        This is done by adding the chunks of `other` to this `Series`.

        Parameters
        ----------
        other
            Expression to append.
        upcast
            Cast both `Series` to the same supertype.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": [8, 9, 10],
        ...         "b": [None, 4, 4],
        ...     }
        ... )
        >>> df.select(pl.all().head(1).append(pl.all().tail(1)))
        shape: (2, 2)
        ┌─────┬──────┐
        │ a   ┆ b    │
        │ --- ┆ ---  │
        │ i64 ┆ i64  │
        ╞═════╪══════╡
        │ 8   ┆ null │
        │ 10  ┆ 4    │
        └─────┴──────┘
        """
        other_pyexpr = parse_into_expression(other)
        return wrap_expr(self._pyexpr.append(other_pyexpr, upcast))
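    # A hedged sketch (not part of the original file) of the `upcast` parameter
    # documented above: with `upcast=True` both sides are cast to a common
    # supertype (here Int64 + Float64 -> Float64):
    #
    #     df = pl.DataFrame({"a": [1, 2]})
    #     df.select(pl.col("a").append(pl.lit(2.5), upcast=True))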
    def rechunk(self) -> Expr:
        """
        Create a single chunk of memory for this Series.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 1, 2]})

        Create a Series with 3 nulls, append column `a`, then rechunk.

        >>> df.select(pl.repeat(None, 3).append(pl.col("a")).rechunk())
        shape: (6, 1)
        ┌────────┐
        │ repeat │
        │ ---    │
        │ i64    │
        ╞════════╡
        │ null   │
        │ null   │
        │ null   │
        │ 1      │
        │ 1      │
        │ 2      │
        └────────┘
        """
        return wrap_expr(self._pyexpr.rechunk())
    def drop_nulls(self) -> Expr:
        """
        Drop all null values.

        The original order of the remaining elements is preserved.

        See Also
        --------
        drop_nans

        Notes
        -----
        A null value is not the same as a NaN value.
        To drop NaN values, use :func:`drop_nans`.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1.0, None, 3.0, float("nan")]})
        >>> df.select(pl.col("a").drop_nulls())
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ f64 │
        ╞═════╡
        │ 1.0 │
        │ 3.0 │
        │ NaN │
        └─────┘
        """
        return wrap_expr(self._pyexpr.drop_nulls())
    def drop_nans(self) -> Expr:
        """
        Drop all floating point NaN values.

        The original order of the remaining elements is preserved.

        See Also
        --------
        drop_nulls

        Notes
        -----
        A NaN value is not the same as a null value.
        To drop null values, use :func:`drop_nulls`.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1.0, None, 3.0, float("nan")]})
        >>> df.select(pl.col("a").drop_nans())
        shape: (3, 1)
        ┌──────┐
        │ a    │
        │ ---  │
        │ f64  │
        ╞══════╡
        │ 1.0  │
        │ null │
        │ 3.0  │
        └──────┘
        """
        return wrap_expr(self._pyexpr.drop_nans())
  
    def cum_sum(self, *, reverse: bool = False) -> Expr:
        """
        Get an array with the cumulative sum computed at every element.

        Parameters
        ----------
        reverse
            Reverse the operation.

        Notes
        -----
        Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
        Int64 before summing to prevent overflow issues.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 2, 3, 4]})
        >>> df.with_columns(
        ...     pl.col("a").cum_sum().alias("cum_sum"),
        ...     pl.col("a").cum_sum(reverse=True).alias("cum_sum_reverse"),
        ... )
        shape: (4, 3)
        ┌─────┬─────────┬─────────────────┐
        │ a   ┆ cum_sum ┆ cum_sum_reverse │
        │ --- ┆ ---     ┆ ---             │
        │ i64 ┆ i64     ┆ i64             │
        ╞═════╪═════════╪═════════════════╡
        │ 1   ┆ 1       ┆ 10              │
        │ 2   ┆ 3       ┆ 9               │
        │ 3   ┆ 6       ┆ 7               │
        │ 4   ┆ 10      ┆ 4               │
        └─────┴─────────┴─────────────────┘

        Null values are excluded, but can also be filled by calling
        `fill_null(strategy="forward")`.

        >>> df = pl.DataFrame({"values": [None, 10, None, 8, 9, None, 16, None]})
        >>> df.with_columns(
        ...     pl.col("values").cum_sum().alias("value_cum_sum"),
        ...     pl.col("values")
        ...     .cum_sum()
        ...     .fill_null(strategy="forward")
        ...     .alias("value_cum_sum_all_filled"),
        ... )
        shape: (8, 3)
        ┌────────┬───────────────┬──────────────────────────┐
        │ values ┆ value_cum_sum ┆ value_cum_sum_all_filled │
        │ ---    ┆ ---           ┆ ---                      │
        │ i64    ┆ i64           ┆ i64                      │
        ╞════════╪═══════════════╪══════════════════════════╡
        │ null   ┆ null          ┆ null                     │
        │ 10     ┆ 10            ┆ 10                       │
        │ null   ┆ null          ┆ 10                       │
        │ 8      ┆ 18            ┆ 18                       │
        │ 9      ┆ 27            ┆ 27                       │
        │ null   ┆ null          ┆ 27                       │
        │ 16     ┆ 43            ┆ 43                       │
        │ null   ┆ null          ┆ 43                       │
        └────────┴───────────────┴──────────────────────────┘
        """
        return wrap_expr(self._pyexpr.cum_sum(reverse))
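    # A sketch (not part of the original file) of the overflow note above: small
    # integer dtypes are summed as Int64, so the result does not wrap around:
    #
    #     df = pl.DataFrame({"a": pl.Series([120, 120], dtype=pl.Int8)})
    #     df.select(pl.col("a").cum_sum())  # Int64 column: [120, 240]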
    def cum_prod(self, *, reverse: bool = False) -> Expr:
        """
        Get an array with the cumulative product computed at every element.

        Parameters
        ----------
        reverse
            Reverse the operation.

        Notes
        -----
        Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
        Int64 before summing to prevent overflow issues.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 2, 3, 4]})
        >>> df.with_columns(
        ...     pl.col("a").cum_prod().alias("cum_prod"),
        ...     pl.col("a").cum_prod(reverse=True).alias("cum_prod_reverse"),
        ... )
        shape: (4, 3)
        ┌─────┬──────────┬──────────────────┐
        │ a   ┆ cum_prod ┆ cum_prod_reverse │
        │ --- ┆ ---      ┆ ---              │
        │ i64 ┆ i64      ┆ i64              │
        ╞═════╪══════════╪══════════════════╡
        │ 1   ┆ 1        ┆ 24               │
        │ 2   ┆ 2        ┆ 24               │
        │ 3   ┆ 6        ┆ 12               │
        │ 4   ┆ 24       ┆ 4                │
        └─────┴──────────┴──────────────────┘
        """
        return wrap_expr(self._pyexpr.cum_prod(reverse))
    def cum_min(self, *, reverse: bool = False) -> Expr:
        """
        Get an array with the cumulative min computed at every element.

        Parameters
        ----------
        reverse
            Reverse the operation.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [3, 1, 2]})
        >>> df.with_columns(
        ...     pl.col("a").cum_min().alias("cum_min"),
        ...     pl.col("a").cum_min(reverse=True).alias("cum_min_reverse"),
        ... )
        shape: (3, 3)
        ┌─────┬─────────┬─────────────────┐
        │ a   ┆ cum_min ┆ cum_min_reverse │
        │ --- ┆ ---     ┆ ---             │
        │ i64 ┆ i64     ┆ i64             │
        ╞═════╪═════════╪═════════════════╡
        │ 3   ┆ 3       ┆ 1               │
        │ 1   ┆ 1       ┆ 1               │
        │ 2   ┆ 1       ┆ 2               │
        └─────┴─────────┴─────────────────┘
        """
        return wrap_expr(self._pyexpr.cum_min(reverse))
    def cum_max(self, *, reverse: bool = False) -> Expr:
        """
        Get an array with the cumulative max computed at every element.

        Parameters
        ----------
        reverse
            Reverse the operation.

        Examples
        --------
        >>> df = pl.DataFrame({"a": [1, 3, 2]})
        >>> df.with_columns(
        ...     pl.col("a").cum_max().alias("cum_max"),
        ...     pl.col("a").cum_max(reverse=True).alias("cum_max_reverse"),
        ... )
        shape: (3, 3)
        ┌─────┬─────────┬─────────────────┐
        │ a   ┆ cum_max ┆ cum_max_reverse │
        │ --- ┆ ---     ┆ ---             │
        │ i64 ┆ i64     ┆ i64             │
        ╞═════╪═════════╪═════════════════╡
        │ 1   ┆ 1       ┆ 3               │
        │ 3   ┆ 3       ┆ 3               │
        │ 2   ┆ 3       ┆ 2               │
        └─────┴─────────┴─────────────────┘

        Null values are excluded, but can also be filled by calling
        `fill_null(strategy="forward")`.

        >>> df = pl.DataFrame({"values": [None, 10, None, 8, 9, None, 16, None]})
        >>> df.with_columns(
        ...     pl.col("values").cum_max().alias("cum_max"),
        ...     pl.col("values")
        ...     .cum_max()
        ...     .fill_null(strategy="forward")
        ...     .alias("cum_max_all_filled"),
        ... )
        shape: (8, 3)
        ┌────────┬─────────┬────────────────────┐
        │ values ┆ cum_max ┆ cum_max_all_filled │
        │ ---    ┆ ---     ┆ ---                │
        │ i64    ┆ i64     ┆ i64                │
        ╞════════╪═════════╪════════════════════╡
        │ null   ┆ null    ┆ null               │
        │ 10     ┆ 10      ┆ 10                 │
        │ null   ┆ null    ┆ 10                 │
        │ 8      ┆ 10      ┆ 10                 │
        │ 9      ┆ 10      ┆ 10                 │
        │ null   ┆ null    ┆ 10                 │
        │ 16     ┆ 16      ┆ 16                 │
        │ null   ┆ null    ┆ 16                 │
        └────────┴─────────┴────────────────────┘
        """
        return wrap_expr(self._pyexpr.cum_max(reverse))
Return the cumulative count of the non-null values in the column.

Parameters
----------
reverse
    Reverse the operation.

Examples
--------
>>> df = pl.DataFrame({"a": ["x", "k", None, "d"]})
>>> df.with_columns(
...     pl.col("a").cum_count().alias("cum_count"),
...     pl.col("a").cum_count(reverse=True).alias("cum_count_reverse"),
... )
shape: (4, 3)
┌──────┬───────────┬───────────────────┐
│ a    ┆ cum_count ┆ cum_count_reverse │
│ ---  ┆ ---       ┆ ---               │
│ str  ┆ u32       ┆ u32               │
╞══════╪═══════════╪═══════════════════╡
│ x    ┆ 1         ┆ 3                 │
│ k    ┆ 2         ┆ 2                 │
│ null ┆ 2         ┆ 1                 │
│ d    ┆ 3         ┆ 1                 │
└──────┴───────────┴───────────────────┘
)r$   rd   Ú	cum_countr²  s     ru   r¾  ÚExpr.cum_count  s   € ô8 ˜Ÿ™×/Ñ/°Ó8Ó9Ð9rx   c                óH   • [        U R                  R                  5       5      $ )u^  
Rounds down to the nearest integer value.

Only works on floating point Series.

Examples
--------
>>> df = pl.DataFrame({"a": [0.3, 0.5, 1.0, 1.1]})
>>> df.select(pl.col("a").floor())
shape: (4, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
│ 0.0 │
│ 1.0 │
│ 1.0 │
└─────┘
)r$   rd   Úfloorr|   s    ru   rÁ  Ú
Expr.floor9  s   € ô, ˜Ÿ™×+Ñ+Ó-Ó.Ð.rx   c                óH   • [        U R                  R                  5       5      $ )u[  
Rounds up to the nearest integer value.

Only works on floating point Series.

Examples
--------
>>> df = pl.DataFrame({"a": [0.3, 0.5, 1.0, 1.1]})
>>> df.select(pl.col("a").ceil())
shape: (4, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
│ 1.0 │
│ 1.0 │
│ 2.0 │
└─────┘
)r$   rd   Úceilr|   s    ru   rÄ  Ú	Expr.ceilQ  ó   € ô, ˜Ÿ™×*Ñ*Ó,Ó-Ð-rx   c                óJ   • [        U R                  R                  X5      5      $ )u  
Round underlying floating point data by `decimals` digits.

The default rounding mode is "half to even" (also known as "bankers' rounding").

Parameters
----------
decimals
    Number of decimals to round by.
mode : {'half_to_even', 'half_away_from_zero'}
    Rounding mode to use.

    * *half_to_even*
        round ties to the nearest even value
    * *half_away_from_zero*
        round ties away from zero

Examples
--------
>>> df = pl.DataFrame({"a": [0.33, 0.52, 1.02, 1.17]})
>>> df.select(pl.col("a").round(1))
shape: (4, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.3 │
│ 0.5 │
│ 1.0 │
│ 1.2 │
└─────┘

>>> df = pl.DataFrame(
...     {
...         "f64": [-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5],
...         "d": ["-3.5", "-2.5", "-1.5", "-0.5", "0.5", "1.5", "2.5", "3.5"],
...     },
...     schema_overrides={"d": pl.Decimal(scale=1)},
... )
>>> df.with_columns(
...     pl.all().round(mode="half_away_from_zero").name.suffix("_away"),
...     pl.all().round(mode="half_to_even").name.suffix("_to_even"),
... )
shape: (8, 6)
┌──────┬──────────────┬──────────┬──────────────┬─────────────┬──────────────┐
│ f64  ┆ d            ┆ f64_away ┆ d_away       ┆ f64_to_even ┆ d_to_even    │
│ ---  ┆ ---          ┆ ---      ┆ ---          ┆ ---         ┆ ---          │
│ f64  ┆ decimal[*,1] ┆ f64      ┆ decimal[*,1] ┆ f64         ┆ decimal[*,1] │
╞══════╪══════════════╪══════════╪══════════════╪═════════════╪══════════════╡
│ -3.5 ┆ -3.5         ┆ -4.0     ┆ -4.0         ┆ -4.0        ┆ -4.0         │
│ -2.5 ┆ -2.5         ┆ -3.0     ┆ -3.0         ┆ -2.0        ┆ -2.0         │
│ -1.5 ┆ -1.5         ┆ -2.0     ┆ -2.0         ┆ -2.0        ┆ -2.0         │
│ -0.5 ┆ -0.5         ┆ -1.0     ┆ -1.0         ┆ -0.0        ┆ 0.0          │
│ 0.5  ┆ 0.5          ┆ 1.0      ┆ 1.0          ┆ 0.0         ┆ 0.0          │
│ 1.5  ┆ 1.5          ┆ 2.0      ┆ 2.0          ┆ 2.0         ┆ 2.0          │
│ 2.5  ┆ 2.5          ┆ 3.0      ┆ 3.0          ┆ 2.0         ┆ 2.0          │
│ 3.5  ┆ 3.5          ┆ 4.0      ┆ 4.0          ┆ 4.0         ┆ 4.0          │
└──────┴──────────────┴──────────┴──────────────┴─────────────┴──────────────┘
)r$   rd   Úround)r}   ÚdecimalsÚmodes      ru   rÈ  Ú
Expr.roundi  s   € ôz ˜Ÿ™×+Ñ+¨HÓ;Ó<Ð<rx   c                óJ   • [        U R                  R                  U5      5      $ )uì  
Round to a number of significant figures.

Parameters
----------
digits
    Number of significant figures to round to.

Examples
--------
>>> df = pl.DataFrame({"a": [0.01234, 3.333, 1234.0]})
>>> df.with_columns(pl.col("a").round_sig_figs(2).alias("round_sig_figs"))
shape: (3, 2)
┌─────────┬────────────────┐
│ a       ┆ round_sig_figs │
│ ---     ┆ ---            │
│ f64     ┆ f64            │
╞═════════╪════════════════╡
│ 0.01234 ┆ 0.012          │
│ 3.333   ┆ 3.3            │
│ 1234.0  ┆ 1200.0         │
└─────────┴────────────────┘
)r$   rd   Úround_sig_figs)r}   Údigitss     ru   rÍ  ÚExpr.round_sig_figs¨  s   € ô0 ˜Ÿ™×4Ñ4°VÓ<Ó=Ð=rx   c                ó`   • [        U5      n[        U R                  R                  U5      5      $ )u©  
Compute the dot/inner product between two Expressions.

Parameters
----------
other
    Expression to compute dot product with.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 3, 5],
...         "b": [2, 4, 6],
...     }
... )
>>> df.select(pl.col("a").dot(pl.col("b")))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 44  │
└─────┘
)r   r$   rd   Údotrž   s      ru   rÑ  ÚExpr.dotÂ  s(   € ô6 -¨UÓ3ˆÜ˜Ÿ™×)Ñ)¨,Ó7Ó8Ð8rx   c                óH   • [        U R                  R                  5       5      $ )uÝ  
Compute the most occurring value(s).

Can return multiple values.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 1, 2, 3],
...         "b": [1, 1, 2, 2],
...     }
... )
>>> df.select(pl.all().mode().first())  # doctest: +IGNORE_RESULT
shape: (2, 2)
â”Œâ”€â”€â”€â”€â”€â”¬â”€â”€â”€â”€â”€â”
â”‚ a   â”† b   â”‚
â”‚ --- â”† --- â”‚
â”‚ i64 â”† i64 â”‚
â•žâ•â•â•â•â•â•ªâ•â•â•â•â•â•¡
â”‚ 1   â”† 1   â”‚
â””â”€â”€â”€â”€â”€â”´â”€â”€â”€â”€â”€â”˜
)r$   rd   rÊ  r|   s    ru   rÊ  Ú	Expr.modeà  s   € ô0 ˜Ÿ™×*Ñ*Ó,Ó-Ð-rx   ©ÚstrictÚwrap_numericalc               óv   • [        U5      n[        U R                  R                  UR                  X#5      5      $ )u(  
Cast between data types.

Parameters
----------
dtype
    DataType to cast to.
strict
    Raise if cast is invalid on rows after predicates are pushed down.
    If `False`, invalid casts will produce null values.
wrap_numerical
    If True, numeric casts wrap overflowing values instead of
    marking the cast as invalid.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 3],
...         "b": ["4", "5", "6"],
...     }
... )
>>> df.with_columns(
...     pl.col("a").cast(pl.Float64),
...     pl.col("b").cast(pl.Int32),
... )
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ f64 ┆ i32 │
╞═════╪═════╡
│ 1.0 ┆ 4   │
│ 2.0 ┆ 5   │
│ 3.0 ┆ 6   │
└─────┴─────┘
)r'   r$   rd   ÚcastÚ_pydatatype_expr)r}   ÚdtyperÖ  r×  s       ru   rÙ  Ú	Expr.castú  s6   € ôX )¨Ó/ˆÜØL‰L×Ñ˜e×4Ñ4°fÓMó
ð 	
rx   )Ú
descendingÚ
nulls_lastrÝ  c               óJ   • [        U R                  R                  X5      5      $ )u”  
Sort this column.

When used in a projection/selection context, the whole column is sorted.
When used in a group by context, the groups are sorted.

Parameters
----------
descending
    Sort in descending order.
nulls_last
    Place null values last.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, None, 3, 2],
...     }
... )
>>> df.select(pl.col("a").sort())
shape: (4, 1)
┌──────┐
│ a    │
│ ---  │
│ i64  │
╞══════╡
│ null │
│ 1    │
│ 2    │
│ 3    │
└──────┘
>>> df.select(pl.col("a").sort(descending=True))
shape: (4, 1)
┌──────┐
│ a    │
│ ---  │
│ i64  │
╞══════╡
│ null │
│ 3    │
│ 2    │
│ 1    │
└──────┘
>>> df.select(pl.col("a").sort(nulls_last=True))
shape: (4, 1)
┌──────┐
│ a    │
│ ---  │
│ i64  │
╞══════╡
│ 1    │
│ 2    │
│ 3    │
│ null │
└──────┘

When sorting in a group by context, the groups are sorted.

>>> df = pl.DataFrame(
...     {
...         "group": ["one", "one", "one", "two", "two", "two"],
...         "value": [1, 98, 2, 3, 99, 4],
...     }
... )
>>> df.group_by("group").agg(pl.col("value").sort())  # doctest: +IGNORE_RESULT
shape: (2, 2)
┌───────┬────────────┐
│ group ┆ value      │
│ ---   ┆ ---        │
│ str   ┆ list[i64]  │
╞═══════╪════════════╡
│ two   ┆ [3, 4, 99] │
│ one   ┆ [1, 2, 98] │
└───────┴────────────┘
)r$   rd   Ú	sort_with©r}   rÝ  rÞ  s      ru   ÚsortÚ	Expr.sort+  s   € ôZ ˜Ÿ™×/Ñ/°
ÓGÓHÐHrx   c                ó`   • [        U5      n[        U R                  R                  U5      5      $ )u  
Return the `k` largest elements.

Non-null elements are always preferred over null elements. The output
is not guaranteed to be in any particular order; call :func:`sort`
after this function if you wish the output to be sorted.

This has time complexity:

.. math:: O(n)

Parameters
----------
k
    Number of elements to return.

See Also
--------
top_k_by
bottom_k
bottom_k_by

Examples
--------
Get the 5 largest values in the series.

>>> df = pl.DataFrame({"value": [1, 98, 2, 3, 99, 4]})
>>> df.select(
...     pl.col("value").top_k().alias("top_k"),
...     pl.col("value").bottom_k().alias("bottom_k"),
... )
shape: (5, 2)
┌───────┬──────────┐
│ top_k ┆ bottom_k │
│ ---   ┆ ---      │
│ i64   ┆ i64      │
╞═══════╪══════════╡
│ 4     ┆ 1        │
│ 98    ┆ 98       │
│ 2     ┆ 2        │
│ 3     ┆ 3        │
│ 99    ┆ 4        │
└───────┴──────────┘
)r   r$   rd   Útop_k©r}   ÚkÚk_pyexprs      ru   rå  Ú
Expr.top_kz  s)   € ôZ )¨Ó+ˆÜ˜Ÿ™×+Ñ+¨HÓ5Ó6Ð6rx   ú1.0.0©Úversionc               ó¢   • [        U5      n[        U5      n[        U[        U5      SS5      n[	        U R
                  R                  XTUS95      $ )uí  
Return the elements corresponding to the `k` largest elements of the `by` column(s).

Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order; call :func:`sort` after this function if you wish the
output to be sorted.

This has time complexity:

.. math:: O(n \log{n})

.. versionchanged:: 1.0.0
    The `descending` parameter was renamed to `reverse`.

Parameters
----------
by
    Column(s) used to determine the largest elements.
    Accepts expression input. Strings are parsed as column names.
k
    Number of elements to return.
reverse
    Consider the `k` smallest elements of the `by` column(s) (instead of the `k`
    largest). This can be specified per column by passing a sequence of
    booleans.

See Also
--------
top_k
bottom_k
bottom_k_by

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 3, 4, 5, 6],
...         "b": [6, 5, 4, 3, 2, 1],
...         "c": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
...     }
... )
>>> df
shape: (6, 3)
┌─────┬─────┬────────┐
│ a   ┆ b   ┆ c      │
│ --- ┆ --- ┆ ---    │
│ i64 ┆ i64 ┆ str    │
╞═════╪═════╪════════╡
│ 1   ┆ 6   ┆ Apple  │
│ 2   ┆ 5   ┆ Orange │
│ 3   ┆ 4   ┆ Apple  │
│ 4   ┆ 3   ┆ Apple  │
│ 5   ┆ 2   ┆ Banana │
│ 6   ┆ 1   ┆ Banana │
└─────┴─────┴────────┘

Get the top 2 rows by column `a` or `b`.

>>> df.select(
...     pl.all().top_k_by("a", 2).name.suffix("_top_by_a"),
...     pl.all().top_k_by("b", 2).name.suffix("_top_by_b"),
... )
shape: (2, 6)
┌────────────┬────────────┬────────────┬────────────┬────────────┬────────────┐
│ a_top_by_a ┆ b_top_by_a ┆ c_top_by_a ┆ a_top_by_b ┆ b_top_by_b ┆ c_top_by_b │
│ ---        ┆ ---        ┆ ---        ┆ ---        ┆ ---        ┆ ---        │
│ i64        ┆ i64        ┆ str        ┆ i64        ┆ i64        ┆ str        │
╞════════════╪════════════╪════════════╪════════════╪════════════╪════════════╡
│ 6          ┆ 1          ┆ Banana     ┆ 1          ┆ 6          ┆ Apple      │
│ 5          ┆ 2          ┆ Banana     ┆ 2          ┆ 5          ┆ Orange     │
└────────────┴────────────┴────────────┴────────────┴────────────┴────────────┘

Get the top 2 rows by multiple columns with given order.

>>> df.select(
...     pl.all()
...     .top_k_by(["c", "a"], 2, reverse=[False, True])
...     .name.suffix("_by_ca"),
...     pl.all()
...     .top_k_by(["c", "b"], 2, reverse=[False, True])
...     .name.suffix("_by_cb"),
... )
shape: (2, 6)
┌─────────┬─────────┬─────────┬─────────┬─────────┬─────────┐
│ a_by_ca ┆ b_by_ca ┆ c_by_ca ┆ a_by_cb ┆ b_by_cb ┆ c_by_cb │
│ ---     ┆ ---     ┆ ---     ┆ ---     ┆ ---     ┆ ---     │
│ i64     ┆ i64     ┆ str     ┆ i64     ┆ i64     ┆ str     │
╞═════════╪═════════╪═════════╪═════════╪═════════╪═════════╡
│ 2       ┆ 5       ┆ Orange  ┆ 2       ┆ 5       ┆ Orange  │
│ 5       ┆ 2       ┆ Banana  ┆ 6       ┆ 1       ┆ Banana  │
└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘

Get the top 2 rows by column `a` in each group.

>>> (
...     df.group_by("c", maintain_order=True)
...     .agg(pl.all().top_k_by("a", 2))
...     .explode(pl.all().exclude("c"))
... )
shape: (5, 3)
┌────────┬─────┬─────┐
│ c      ┆ a   ┆ b   │
│ ---    ┆ --- ┆ --- │
│ str    ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ Apple  ┆ 4   ┆ 3   │
│ Apple  ┆ 3   ┆ 4   │
│ Orange ┆ 2   ┆ 5   │
│ Banana ┆ 6   ┆ 1   │
│ Banana ┆ 5   ┆ 2   │
└────────┴─────┴─────┘
r¯  Úby©rç  r¯  )r   r   r   r†   r$   rd   Útop_k_by)r}   rî  rç  r¯  rè  Ú
by_pyexprss         ru   rð  ÚExpr.top_k_byª  sL   € ôr )¨Ó+ˆÜ3°BÓ7ˆ
ä˜g¤s¨:£¸	À4ÓHˆä˜Ÿ™×.Ñ.¨zÈwÐ.ÐWÓXÐXrx   c                ó`   • [        U5      n[        U R                  R                  U5      5      $ )u  
Return the `k` smallest elements.

Non-null elements are always preferred over null elements. The output is
not guaranteed to be in any particular order; call :func:`sort` after
this function if you wish the output to be sorted.

This has time complexity:

.. math:: O(n)

Parameters
----------
k
    Number of elements to return.

See Also
--------
top_k
top_k_by
bottom_k_by

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "value": [1, 98, 2, 3, 99, 4],
...     }
... )
>>> df.select(
...     pl.col("value").top_k().alias("top_k"),
...     pl.col("value").bottom_k().alias("bottom_k"),
... )
shape: (5, 2)
┌───────┬──────────┐
│ top_k ┆ bottom_k │
│ ---   ┆ ---      │
│ i64   ┆ i64      │
╞═══════╪══════════╡
│ 4     ┆ 1        │
│ 98    ┆ 98       │
│ 2     ┆ 2        │
│ 3     ┆ 3        │
│ 99    ┆ 4        │
└───────┴──────────┘
)r   r$   rd   Úbottom_kræ  s      ru   rô  ÚExpr.bottom_k*  s)   € ô^ )¨Ó+ˆÜ˜Ÿ™×.Ñ.¨xÓ8Ó9Ð9rx   c               ó¢   • [        U5      n[        U5      n[        U[        U5      SS5      n[	        U R
                  R                  XTUS95      $ )u  
Return the elements corresponding to the `k` smallest elements of the `by` column(s).

Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order; call :func:`sort` after this function if you wish the
output to be sorted.

This has time complexity:

.. math:: O(n \log{n})

.. versionchanged:: 1.0.0
    The `descending` parameter was renamed `reverse`.

Parameters
----------
by
    Column(s) used to determine the smallest elements.
    Accepts expression input. Strings are parsed as column names.
k
    Number of elements to return.
reverse
    Consider the `k` largest elements of the `by` column(s) (instead of the `k`
    smallest). This can be specified per column by passing a sequence of
    booleans.

See Also
--------
top_k
top_k_by
bottom_k

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 3, 4, 5, 6],
...         "b": [6, 5, 4, 3, 2, 1],
...         "c": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
...     }
... )
>>> df
shape: (6, 3)
┌─────┬─────┬────────┐
│ a   ┆ b   ┆ c      │
│ --- ┆ --- ┆ ---    │
│ i64 ┆ i64 ┆ str    │
╞═════╪═════╪════════╡
│ 1   ┆ 6   ┆ Apple  │
│ 2   ┆ 5   ┆ Orange │
│ 3   ┆ 4   ┆ Apple  │
│ 4   ┆ 3   ┆ Apple  │
│ 5   ┆ 2   ┆ Banana │
│ 6   ┆ 1   ┆ Banana │
└─────┴─────┴────────┘

Get the bottom 2 rows by column `a` or `b`.

>>> df.select(
...     pl.all().bottom_k_by("a", 2).name.suffix("_btm_by_a"),
...     pl.all().bottom_k_by("b", 2).name.suffix("_btm_by_b"),
... )
shape: (2, 6)
┌────────────┬────────────┬────────────┬────────────┬────────────┬────────────┐
│ a_btm_by_a ┆ b_btm_by_a ┆ c_btm_by_a ┆ a_btm_by_b ┆ b_btm_by_b ┆ c_btm_by_b │
│ ---        ┆ ---        ┆ ---        ┆ ---        ┆ ---        ┆ ---        │
│ i64        ┆ i64        ┆ str        ┆ i64        ┆ i64        ┆ str        │
╞════════════╪════════════╪════════════╪════════════╪════════════╪════════════╡
│ 1          ┆ 6          ┆ Apple      ┆ 6          ┆ 1          ┆ Banana     │
│ 2          ┆ 5          ┆ Orange     ┆ 5          ┆ 2          ┆ Banana     │
└────────────┴────────────┴────────────┴────────────┴────────────┴────────────┘

Get the bottom 2 rows by multiple columns with given order.

>>> df.select(
...     pl.all()
...     .bottom_k_by(["c", "a"], 2, reverse=[False, True])
...     .name.suffix("_by_ca"),
...     pl.all()
...     .bottom_k_by(["c", "b"], 2, reverse=[False, True])
...     .name.suffix("_by_cb"),
... )
shape: (2, 6)
┌─────────┬─────────┬─────────┬─────────┬─────────┬─────────┐
│ a_by_ca ┆ b_by_ca ┆ c_by_ca ┆ a_by_cb ┆ b_by_cb ┆ c_by_cb │
│ ---     ┆ ---     ┆ ---     ┆ ---     ┆ ---     ┆ ---     │
│ i64     ┆ i64     ┆ str     ┆ i64     ┆ i64     ┆ str     │
╞═════════╪═════════╪═════════╪═════════╪═════════╪═════════╡
│ 4       ┆ 3       ┆ Apple   ┆ 1       ┆ 6       ┆ Apple   │
│ 3       ┆ 4       ┆ Apple   ┆ 3       ┆ 4       ┆ Apple   │
└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘

Get the bottom 2 rows by column `a` in each group.

>>> (
...     df.group_by("c", maintain_order=True)
...     .agg(pl.all().bottom_k_by("a", 2))
...     .explode(pl.all().exclude("c"))
... )
shape: (5, 3)
┌────────┬─────┬─────┐
│ c      ┆ a   ┆ b   │
│ ---    ┆ --- ┆ --- │
│ str    ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ Apple  ┆ 1   ┆ 6   │
│ Apple  ┆ 3   ┆ 4   │
│ Orange ┆ 2   ┆ 5   │
│ Banana ┆ 5   ┆ 2   │
│ Banana ┆ 6   ┆ 1   │
└────────┴─────┴─────┘
r¯  rî  rï  )r   r   r   r†   r$   rd   Úbottom_k_by)r}   rî  rç  r¯  rè  Ú	by_pyexprs         ru   r÷  ÚExpr.bottom_k_by\  sQ   € ôr )¨Ó+ˆÜ2°2Ó6ˆ	Ü˜g¤s¨9£~°yÀ$ÓGˆÜØL‰L×$Ñ$ YÀGÐ$ÐLó
ð 	
rx   c               óJ   • [        U R                  R                  X5      5      $ )u›  
Get the index values that would sort this column.

Parameters
----------
descending
    Sort in descending order.
nulls_last
    Place null values last instead of first.

Returns
-------
Expr
    Expression of data type :class:`UInt32`.

See Also
--------
Expr.gather: Take values by index.
Expr.rank : Get the rank of each row.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [20, 10, 30],
...         "b": [1, 2, 3],
...     }
... )
>>> df.select(pl.col("a").arg_sort())
shape: (3, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 1   │
│ 0   │
│ 2   │
└─────┘

Use gather to apply the arg sort to other columns.

>>> df.select(pl.col("b").gather(pl.col("a").arg_sort()))
shape: (3, 1)
┌─────┐
│ b   │
│ --- │
│ i64 │
╞═════╡
│ 2   │
│ 1   │
│ 3   │
└─────┘
)r$   rd   Úarg_sortrá  s      ru   rû  ÚExpr.arg_sortÜ  s   € ôn ˜Ÿ™×.Ñ.¨zÓFÓGÐGrx   c                óH   • [        U R                  R                  5       5      $ )u-  
Get the index of the maximal value.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [20, 10, 30],
...     }
... )
>>> df.select(pl.col("a").arg_max())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 2   │
└─────┘
)r$   rd   Úarg_maxr|   s    ru   rþ  ÚExpr.arg_max	  ó   € ô* ˜Ÿ™×-Ñ-Ó/Ó0Ð0rx   c                óH   • [        U R                  R                  5       5      $ )u-  
Get the index of the minimal value.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [20, 10, 30],
...     }
... )
>>> df.select(pl.col("a").arg_min())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 1   │
└─────┘
)r$   rd   Úarg_minr|   s    ru   r  ÚExpr.arg_min,	  r   rx   c                ó^   • [        USS9n[        U R                  R                  U5      5      $ )ui  
Get the index of the first occurrence of a value, or ``None`` if it's not found.

Parameters
----------
element
    Value to find.

Examples
--------
>>> df = pl.DataFrame({"a": [1, None, 17]})
>>> df.select(
...     [
...         pl.col("a").index_of(17).alias("seventeen"),
...         pl.col("a").index_of(None).alias("null"),
...         pl.col("a").index_of(55).alias("fiftyfive"),
...     ]
... )
shape: (1, 3)
┌───────────┬──────┬───────────┐
│ seventeen ┆ null ┆ fiftyfive │
│ ---       ┆ ---  ┆ ---       │
│ u32       ┆ u32  ┆ u32       │
╞═══════════╪══════╪═══════════╡
│ 2         ┆ 1    ┆ null      │
└───────────┴──────┴───────────┘
Tr›   )r   r$   rd   Úindex_of)r}   ÚelementÚelement_pyexprs      ru   r  ÚExpr.index_ofC	  s*   € ô8 /¨wÀ4ÑHˆÜ˜Ÿ™×.Ñ.¨~Ó>Ó?Ð?rx   )rÝ  c               ób   • [        USSS9n[        U R                  R                  XBU5      5      $ )u°  
Find indices where elements should be inserted to maintain order.

.. math:: a[i-1] < v <= a[i]

Parameters
----------
element
    Expression or scalar value.
side : {'any', 'left', 'right'}
    If 'any', the index of the first suitable location found is given.
    If 'left', the index of the leftmost suitable location found is given.
    If 'right', the index of the rightmost suitable location found is given.
descending
    Boolean indicating whether the values are descending or not (they
    are required to be sorted either way).

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "values": [1, 2, 3, 5],
...     }
... )
>>> df.select(
...     [
...         pl.col("values").search_sorted(0).alias("zero"),
...         pl.col("values").search_sorted(3).alias("three"),
...         pl.col("values").search_sorted(6).alias("six"),
...     ]
... )
shape: (1, 3)
┌──────┬───────┬─────┐
│ zero ┆ three ┆ six │
│ ---  ┆ ---   ┆ --- │
│ u32  ┆ u32   ┆ u32 │
╞══════╪═══════╪═════╡
│ 0    ┆ 2     ┆ 4   │
└──────┴───────┴─────┘
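
The `side` parameter only matters when the needle occurs more than once.
A sketch with duplicate values (the frame is illustrative; the comments mark
the boundaries of the run of 2s):

>>> df = pl.DataFrame({"values": [1, 2, 2, 2, 5]})
>>> df.select(
...     left=pl.col("values").search_sorted(2, side="left"),    # 1
...     right=pl.col("values").search_sorted(2, side="right"),  # 4
... )  # doctest: +IGNORE_RESULT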
T)rœ   Úlist_as_series)r   r$   rd   Úsearch_sorted)r}   r  ÚsiderÝ  r  s        ru   r  ÚExpr.search_sortedb	  s4   € ô^ /Ø °Tñ
ˆô ˜Ÿ™×3Ñ3°NÈ*ÓUÓVÐVrx   )rÝ  rÞ  ÚmultithreadedÚmaintain_orderc          	     óÂ   • [        U/UQ76 n[        U[        U5      SS5      n[        U[        U5      SS5      n[        U R                  R                  XrX4U5      5      $ )uF  
Sort this column by the ordering of other columns.

When used in a projection/selection context, the whole column is sorted.
When used in a group by context, the groups are sorted.

Parameters
----------
by
    Column(s) to sort by. Accepts expression input. Strings are parsed as column
    names.
*more_by
    Additional columns to sort by, specified as positional arguments.
descending
    Sort in descending order. When sorting by multiple columns, can be specified
    per column by passing a sequence of booleans.
nulls_last
    Place null values last; can specify a single boolean applying to all columns
    or a sequence of booleans for per-column control.
multithreaded
    Sort using multiple threads.
maintain_order
    Whether the order should be maintained if elements are equal.

Examples
--------
Pass a single column name to sort by that column.

>>> df = pl.DataFrame(
...     {
...         "group": ["a", "a", "b", "b"],
...         "value1": [1, 3, 4, 2],
...         "value2": [8, 7, 6, 5],
...     }
... )
>>> df.select(pl.col("group").sort_by("value1"))
shape: (4, 1)
┌───────┐
│ group │
│ ---   │
│ str   │
╞═══════╡
│ a     │
│ b     │
│ a     │
│ b     │
└───────┘

Sorting by expressions is also supported.

>>> df.select(pl.col("group").sort_by(pl.col("value1") + pl.col("value2")))
shape: (4, 1)
┌───────┐
│ group │
│ ---   │
│ str   │
╞═══════╡
│ b     │
│ a     │
│ a     │
│ b     │
└───────┘

Sort by multiple columns by passing a list of columns.

>>> df.select(pl.col("group").sort_by(["value1", "value2"], descending=True))
shape: (4, 1)
┌───────┐
│ group │
│ ---   │
│ str   │
╞═══════╡
│ b     │
│ a     │
│ b     │
│ a     │
└───────┘

Or use positional arguments to sort by multiple columns in the same way.

>>> df.select(pl.col("group").sort_by("value1", "value2"))
shape: (4, 1)
┌───────┐
│ group │
│ ---   │
│ str   │
╞═══════╡
│ a     │
│ b     │
│ a     │
│ b     │
└───────┘

When sorting in a group by context, the groups are sorted.

>>> df.group_by("group").agg(
...     pl.col("value1").sort_by("value2")
... )  # doctest: +IGNORE_RESULT
shape: (2, 2)
┌───────┬───────────┐
│ group ┆ value1    │
│ ---   ┆ ---       │
│ str   ┆ list[i64] │
╞═══════╪═══════════╡
│ a     ┆ [3, 1]    │
│ b     ┆ [2, 4]    │
└───────┴───────────┘

Take a single row from each group where a column attains its minimal value
within that group.

>>> df.group_by("group").agg(
...     pl.all().sort_by("value2").first()
... )  # doctest: +IGNORE_RESULT
shape: (2, 3)
┌───────┬────────┬────────┐
│ group ┆ value1 ┆ value2 │
│ ---   ┆ ---    ┆ ---    │
│ str   ┆ i64    ┆ i64    │
╞═══════╪════════╪════════╡
│ a     ┆ 3      ┆ 7      │
│ b     ┆ 2      ┆ 5      │
└───────┴────────┴────────┘
rÝ  rî  rÞ  )r   r   r†   r$   rd   Úsort_by)r}   rî  rÝ  rÞ  r  r  Úmore_byrñ  s           ru   r  ÚExpr.sort_by–	  sb   € ôJ 4°BÐA¸ÒAˆ
Ü  ¬S°«_¸lÈDÓQˆ
Ü  ¬S°«_¸lÈDÓQˆ
ÜØL‰L× Ñ Ø¨
À>óó
ð 	
rx   c                ó„  • [        U[        5      (       a  [        U[        5      (       a/  [        U5      (       aX  [        U[        R
                  5      (       a9  [        R                  " [        R                  " SU[        S95      R                  nO[        U5      n[        U R                  R                  U5      5      $ )uµ  
Take values by index.

Parameters
----------
indices
    An expression that leads to a UInt32 dtyped Series.

Returns
-------
Expr
    Expression of the same data type.

See Also
--------
Expr.get : Take a single value

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group": [
...             "one",
...             "one",
...             "one",
...             "two",
...             "two",
...             "two",
...         ],
...         "value": [1, 98, 2, 3, 99, 4],
...     }
... )
>>> df.group_by("group", maintain_order=True).agg(
...     pl.col("value").gather([2, 1])
... )
shape: (2, 2)
┌───────┬───────────┐
│ group ┆ value     │
│ ---   ┆ ---       │
│ str   ┆ list[i64] │
╞═══════╪═══════════╡
│ one   ┆ [2, 98]   │
│ two   ┆ [4, 99]   │
└───────┴───────────┘
Ú )rÛ  )r!  r   ri   r(   ÚnpÚndarrayr  r  ÚplrG   r&   rd   r   r$   Úgather)r}   ÚindicesÚindices_lit_pyexprs      ru   r  ÚExpr.gather$
  s   € ô` w¤×)Ñ)´*¸WÄc×2JÑ2JÜ˜W×%Ñ%¬*°W¼b¿j¹j×*IÑ*Iä!"§¢¤r§y¢y°°WÄEÑ'JÓ!K×!SÑ!SÑä!6°wÓ!?ÐÜ˜Ÿ™×,Ñ,Ð-?Ó@ÓAÐArx   c                ó`   • [        U5      n[        U R                  R                  U5      5      $ )u5  
Return a single value by index.

Parameters
----------
index
    An expression that leads to a UInt32 index.

Returns
-------
Expr
    Expression of the same data type.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group": [
...             "one",
...             "one",
...             "one",
...             "two",
...             "two",
...             "two",
...         ],
...         "value": [1, 98, 2, 3, 99, 4],
...     }
... )
>>> df.group_by("group", maintain_order=True).agg(pl.col("value").get(1))
shape: (2, 2)
┌───────┬───────┐
│ group ┆ value │
│ ---   ┆ ---   │
│ str   ┆ i64   │
╞═══════╪═══════╡
│ one   ┆ 98    │
│ two   ┆ 99    │
└───────┴───────┘
)r   r$   rd   Úget)r}   rE  Úindex_lit_pyexprs      ru   r  ÚExpr.get\
  s+   € ôP 1°Ó7ÐÜ˜Ÿ™×)Ñ)Ð*:Ó;Ó<Ð<rx   r  )Ú
fill_valuec               ó€   • Ub  [        USS9nOSn[        U5      n[        U R                  R                  XC5      5      $ )uò  
Shift values by the given number of indices.

Parameters
----------
n
    Number of indices to shift forward. If a negative value is passed, values
    are shifted in the opposite direction instead.
fill_value
    Fill the resulting null values with this scalar value.

Notes
-----
This method is similar to the `LAG` operation in SQL when the value for `n`
is positive. With a negative value for `n`, it is similar to `LEAD`.

See Also
--------
fill_null

Examples
--------
By default, values are shifted forward by one index.

>>> df = pl.DataFrame({"a": [1, 2, 3, 4]})
>>> df.with_columns(shift=pl.col("a").shift())
shape: (4, 2)
┌─────┬───────┐
│ a   ┆ shift │
│ --- ┆ ---   │
│ i64 ┆ i64   │
╞═════╪═══════╡
│ 1   ┆ null  │
│ 2   ┆ 1     │
│ 3   ┆ 2     │
│ 4   ┆ 3     │
└─────┴───────┘

Pass a negative value to shift in the opposite direction instead.

>>> df.with_columns(shift=pl.col("a").shift(-2))
shape: (4, 2)
┌─────┬───────┐
│ a   ┆ shift │
│ --- ┆ ---   │
│ i64 ┆ i64   │
╞═════╪═══════╡
│ 1   ┆ 3     │
│ 2   ┆ 4     │
│ 3   ┆ null  │
│ 4   ┆ null  │
└─────┴───────┘

Specify `fill_value` to fill the resulting null values.

>>> df.with_columns(shift=pl.col("a").shift(-2, fill_value=100))
shape: (4, 2)
┌─────┬───────┐
│ a   ┆ shift │
│ --- ┆ ---   │
│ i64 ┆ i64   │
╞═════╪═══════╡
│ 1   ┆ 3     │
│ 2   ┆ 4     │
│ 3   ┆ 100   │
│ 4   ┆ 100   │
└─────┴───────┘
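
Following the `LAG` analogy in the notes, a common use is a difference
against the previous row; a minimal sketch (column names are illustrative):

>>> df.with_columns(
...     delta=pl.col("a") - pl.col("a").shift(1)  # first row has no LAG, so null
... )  # doctest: +IGNORE_RESULT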
NTr›   )r   r$   rd   Úshift)r}   Únr!  Úfill_value_pyexprÚn_pyexprs        ru   r#  Ú
Expr.shift‡
  sB   € ðN Ñ!Ü 5°jÈTÑ RÑà $ÐÜ(¨Ó+ˆÜ˜Ÿ™×+Ñ+¨HÓHÓIÐIrx   c                ó.  • Ub  Ub  Sn[        U5      eUc  Uc  Sn[        U5      eUS;  a  Ub  Sn[        U5      eUb.  [        USS9n[        U R                  R	                  U5      5      $ Uc   e[        U R                  R                  X#5      5      $ )u8	  
Fill null values using the specified value or strategy.

To interpolate over null values see interpolate.
See the examples below to fill nulls with an expression.

Parameters
----------
value
    Value used to fill null values.
strategy : {None, 'forward', 'backward', 'min', 'max', 'mean', 'zero', 'one'}
    Strategy used to fill null values.
limit
    Number of consecutive null values to fill when using the 'forward' or
    'backward' strategy.

See Also
--------
backward_fill
fill_nan
forward_fill

Notes
-----
A null value is not the same as a NaN value.
To fill NaN values, use :func:`fill_nan`.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, None],
...         "b": [4, None, 6],
...     }
... )
>>> df.with_columns(pl.col("b").fill_null(strategy="zero"))
shape: (3, 2)
┌──────┬─────┐
│ a    ┆ b   │
│ ---  ┆ --- │
│ i64  ┆ i64 │
╞══════╪═════╡
│ 1    ┆ 4   │
│ 2    ┆ 0   │
│ null ┆ 6   │
└──────┴─────┘
>>> df.with_columns(pl.col("b").fill_null(99))
shape: (3, 2)
┌──────┬─────┐
│ a    ┆ b   │
│ ---  ┆ --- │
│ i64  ┆ i64 │
╞══════╪═════╡
│ 1    ┆ 4   │
│ 2    ┆ 99  │
│ null ┆ 6   │
└──────┴─────┘
>>> df.with_columns(pl.col("b").fill_null(strategy="forward"))
shape: (3, 2)
┌──────┬─────┐
│ a    ┆ b   │
│ ---  ┆ --- │
│ i64  ┆ i64 │
╞══════╪═════╡
│ 1    ┆ 4   │
│ 2    ┆ 4   │
│ null ┆ 6   │
└──────┴─────┘
>>> df.with_columns(pl.col("b").fill_null(pl.col("b").median()))
shape: (3, 2)
┌──────┬─────┐
│ a    ┆ b   │
│ ---  ┆ --- │
│ i64  ┆ f64 │
╞══════╪═════╡
│ 1    ┆ 4.0 │
│ 2    ┆ 5.0 │
│ null ┆ 6.0 │
└──────┴─────┘
>>> df.with_columns(pl.all().fill_null(pl.all().median()))
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞═════╪═════╡
│ 1.0 ┆ 4.0 │
│ 2.0 ┆ 5.0 │
│ 1.5 ┆ 6.0 │
└─────┴─────┘
z*cannot specify both `value` and `strategy`z0must specify either a fill `value` or `strategy`)ÚforwardÚbackwardzHcan only specify `limit` when strategy is set to 'backward' or 'forward'Tr›   )rR  r   r$   rd   Ú	fill_nullÚfill_null_with_strategy)r}   ÚvalueÚstrategyÚlimitr’   Úvalue_pyexprs         ru   r+  ÚExpr.fill_nullÕ
  s§   € ðB Ñ Ñ!5Ø>ˆCÜ˜S“/Ð!Ø‰]˜xÑ/ØDˆCÜ˜S“/Ð!ØÐ4Ó4¸Ñ9JØ\ˆCÜ˜S“/Ð!àÑÜ0°À4ÑHˆLÜ˜TŸ\™\×3Ñ3°LÓAÓBÐBàÑ'Ð'Ð'Ü˜TŸ\™\×AÑAÀ(ÓRÓSÐSrx   c                ó^   • [        USS9n[        U R                  R                  U5      5      $ )uÈ  
Fill floating point NaN value with a fill value.

Parameters
----------
value
    Value used to fill NaN values.

See Also
--------
fill_null

Notes
-----
A NaN value is not the same as a null value.
To fill null values, use :func:`fill_null`.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1.0, None, float("nan")],
...         "b": [4.0, float("nan"), 6],
...     }
... )
>>> df.with_columns(pl.col("b").fill_nan(0))
shape: (3, 2)
┌──────┬─────┐
│ a    ┆ b   │
│ ---  ┆ --- │
│ f64  ┆ f64 │
╞══════╪═════╡
│ 1.0  ┆ 4.0 │
│ null ┆ 0.0 │
│ NaN  ┆ 6.0 │
└──────┴─────┘
Tr›   )r   r$   rd   Úfill_nan)r}   r-  r%  s      ru   r3  ÚExpr.fill_nanG  s-   € ôL 2°%ÀDÑIÐÜ˜Ÿ™×.Ñ.Ð/@ÓAÓBÐBrx   c                ó"   • U R                  SUS9$ )zó
Fill missing values with the last non-null value.

This is an alias of `.fill_null(strategy="forward")`.

Parameters
----------
limit
    The number of consecutive null values to forward fill.

See Also
--------
backward_fill
fill_null
shift
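
Examples
--------
A minimal sketch (the frame is hypothetical; `limit` caps how many
consecutive nulls are filled per gap):

>>> df = pl.DataFrame({"a": [1, None, None, 3]})
>>> df.with_columns(
...     filled=pl.col("a").forward_fill(),
...     capped=pl.col("a").forward_fill(limit=1),
... )  # doctest: +IGNORE_RESULT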
r)  ©r.  r/  ©r+  ©r}   r/  s     ru   Úforward_fillÚExpr.forward_fillp  s   € ð" ~‰~ y¸ˆ~Ð>Ð>rx   c                ó"   • U R                  SUS9$ )zô
Fill missing values with the next non-null value.

This is an alias of `.fill_null(strategy="backward")`.

Parameters
----------
limit
    The number of consecutive null values to backward fill.

See Also
--------
fill_null
forward_fill
shift
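
Examples
--------
A minimal sketch (the frame is hypothetical; each null takes the next
non-null value):

>>> df = pl.DataFrame({"a": [None, 2, None, 5]})
>>> df.with_columns(
...     filled=pl.col("a").backward_fill()
... )  # doctest: +IGNORE_RESULT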
r*  r6  r7  r8  s     ru   Úbackward_fillÚExpr.backward_fillƒ  s   € ð" ~‰~ z¸ˆ~Ð?Ð?rx   c                óH   • [        U R                  R                  5       5      $ )u  
Reverse the selection.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "A": [1, 2, 3, 4, 5],
...         "fruits": ["banana", "banana", "apple", "apple", "banana"],
...         "B": [5, 4, 3, 2, 1],
...         "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
...     }
... )
>>> df.select(
...     [
...         pl.all(),
...         pl.all().reverse().name.suffix("_reverse"),
...     ]
... )
shape: (5, 8)
┌─────┬────────┬─────┬────────┬───────────┬────────────────┬───────────┬──────────────┐
│ A   ┆ fruits ┆ B   ┆ cars   ┆ A_reverse ┆ fruits_reverse ┆ B_reverse ┆ cars_reverse │
│ --- ┆ ---    ┆ --- ┆ ---    ┆ ---       ┆ ---            ┆ ---       ┆ ---          │
│ i64 ┆ str    ┆ i64 ┆ str    ┆ i64       ┆ str            ┆ i64       ┆ str          │
╞═════╪════════╪═════╪════════╪═══════════╪════════════════╪═══════════╪══════════════╡
│ 1   ┆ banana ┆ 5   ┆ beetle ┆ 5         ┆ banana         ┆ 1         ┆ beetle       │
│ 2   ┆ banana ┆ 4   ┆ audi   ┆ 4         ┆ apple          ┆ 2         ┆ beetle       │
│ 3   ┆ apple  ┆ 3   ┆ beetle ┆ 3         ┆ apple          ┆ 3         ┆ beetle       │
│ 4   ┆ apple  ┆ 2   ┆ beetle ┆ 2         ┆ banana         ┆ 4         ┆ audi         │
│ 5   ┆ banana ┆ 1   ┆ beetle ┆ 1         ┆ banana         ┆ 5         ┆ beetle       │
└─────┴────────┴─────┴────────┴───────────┴────────────────┴───────────┴──────────────┘
)r$   rd   r¯  r|   s    ru   r¯  ÚExpr.reverse–  s   € ôB ˜Ÿ™×-Ñ-Ó/Ó0Ð0rx   c                óJ   • [        U R                  R                  U5      5      $ )u°  
Get standard deviation.

Parameters
----------
ddof
    "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,
    where N represents the number of elements.
    By default ddof is 1.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 1]})
>>> df.select(pl.col("a").std())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
└─────┘
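
To make the `ddof` divisor concrete, a sketch contrasting the sample and
population estimates (the output names are illustrative):

>>> df.select(
...     sample=pl.col("a").std(),            # divisor N - 1 (ddof=1, default)
...     population=pl.col("a").std(ddof=0),  # divisor N
... )  # doctest: +IGNORE_RESULT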
)r$   rd   Ústd©r}   Úddofs     ru   rA  ÚExpr.std¹  ó   € ô0 ˜Ÿ™×)Ñ)¨$Ó/Ó0Ð0rx   c                óJ   • [        U R                  R                  U5      5      $ )u¦  
Get variance.

Parameters
----------
ddof
    "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,
    where N represents the number of elements.
    By default ddof is 1.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 1]})
>>> df.select(pl.col("a").var())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
└─────┘
)r$   rd   ÚvarrB  s     ru   rG  ÚExpr.varÓ  rE  rx   c                óH   • [        U R                  R                  5       5      $ )u   
Get maximum value.

Examples
--------
>>> df = pl.DataFrame({"a": [-1.0, float("nan"), 1.0]})
>>> df.select(pl.col("a").max())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
└─────┘
)r$   rd   Úmaxr|   s    ru   rJ  ÚExpr.maxí  ó   € ô" ˜Ÿ™×)Ñ)Ó+Ó,Ð,rx   c                óH   • [        U R                  R                  5       5      $ )u  
Get minimum value.

Examples
--------
>>> df = pl.DataFrame({"a": [-1.0, float("nan"), 1.0]})
>>> df.select(pl.col("a").min())
shape: (1, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ -1.0 │
└──────┘
)r$   rd   Úminr|   s    ru   rN  ÚExpr.min   rL  rx   c                óH   • [        U R                  R                  5       5      $ )u¦  
Get maximum value, but propagate/poison encountered NaN values.

This differs from the default `max`, which ignores NaN values (the behaviour
of numpy's `nanmax`); `nan_max` instead propagates any NaN it encounters,
matching numpy's plain `max`.

Examples
--------
>>> df = pl.DataFrame({"a": [0.0, float("nan")]})
>>> df.select(pl.col("a").nan_max())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ NaN │
└─────┘
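
A sketch contrasting the ignoring and propagating behaviours side by side
(the output names are illustrative):

>>> df = pl.DataFrame({"a": [0.0, float("nan"), 2.0]})
>>> df.select(
...     ignoring=pl.col("a").max(),      # 2.0: NaN is skipped
...     poisoned=pl.col("a").nan_max(),  # NaN: one NaN poisons the result
... )  # doctest: +IGNORE_RESULT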
)r$   rd   Únan_maxr|   s    ru   rQ  ÚExpr.nan_max  ó   € ô( ˜Ÿ™×-Ñ-Ó/Ó0Ð0rx   c                óH   • [        U R                  R                  5       5      $ )u¦  
Get minimum value, but propagate/poison encountered NaN values.

This differs from the default `min`, which ignores NaN values (the behaviour
of numpy's `nanmin`); `nan_min` instead propagates any NaN it encounters,
matching numpy's plain `min`.

Examples
--------
>>> df = pl.DataFrame({"a": [0.0, float("nan")]})
>>> df.select(pl.col("a").nan_min())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ NaN │
└─────┘
)r$   rd   Únan_minr|   s    ru   rU  ÚExpr.nan_min)  rS  rx   c                óH   • [        U R                  R                  5       5      $ )u$  
Get sum value.

Notes
-----
* Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
  Int64 before summing to prevent overflow issues.
* If there are no non-null values, then the output is `0`.
  If you would prefer empty sums to return `None`, you can
  use `pl.when(expr.count()>0).then(expr.sum())` instead
  of `expr.sum()`.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 1]})
>>> df.select(pl.col("a").sum())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 0   │
└─────┘
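
A sketch of the empty-sum workaround from the notes above (the schema-only
frame is illustrative):

>>> df = pl.DataFrame({"a": []}, schema={"a": pl.Int64})
>>> df.select(
...     zero=pl.col("a").sum(),                                         # 0
...     none=pl.when(pl.col("a").count() > 0).then(pl.col("a").sum()),  # null
... )  # doctest: +IGNORE_RESULT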
)r$   rd   r;  r|   s    ru   r;  ÚExpr.sum?  ó   € ô4 ˜Ÿ™×)Ñ)Ó+Ó,Ð,rx   c                óH   • [        U R                  R                  5       5      $ )uï   
Get mean value.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 1]})
>>> df.select(pl.col("a").mean())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
└─────┘
)r$   rd   Úmeanr|   s    ru   r[  Ú	Expr.mean[  ó   € ô" ˜Ÿ™×*Ñ*Ó,Ó-Ð-rx   c                óH   • [        U R                  R                  5       5      $ )u  
Get median value using linear interpolation.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 1]})
>>> df.select(pl.col("a").median())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
└─────┘
)r$   rd   Úmedianr|   s    ru   r_  ÚExpr.mediann  s   € ô" ˜Ÿ™×,Ñ,Ó.Ó/Ð/rx   c                óH   • [        U R                  R                  5       5      $ )uÚ  
Compute the product of an expression.

Notes
-----
If there are no non-null values, then the output is `1`.
If you would prefer empty products to return `None`, you can
use `pl.when(expr.count()>0).then(expr.product())` instead
of `expr.product()`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").product())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 6   │
└─────┘
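
A sketch of the empty-product workaround from the notes above (the
schema-only frame is illustrative):

>>> df = pl.DataFrame({"a": []}, schema={"a": pl.Int64})
>>> df.select(
...     one=pl.col("a").product(),                                          # 1
...     none=pl.when(pl.col("a").count() > 0).then(pl.col("a").product()),  # null
... )  # doctest: +IGNORE_RESULT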
)r$   rd   Úproductr|   s    ru   rb  ÚExpr.product  s   € ô0 ˜Ÿ™×-Ñ-Ó/Ó0Ð0rx   c                óH   • [        U R                  R                  5       5      $ )u  
Count unique values.

Notes
-----
`null` is considered to be a unique value for the purposes of this operation.

Examples
--------
>>> df = pl.DataFrame({"x": [1, 1, 2, 2, 3], "y": [1, 1, 1, None, None]})
>>> df.select(
...     x_unique=pl.col("x").n_unique(),
...     y_unique=pl.col("y").n_unique(),
... )
shape: (1, 2)
┌──────────┬──────────┐
│ x_unique ┆ y_unique │
│ ---      ┆ ---      │
│ u32      ┆ u32      │
╞══════════╪══════════╡
│ 3        ┆ 2        │
└──────────┴──────────┘
)r$   rd   Ún_uniquer|   s    ru   re  ÚExpr.n_unique›  s   € ô0 ˜Ÿ™×.Ñ.Ó0Ó1Ð1rx   c                óH   • [        U R                  R                  5       5      $ )u  
Approximate count of unique values.

This is done using the HyperLogLog++ algorithm for cardinality estimation.

Examples
--------
>>> df = pl.DataFrame({"n": [1, 1, 2]})
>>> df.select(pl.col("n").approx_n_unique())
shape: (1, 1)
┌─────┐
│ n   │
│ --- │
│ u32 │
╞═════╡
│ 2   │
└─────┘
>>> df = pl.DataFrame({"n": range(1000)})
>>> df.select(
...     exact=pl.col("n").n_unique(),
...     approx=pl.col("n").approx_n_unique(),
... )  # doctest: +SKIP
shape: (1, 2)
┌───────┬────────┐
│ exact ┆ approx │
│ ---   ┆ ---    │
│ u32   ┆ u32    │
╞═══════╪════════╡
│ 1000  ┆ 1005   │
└───────┴────────┘
)r$   rd   Úapprox_n_uniquer|   s    ru   rh  ÚExpr.approx_n_uniqueµ  s   € ô@ ˜Ÿ™×5Ñ5Ó7Ó8Ð8rx   c                óH   • [        U R                  R                  5       5      $ )u  
Count null values.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [None, 1, None],
...         "b": [10, None, 300],
...         "c": [350, 650, 850],
...     }
... )
>>> df.select(pl.all().null_count())
shape: (1, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 2   ┆ 1   ┆ 0   │
└─────┴─────┴─────┘
)r$   rd   Ú
null_countr|   s    ru   rk  ÚExpr.null_count×  s   € ô. ˜Ÿ™×0Ñ0Ó2Ó3Ð3rx   c                ó(   • U R                  5       S:„  $ )un  
Check whether the expression contains one or more null values.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [None, 1, None],
...         "b": [10, None, 300],
...         "c": [350, 650, 850],
...     }
... )
>>> df.select(pl.all().has_nulls())
shape: (1, 3)
┌──────┬──────┬───────┐
│ a    ┆ b    ┆ c     │
│ ---  ┆ ---  ┆ ---   │
│ bool ┆ bool ┆ bool  │
╞══════╪══════╪═══════╡
│ true ┆ true ┆ false │
└──────┴──────┴───────┘
r   )rk  r|   s    ru   Ú	has_nullsÚExpr.has_nullsð  s   € ð. ‰Ó  1Ñ$Ð$rx   c                óH   • [        U R                  R                  5       5      $ )u  
Get index of first unique value.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [8, 9, 10],
...         "b": [None, 4, 4],
...     }
... )
>>> df.select(pl.col("a").arg_unique())
shape: (3, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 0   │
│ 1   │
│ 2   │
└─────┘
>>> df.select(pl.col("b").arg_unique())
shape: (2, 1)
┌─────┐
│ b   │
│ --- │
│ u32 │
╞═════╡
│ 0   │
│ 1   │
└─────┘
)r$   rd   Ú
arg_uniquer|   s    ru   rq  ÚExpr.arg_unique	  s   € ôD ˜Ÿ™×0Ñ0Ó2Ó3Ð3rx   )r  c               óœ   • U(       a#  [        U R                  R                  5       5      $ [        U R                  R                  5       5      $ )uK  
Get unique values of this expression.

Parameters
----------
maintain_order
    Maintain order of data. This requires more work.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2]})
>>> df.select(pl.col("a").unique())  # doctest: +IGNORE_RESULT
shape: (2, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 2   │
│ 1   │
└─────┘
>>> df.select(pl.col("a").unique(maintain_order=True))
shape: (2, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
└─────┘
)r$   rd   Úunique_stableÚunique)r}   r  s     ru   ru  ÚExpr.unique-  s8   € öB Ü˜TŸ\™\×7Ñ7Ó9Ó:Ð:Ü˜Ÿ™×,Ñ,Ó.Ó/Ð/rx   c                óH   • [        U R                  R                  5       5      $ )uô   
Get the first value.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2]})
>>> df.select(pl.col("a").first())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 1   │
└─────┘
)r$   rd   Úfirstr|   s    ru   rx  Ú
Expr.firstR  s   € ô" ˜Ÿ™×+Ñ+Ó-Ó.Ð.rx   c                óH   • [        U R                  R                  5       5      $ )uò   
Get the last value.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 3, 2]})
>>> df.select(pl.col("a").last())
shape: (1, 1)
â”Œâ”€â”€â”€â”€â”€â”
â”‚ a   â”‚
â”‚ --- â”‚
â”‚ i64 â”‚
â•žâ•â•â•â•â•â•¡
â”‚ 2   â”‚
â””â”€â”€â”€â”€â”€â”˜
)r$   rd   Úlastr|   s    ru   r{  Ú	Expr.laste  r]  rx   Úgroup_to_rows)Úorder_byrÝ  rÞ  Úmapping_strategyc          
     ó”   • Ub  [        U/UQ76 nOSnUb  [        U5      nOSn[        U R                  R                  UUUSUS95      $ )uª  
Compute expressions over the given groups.

This expression is similar to performing a group by aggregation and joining the
result back into the original DataFrame.

The outcome is similar to how `window functions
<https://www.postgresql.org/docs/current/tutorial-window.html>`_
work in PostgreSQL.

Parameters
----------
partition_by
    Column(s) to group by. Accepts expression input. Strings are parsed as
    column names.
*more_exprs
    Additional columns to group by, specified as positional arguments.
order_by
    Order the window functions/aggregations with the partitioned groups by the
    result of the expression passed to `order_by`.
descending
    In case 'order_by' is given, indicate whether to order in
    ascending or descending order.
nulls_last
    In case 'order_by' is given, indicate whether to order
    the nulls in last position.
mapping_strategy: {'group_to_rows', 'join', 'explode'}
    - group_to_rows
        If the aggregation results in multiple values, assign them back to their
        position in the DataFrame. This can only be done if the group yields
        the same elements before aggregation as after.
    - join
        Join the groups as 'List<group_dtype>' to the row positions.
        warning: this can be memory intensive.
    - explode
        Explodes the grouped data into new rows, similar to the results of
        `group_by` + `agg` + `explode`. Sorting of the given groups is required
        if the groups are not part of the window operation for the operation,
        otherwise the result would not make sense. This operation changes the
        number of rows.

Examples
--------
Pass the name of a column to compute the expression over that column.

>>> df = pl.DataFrame(
...     {
...         "a": ["a", "a", "b", "b", "b"],
...         "b": [1, 2, 3, 5, 3],
...         "c": [5, 4, 3, 2, 1],
...     }
... )
>>> df.with_columns(c_max=pl.col("c").max().over("a"))
shape: (5, 4)
┌─────┬─────┬─────┬───────┐
│ a   ┆ b   ┆ c   ┆ c_max │
│ --- ┆ --- ┆ --- ┆ ---   │
│ str ┆ i64 ┆ i64 ┆ i64   │
╞═════╪═════╪═════╪═══════╡
│ a   ┆ 1   ┆ 5   ┆ 5     │
│ a   ┆ 2   ┆ 4   ┆ 5     │
│ b   ┆ 3   ┆ 3   ┆ 3     │
│ b   ┆ 5   ┆ 2   ┆ 3     │
│ b   ┆ 3   ┆ 1   ┆ 3     │
└─────┴─────┴─────┴───────┘

Expression input is also supported.

>>> df.with_columns(c_max=pl.col("c").max().over(pl.col("b") // 2))
shape: (5, 4)
┌─────┬─────┬─────┬───────┐
│ a   ┆ b   ┆ c   ┆ c_max │
│ --- ┆ --- ┆ --- ┆ ---   │
│ str ┆ i64 ┆ i64 ┆ i64   │
╞═════╪═════╪═════╪═══════╡
│ a   ┆ 1   ┆ 5   ┆ 5     │
│ a   ┆ 2   ┆ 4   ┆ 4     │
│ b   ┆ 3   ┆ 3   ┆ 4     │
│ b   ┆ 5   ┆ 2   ┆ 2     │
│ b   ┆ 3   ┆ 1   ┆ 4     │
└─────┴─────┴─────┴───────┘

Group by multiple columns by passing multiple column names or expressions.

>>> df.with_columns(c_min=pl.col("c").min().over("a", pl.col("b") % 2))
shape: (5, 4)
┌─────┬─────┬─────┬───────┐
│ a   ┆ b   ┆ c   ┆ c_min │
│ --- ┆ --- ┆ --- ┆ ---   │
│ str ┆ i64 ┆ i64 ┆ i64   │
╞═════╪═════╪═════╪═══════╡
│ a   ┆ 1   ┆ 5   ┆ 5     │
│ a   ┆ 2   ┆ 4   ┆ 4     │
│ b   ┆ 3   ┆ 3   ┆ 1     │
│ b   ┆ 5   ┆ 2   ┆ 1     │
│ b   ┆ 3   ┆ 1   ┆ 1     │
└─────┴─────┴─────┴───────┘

You can use non-elementwise expressions with `over` too. By default they are
evaluated using row-order, but you can specify a different one using `order_by`.

>>> from datetime import date
>>> df = pl.DataFrame(
...     {
...         "store_id": ["a", "a", "b", "b"],
...         "date": [
...             date(2024, 9, 18),
...             date(2024, 9, 17),
...             date(2024, 9, 18),
...             date(2024, 9, 16),
...         ],
...         "sales": [7, 9, 8, 10],
...     }
... )
>>> df.with_columns(
...     cumulative_sales=pl.col("sales")
...     .cum_sum()
...     .over("store_id", order_by="date")
... )
shape: (4, 4)
┌──────────┬────────────┬───────┬──────────────────┐
│ store_id ┆ date       ┆ sales ┆ cumulative_sales │
│ ---      ┆ ---        ┆ ---   ┆ ---              │
│ str      ┆ date       ┆ i64   ┆ i64              │
╞══════════╪════════════╪═══════╪══════════════════╡
│ a        ┆ 2024-09-18 ┆ 7     ┆ 16               │
│ a        ┆ 2024-09-17 ┆ 9     ┆ 9                │
│ b        ┆ 2024-09-18 ┆ 8     ┆ 18               │
│ b        ┆ 2024-09-16 ┆ 10    ┆ 10               │
└──────────┴────────────┴───────┴──────────────────┘

If you don't require that the group order be preserved, then the more performant
option is to use `mapping_strategy='explode'` - be careful however to only ever
use this in a `select` statement, not a `with_columns` one.

>>> window = {
...     "partition_by": "store_id",
...     "order_by": "date",
...     "mapping_strategy": "explode",
... }
>>> df.select(
...     pl.all().over(**window),
...     cumulative_sales=pl.col("sales").cum_sum().over(**window),
... )
shape: (4, 4)
┌──────────┬────────────┬───────┬──────────────────┐
│ store_id ┆ date       ┆ sales ┆ cumulative_sales │
│ ---      ┆ ---        ┆ ---   ┆ ---              │
│ str      ┆ date       ┆ i64   ┆ i64              │
╞══════════╪════════════╪═══════╪══════════════════╡
│ a        ┆ 2024-09-17 ┆ 9     ┆ 9                │
│ a        ┆ 2024-09-18 ┆ 7     ┆ 16               │
│ b        ┆ 2024-09-16 ┆ 10    ┆ 10               │
│ b        ┆ 2024-09-18 ┆ 8     ┆ 18               │
└──────────┴────────────┴───────┴──────────────────┘
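
A sketch of the `mapping_strategy='join'` option described above, which
attaches the full aggregated group to each row as a list (output omitted):

>>> df.with_columns(
...     all_sales=pl.col("sales").over("store_id", mapping_strategy="join")
... )  # doctest: +SKIP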
        """
        if partition_by is not None:
            partition_by_pyexprs = parse_into_list_of_expressions(
                partition_by, *more_exprs
            )
        else:
            partition_by_pyexprs = None
        if order_by is not None:
            order_by_pyexprs = parse_into_list_of_expressions(order_by)
        else:
            order_by_pyexprs = None
        return wrap_expr(
            self._pyexpr.over(
                partition_by_pyexprs,
                order_by=order_by_pyexprs,
                order_by_descending=descending,
                order_by_nulls_last=nulls_last,
                mapping_strategy=mapping_strategy,
            )
        )

    def rolling(
        self,
        index_column: str,
        *,
        period: str | timedelta,
        offset: str | timedelta | None = None,
        closed: ClosedInterval = "right",
    ) -> Expr:
        """
Create rolling groups based on a temporal or integer column.

If you have a time series `<t_0, t_1, ..., t_n>`, then by default the
windows created will be

    * (t_0 - period, t_0]
    * (t_1 - period, t_1]
    * ...
    * (t_n - period, t_n]

whereas if you pass a non-default `offset`, then the windows will be

    * (t_0 + offset, t_0 + offset + period]
    * (t_1 + offset, t_1 + offset + period]
    * ...
    * (t_n + offset, t_n + offset + period]

The `period` and `offset` arguments are created either from a timedelta, or
by using the following string language:

- 1ns   (1 nanosecond)
- 1us   (1 microsecond)
- 1ms   (1 millisecond)
- 1s    (1 second)
- 1m    (1 minute)
- 1h    (1 hour)
- 1d    (1 calendar day)
- 1w    (1 calendar week)
- 1mo   (1 calendar month)
- 1q    (1 calendar quarter)
- 1y    (1 calendar year)
- 1i    (1 index count)

Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

By "calendar day", we mean the corresponding time on the next day (which may
not be 24 hours, due to daylight savings). Similarly for "calendar week",
"calendar month", "calendar quarter", and "calendar year".

Parameters
----------
index_column
    Column used to group based on the time window.
    Often of type Date/Datetime.
    This column must be sorted in ascending order.
    In case of a rolling group by on indices, dtype needs to be one of
    {UInt32, UInt64, Int32, Int64}. Note that the first three get temporarily
    cast to Int64, so if performance matters use an Int64 column.
period
    Length of the window - must be non-negative.
offset
    Offset of the window. Default is `-period`.
closed : {'right', 'left', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive).

Examples
--------
>>> dates = [
...     "2020-01-01 13:45:48",
...     "2020-01-01 16:42:13",
...     "2020-01-01 16:45:09",
...     "2020-01-02 18:12:48",
...     "2020-01-03 19:45:32",
...     "2020-01-08 23:16:43",
... ]
>>> df = pl.DataFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_columns(
...     pl.col("dt").str.strptime(pl.Datetime).set_sorted()
... )
>>> df.with_columns(
...     sum_a=pl.sum("a").rolling(index_column="dt", period="2d"),
...     min_a=pl.min("a").rolling(index_column="dt", period="2d"),
...     max_a=pl.max("a").rolling(index_column="dt", period="2d"),
... )
shape: (6, 5)
┌─────────────────────┬─────┬───────┬───────┬───────┐
│ dt                  ┆ a   ┆ sum_a ┆ min_a ┆ max_a │
│ ---                 ┆ --- ┆ ---   ┆ ---   ┆ ---   │
│ datetime[μs]        ┆ i64 ┆ i64   ┆ i64   ┆ i64   │
╞═════════════════════╪═════╪═══════╪═══════╪═══════╡
│ 2020-01-01 13:45:48 ┆ 3   ┆ 3     ┆ 3     ┆ 3     │
│ 2020-01-01 16:42:13 ┆ 7   ┆ 10    ┆ 3     ┆ 7     │
│ 2020-01-01 16:45:09 ┆ 5   ┆ 15    ┆ 3     ┆ 7     │
│ 2020-01-02 18:12:48 ┆ 9   ┆ 24    ┆ 3     ┆ 9     │
│ 2020-01-03 19:45:32 ┆ 2   ┆ 11    ┆ 2     ┆ 9     │
│ 2020-01-08 23:16:43 ┆ 1   ┆ 1     ┆ 1     ┆ 1     │
└─────────────────────┴─────┴───────┴───────┴───────┘
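
A sketch of a non-default `offset` as described above, which shifts each
window to `(t + offset, t + offset + period]` (output omitted):

>>> df.with_columns(
...     sum_a=pl.sum("a").rolling(index_column="dt", period="2d", offset="-1d"),
... )  # doctest: +SKIP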
        """
        if offset is None:
            offset = negate_duration_string(parse_as_duration_string(period))
        period = parse_as_duration_string(period)
        offset = parse_as_duration_string(offset)
        return wrap_expr(self._pyexpr.rolling(index_column, period, offset, closed))

    def is_unique(self) -> Expr:
        """
Get mask of unique values.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2]})
>>> df.select(pl.col("a").is_unique())
shape: (3, 1)
┌───────┐
│ a     │
│ ---   │
│ bool  │
╞═══════╡
│ false │
│ false │
│ true  │
└───────┘
        """
        return wrap_expr(self._pyexpr.is_unique())

    def is_first_distinct(self) -> Expr:
        """
Return a boolean mask indicating the first occurrence of each distinct value.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2, 3, 2]})
>>> df.with_columns(pl.col("a").is_first_distinct().alias("first"))
shape: (5, 2)
┌─────┬───────┐
│ a   ┆ first │
│ --- ┆ ---   │
│ i64 ┆ bool  │
╞═════╪═══════╡
│ 1   ┆ true  │
│ 1   ┆ false │
│ 2   ┆ true  │
│ 3   ┆ true  │
│ 2   ┆ false │
└─────┴───────┘
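
The mask can be used directly to keep only the first occurrence of each
distinct value (a sketch):

>>> df.filter(pl.col("a").is_first_distinct())  # doctest: +IGNORE_RESULT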
        """
        return wrap_expr(self._pyexpr.is_first_distinct())

    def is_last_distinct(self) -> Expr:
        """
Return a boolean mask indicating the last occurrence of each distinct value.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2, 3, 2]})
>>> df.with_columns(pl.col("a").is_last_distinct().alias("last"))
shape: (5, 2)
┌─────┬───────┐
│ a   ┆ last  │
│ --- ┆ ---   │
│ i64 ┆ bool  │
╞═════╪═══════╡
│ 1   ┆ false │
│ 1   ┆ true  │
│ 2   ┆ false │
│ 3   ┆ true  │
│ 2   ┆ true  │
└─────┴───────┘
        """
        return wrap_expr(self._pyexpr.is_last_distinct())

    def is_duplicated(self) -> Expr:
        """
Return a boolean mask indicating duplicated values.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2]})
>>> df.select(pl.col("a").is_duplicated())
shape: (3, 1)
┌───────┐
│ a     │
│ ---   │
│ bool  │
╞═══════╡
│ true  │
│ true  │
│ false │
└───────┘
        """
        return wrap_expr(self._pyexpr.is_duplicated())

    def peak_max(self) -> Expr:
        """
Get a boolean mask of the local maximum peaks.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 4, 5]})
>>> df.select(pl.col("a").peak_max())
shape: (5, 1)
┌───────┐
│ a     │
│ ---   │
│ bool  │
╞═══════╡
│ false │
│ false │
│ false │
│ false │
│ true  │
└───────┘
        """
        return wrap_expr(self._pyexpr.peak_max())

    def peak_min(self) -> Expr:
        """
Get a boolean mask of the local minimum peaks.

Examples
--------
>>> df = pl.DataFrame({"a": [4, 1, 3, 2, 5]})
>>> df.select(pl.col("a").peak_min())
shape: (5, 1)
┌───────┐
│ a     │
│ ---   │
│ bool  │
╞═══════╡
│ false │
│ true  │
│ false │
│ true  │
│ false │
└───────┘
        """
        return wrap_expr(self._pyexpr.peak_min())

    def quantile(
        self,
        quantile: float | Expr,
        interpolation: QuantileMethod = "nearest",
    ) -> Expr:
        """
Get quantile value.

Parameters
----------
quantile
    Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
    Interpolation method.

Examples
--------
>>> df = pl.DataFrame({"a": [0, 1, 2, 3, 4, 5]})
>>> df.select(pl.col("a").quantile(0.3))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 2.0 │
└─────┘
>>> df.select(pl.col("a").quantile(0.3, interpolation="higher"))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 2.0 │
└─────┘
>>> df.select(pl.col("a").quantile(0.3, interpolation="lower"))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
└─────┘
>>> df.select(pl.col("a").quantile(0.3, interpolation="midpoint"))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.5 │
└─────┘
>>> df.select(pl.col("a").quantile(0.3, interpolation="linear"))
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.5 │
└─────┘
        """
        quantile_pyexpr = parse_into_expression(quantile)
        return wrap_expr(self._pyexpr.quantile(quantile_pyexpr, interpolation))

    def cut(
        self,
        breaks: Sequence[float],
        *,
        labels: Sequence[str] | None = None,
        left_closed: bool = False,
        include_breaks: bool = False,
    ) -> Expr:
        """
Bin continuous values into discrete categories.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Parameters
----------
breaks
    List of unique cut points.
labels
    Names of the categories. The number of labels must be equal to the number
    of cut points plus one.
left_closed
    Set the intervals to be left-closed instead of right-closed.
include_breaks
    Include a column with the right endpoint of the bin each observation falls
    in. This will change the data type of the output from a
    :class:`Categorical` to a :class:`Struct`.

Returns
-------
Expr
    Expression of data type :class:`Categorical` if `include_breaks` is set to
    `False` (default), otherwise an expression of data type :class:`Struct`.

See Also
--------
qcut

Examples
--------
Divide a column into three categories.

>>> df = pl.DataFrame({"foo": [-2, -1, 0, 1, 2]})
>>> df.with_columns(
...     pl.col("foo").cut([-1, 1], labels=["a", "b", "c"]).alias("cut")
... )
shape: (5, 2)
┌─────┬─────┐
│ foo ┆ cut │
│ --- ┆ --- │
│ i64 ┆ cat │
╞═════╪═════╡
│ -2  ┆ a   │
│ -1  ┆ a   │
│ 0   ┆ b   │
│ 1   ┆ b   │
│ 2   ┆ c   │
└─────┴─────┘

Add both the category and the breakpoint.

>>> df.with_columns(
...     pl.col("foo").cut([-1, 1], include_breaks=True).alias("cut")
... ).unnest("cut")
shape: (5, 3)
┌─────┬────────────┬────────────┐
│ foo ┆ breakpoint ┆ category   │
│ --- ┆ ---        ┆ ---        │
│ i64 ┆ f64        ┆ cat        │
╞═════╪════════════╪════════════╡
│ -2  ┆ -1.0       ┆ (-inf, -1] │
│ -1  ┆ -1.0       ┆ (-inf, -1] │
│ 0   ┆ 1.0        ┆ (-1, 1]    │
│ 1   ┆ 1.0        ┆ (-1, 1]    │
│ 2   ┆ inf        ┆ (1, inf]   │
└─────┴────────────┴────────────┘
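
A sketch of `left_closed=True`, which turns the intervals from right-closed
`(-1, 1]` into left-closed `[-1, 1)` (output omitted):

>>> df.with_columns(
...     pl.col("foo").cut([-1, 1], left_closed=True).alias("cut")
... )  # doctest: +SKIP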
        """
        return wrap_expr(self._pyexpr.cut(breaks, labels, left_closed, include_breaks))

    def qcut(
        self,
        quantiles: Sequence[float] | int,
        *,
        labels: Sequence[str] | None = None,
        left_closed: bool = False,
        allow_duplicates: bool = False,
        include_breaks: bool = False,
    ) -> Expr:
        """
Bin continuous values into discrete categories based on their quantiles.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Parameters
----------
quantiles
    Either a list of quantile probabilities between 0 and 1 or a positive
    integer determining the number of bins with uniform probability.
labels
    Names of the categories. The number of labels must be equal to the number
    of categories.
left_closed
    Set the intervals to be left-closed instead of right-closed.
allow_duplicates
    If set to `True`, duplicates in the resulting quantiles are dropped,
    rather than raising a `DuplicateError`. This can happen even with unique
    probabilities, depending on the data.
include_breaks
    Include a column with the right endpoint of the bin each observation falls
    in. This will change the data type of the output from a
    :class:`Categorical` to a :class:`Struct`.

Returns
-------
Expr
    Expression of data type :class:`Categorical` if `include_breaks` is set to
    `False` (default), otherwise an expression of data type :class:`Struct`.

See Also
--------
cut

Examples
--------
Divide a column into three categories according to pre-defined quantile
probabilities.

>>> df = pl.DataFrame({"foo": [-2, -1, 0, 1, 2]})
>>> df.with_columns(
...     pl.col("foo").qcut([0.25, 0.75], labels=["a", "b", "c"]).alias("qcut")
... )
shape: (5, 2)
┌─────┬──────┐
│ foo ┆ qcut │
│ --- ┆ ---  │
│ i64 ┆ cat  │
╞═════╪══════╡
│ -2  ┆ a    │
│ -1  ┆ a    │
│ 0   ┆ b    │
│ 1   ┆ b    │
│ 2   ┆ c    │
└─────┴──────┘

Divide a column into two categories using uniform quantile probabilities.

>>> df.with_columns(
...     pl.col("foo")
...     .qcut(2, labels=["low", "high"], left_closed=True)
...     .alias("qcut")
... )
shape: (5, 2)
┌─────┬──────┐
│ foo ┆ qcut │
│ --- ┆ ---  │
│ i64 ┆ cat  │
╞═════╪══════╡
│ -2  ┆ low  │
│ -1  ┆ low  │
│ 0   ┆ high │
│ 1   ┆ high │
│ 2   ┆ high │
└─────┴──────┘

Add both the category and the breakpoint.

>>> df.with_columns(
...     pl.col("foo").qcut([0.25, 0.75], include_breaks=True).alias("qcut")
... ).unnest("qcut")
shape: (5, 3)
┌─────┬────────────┬────────────┐
│ foo ┆ breakpoint ┆ category   │
│ --- ┆ ---        ┆ ---        │
│ i64 ┆ f64        ┆ cat        │
╞═════╪════════════╪════════════╡
│ -2  ┆ -1.0       ┆ (-inf, -1] │
│ -1  ┆ -1.0       ┆ (-inf, -1] │
│ 0   ┆ 1.0        ┆ (-1, 1]    │
│ 1   ┆ 1.0        ┆ (-1, 1]    │
│ 2   ┆ inf        ┆ (1, inf]   │
└─────┴────────────┴────────────┘
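
A sketch of `allow_duplicates=True` for skewed data, where two of the
requested quantiles can fall on the same value (output omitted):

>>> pl.DataFrame({"foo": [0, 0, 0, 0, 1]}).with_columns(
...     pl.col("foo").qcut([0.25, 0.5], allow_duplicates=True).alias("qcut")
... )  # doctest: +SKIP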
        """
        if isinstance(quantiles, int):
            pyexpr = self._pyexpr.qcut_uniform(
                quantiles, labels, left_closed, allow_duplicates, include_breaks
            )
        else:
            pyexpr = self._pyexpr.qcut(
                quantiles, labels, left_closed, allow_duplicates, include_breaks
            )
        return wrap_expr(pyexpr)

    def rle(self) -> Expr:
        """
Compress the column data using run-length encoding.

Run-length encoding (RLE) encodes data by storing each *run* of identical values
as a single value and its length.

Returns
-------
Expr
    Expression of data type `Struct` with fields `len` of data type `UInt32`
    and `value` of the original data type.

See Also
--------
rle_id

Examples
--------
>>> df = pl.DataFrame({"a": [1, 1, 2, 1, None, 1, 3, 3]})
>>> df.select(pl.col("a").rle()).unnest("a")
shape: (6, 2)
┌─────┬───────┐
│ len ┆ value │
│ --- ┆ ---   │
│ u32 ┆ i64   │
╞═════╪═══════╡
│ 2   ┆ 1     │
│ 1   ┆ 2     │
│ 1   ┆ 1     │
│ 1   ┆ null  │
│ 1   ┆ 1     │
│ 2   ┆ 3     │
└─────┴───────┘
        """
        return wrap_expr(self._pyexpr.rle())

    def rle_id(self) -> Expr:
        """
Get a distinct integer ID for each run of identical values.

The ID starts at 0 and increases by one each time the value of the column
changes.

Returns
-------
Expr
    Expression of data type `UInt32`.

See Also
--------
rle

Notes
-----
This functionality is especially useful for defining a new group for every time
a column's value changes, rather than for every distinct value of that column.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 1, 1, 1],
...         "b": ["x", "x", None, "y", "y"],
...     }
... )
>>> df.with_columns(
...     rle_id_a=pl.col("a").rle_id(),
...     rle_id_ab=pl.struct("a", "b").rle_id(),
... )
shape: (5, 4)
┌─────┬──────┬──────────┬───────────┐
│ a   ┆ b    ┆ rle_id_a ┆ rle_id_ab │
│ --- ┆ ---  ┆ ---      ┆ ---       │
│ i64 ┆ str  ┆ u32      ┆ u32       │
╞═════╪══════╪══════════╪═══════════╡
│ 1   ┆ x    ┆ 0        ┆ 0         │
│ 2   ┆ x    ┆ 1        ┆ 1         │
│ 1   ┆ null ┆ 2        ┆ 2         │
│ 1   ┆ y    ┆ 2        ┆ 3         │
│ 1   ┆ y    ┆ 2        ┆ 3         │
└─────┴──────┴──────────┴───────────┘
        """
        return wrap_expr(self._pyexpr.rle_id())

    def filter(
        self,
        *predicates: IntoExprColumn | Iterable[IntoExprColumn],
        **constraints: Any,
    ) -> Expr:
        """
Filter the expression based on one or more predicate expressions.

The original order of the remaining elements is preserved.

Elements where the filter does not evaluate to True are discarded, including
nulls.

Mostly useful in an aggregation context. If you want to filter on a DataFrame
level, use `LazyFrame.filter`.

Parameters
----------
predicates
    Expression(s) that evaluates to a boolean Series.
constraints
    Column filters; use `name = value` to filter columns by the supplied value.
    Each constraint will behave the same as `pl.col(name).eq(value)`, and
    be implicitly joined with the other filter conditions using `&`.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group_col": ["g1", "g1", "g2"],
...         "b": [1, 2, 3],
...     }
... )
>>> df.group_by("group_col").agg(
...     lt=pl.col("b").filter(pl.col("b") < 2).sum(),
...     gte=pl.col("b").filter(pl.col("b") >= 2).sum(),
... ).sort("group_col")
shape: (2, 3)
┌───────────┬─────┬─────┐
│ group_col ┆ lt  ┆ gte │
│ ---       ┆ --- ┆ --- │
│ str       ┆ i64 ┆ i64 │
╞═══════════╪═════╪═════╡
│ g1        ┆ 1   ┆ 2   │
│ g2        ┆ 0   ┆ 3   │
└───────────┴─────┴─────┘

Filter expressions can also take constraints as keyword arguments.

>>> df = pl.DataFrame(
...     {
...         "key": ["a", "a", "a", "a", "b", "b", "b", "b", "b"],
...         "n": [1, 2, 2, 3, 1, 3, 3, 2, 3],
...     },
... )
>>> df.group_by("key").agg(
...     n_1=pl.col("n").filter(n=1).sum(),
...     n_2=pl.col("n").filter(n=2).sum(),
...     n_3=pl.col("n").filter(n=3).sum(),
... ).sort(by="key")
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ key ┆ n_1 ┆ n_2 ┆ n_3 │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ a   ┆ 1   ┆ 4   ┆ 3   │
│ b   ┆ 1   ┆ 2   ┆ 9   │
└─────┴─────┴─────┴─────┘
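
Each keyword constraint above is shorthand for an explicit equality
predicate; for example, `filter(n=2)` behaves like this sketch:

>>> df.group_by("key").agg(
...     n_2=pl.col("n").filter(pl.col("n") == 2).sum(),
... ).sort(by="key")  # doctest: +IGNORE_RESULT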
        """
        predicate = parse_predicates_constraints_into_expression(
            *predicates, **constraints
        )
        return wrap_expr(self._pyexpr.filter(predicate))

    @deprecated("`where` is deprecated; use `filter` instead.")
    def where(self, predicate: Expr) -> Expr:
        """
Filter a single column.

.. deprecated:: 0.20.4
    Use the :func:`filter` method instead.

Alias for :func:`filter`.

Parameters
----------
predicate
    Boolean expression.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group_col": ["g1", "g1", "g2"],
...         "b": [1, 2, 3],
...     }
... )
>>> df.group_by("group_col").agg(  # doctest: +SKIP
...     [
...         pl.col("b").where(pl.col("b") < 2).sum().alias("lt"),
...         pl.col("b").where(pl.col("b") >= 2).sum().alias("gte"),
...     ]
... ).sort("group_col")
shape: (2, 3)
┌───────────┬─────┬─────┐
│ group_col ┆ lt  ┆ gte │
│ ---       ┆ --- ┆ --- │
│ str       ┆ i64 ┆ i64 │
╞═══════════╪═════╪═════╡
│ g1        ┆ 1   ┆ 2   │
│ g2        ┆ 0   ┆ 3   │
└───────────┴─────┴─────┘
        """
        return self.filter(predicate)

    def map_batches(
        self,
        function: Callable[[Series], Series | Any],
        return_dtype: PolarsDataType | None = None,
        *,
        agg_list: bool = False,
        is_elementwise: bool = False,
        returns_scalar: bool = False,
    ) -> Expr:
        """
Apply a custom python function to a whole Series or sequence of Series.

The output of this custom function is presumed to be either a Series,
or a NumPy array (in which case it will be automatically converted into
a Series), or a scalar that will be converted into a Series. If the
result is a scalar and you want it to stay as a scalar, pass in
``returns_scalar=True``. If you want to apply a
custom function elementwise over single values, see :func:`map_elements`.
A reasonable use case for `map` functions is transforming the values
represented by an expression using a third-party library.

Parameters
----------
function
    Lambda/function to apply.
return_dtype
    Datatype of the output Series.

    It is recommended to set this whenever possible. If this is `None`, it tries
    to infer the datatype by calling the function with dummy data and looking at
    the output.
agg_list
    First implode when in a group-by aggregation.

    .. deprecated:: 1.32.0

        Use `expr.implode().map_batches(..)` instead.
is_elementwise
    Set to true if the operations is elementwise for better performance
    and optimization.

    An elementwise operations has unit or equal length for all inputs
    and can be ran sequentially on slices without results being affected.
returns_scalar
    If the function returns a scalar, by default it will be wrapped in
    a list in the output, since the assumption is that the function
    always returns something Series-like. If you want to keep the
    result as a scalar, set this argument to True.

Notes
-----
A UDF passed to `map_batches` must be pure, meaning that it cannot modify
or depend on state other than its arguments. Polars may call the function
with arbitrary input data.

See Also
--------
map_elements
replace

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "sine": [0.0, 1.0, 0.0, -1.0],
...         "cosine": [1.0, 0.0, -1.0, 0.0],
...     }
... )
>>> df.select(
...     pl.all().map_batches(
...         lambda x: x.to_numpy().argmax(),
...         returns_scalar=True,
...     )
... )
shape: (1, 2)
┌──────┬────────┐
│ sine ┆ cosine │
│ ---  ┆ ---    │
│ i64  ┆ i64    │
╞══════╪════════╡
│ 1    ┆ 0      │
└──────┴────────┘

Here's an example of a function that returns a scalar, where we want it
to stay as a scalar:

>>> df = pl.DataFrame(
...     {
...         "a": [0, 1, 0, 1],
...         "b": [1, 2, 3, 4],
...     }
... )
>>> df.group_by("a").agg(
...     pl.col("b").map_batches(
...         lambda x: x.max(), returns_scalar=True, return_dtype=pl.self_dtype()
...     )
... )  # doctest: +IGNORE_RESULT
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 4   │
│ 0   ┆ 3   │
└─────┴─────┘

Call a function that takes multiple arguments by creating a `struct` and
referencing its fields inside the function call.

>>> df = pl.DataFrame(
...     {
...         "a": [5, 1, 0, 3],
...         "b": [4, 2, 3, 4],
...     }
... )
>>> df.with_columns(
...     a_times_b=pl.struct("a", "b").map_batches(
...         lambda x: np.multiply(x.struct.field("a"), x.struct.field("b")),
...         return_dtype=pl.Int64,
...     )
... )
shape: (4, 3)
┌─────┬─────┬───────────┐
│ a   ┆ b   ┆ a_times_b │
│ --- ┆ --- ┆ ---       │
│ i64 ┆ i64 ┆ i64       │
╞═════╪═════╪═══════════╡
│ 5   ┆ 4   ┆ 20        │
│ 1   ┆ 2   ┆ 2         │
│ 0   ┆ 3   ┆ 0         │
│ 3   ┆ 4   ┆ 12        │
└─────┴─────┴───────────┘
        """
        if agg_list:
            msg = (
                "using 'agg_list=True' is deprecated and will be removed in 2.0\n\n"
                f"Consider using {self}.implode() instead"
            )
            raise DeprecationWarning(msg)

        def _wrap(sl: Sequence[pl.Series], *args: Any, **kwargs: Any) -> pl.Series:
            return function(sl[0], *args, **kwargs)

        return F.map_batches(
            [self],
            _wrap,
            return_dtype,
            is_elementwise=is_elementwise,
            returns_scalar=returns_scalar,
        )

    def map_elements(
        self,
        function: Callable[[Series], Series] | Callable[[Any], Any],
        return_dtype: PolarsDataType | None = None,
        *,
        skip_nulls: bool = True,
        pass_name: bool = False,
        returns_scalar: bool = False,
        strategy: MapElementsStrategy = "thread_local",
    ) -> Expr:
        """
Map a custom/user-defined function (UDF) to each element of a column.

.. warning::
    This method is much slower than the native expressions API.
    Only use it if you cannot implement your logic otherwise.

Suppose that the function is: `x ↦ sqrt(x)`:

    - For mapping elements of a series, consider:
      `pl.col("col_name").sqrt()`.
    - For mapping inner elements of lists, consider:
      `pl.col("col_name").list.eval(pl.element().sqrt())`.
    - For mapping elements of struct fields, consider:
      `pl.col("col_name").struct.field("field_name").sqrt()`.

    If you want to replace the original column or field,
    consider :meth:`.with_columns <polars.DataFrame.with_columns>`
    and :meth:`.with_fields <polars.Expr.struct.with_fields>`.

Parameters
----------
function
    Lambda/function to map.
return_dtype
    Datatype of the output Series.

    It is recommended to set this whenever possible. If this is `None`, it tries
    to infer the datatype by calling the function with dummy data and looking at
    the output.
skip_nulls
    Don't map the function over values that contain nulls (this is faster).
pass_name
    Pass the Series name to the custom function (this is more expensive).
returns_scalar

    .. deprecated:: 1.32.0
        Is ignored and will be removed in 2.0.
strategy : {'thread_local', 'threading'}
    The threading strategy to use.

    - 'thread_local': run the python function on a single thread.
    - 'threading': run the python function on separate threads. Use with
      care as this can slow performance. This might only speed up
      your code if the amount of work per element is significant
      and the python function releases the GIL (e.g. via calling
      a c function)

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Notes
-----
* Using `map_elements` is strongly discouraged as you will be effectively
  running python "for" loops, which will be very slow. Wherever possible you
  should prefer the native expression API to achieve the best performance.

* If your function is expensive and you don't want it to be called more than
  once for a given input, consider applying an `@lru_cache` decorator to it.
  If your data is suitable you may achieve *significant* speedups.

* Window function application using `over` is considered a GroupBy context
  here, so `map_elements` can be used to map functions over window groups.

* A UDF passed to `map_elements` must be pure, meaning that it cannot modify or
  depend on state other than its arguments. Polars may call the function
  with arbitrary input data.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 3, 1],
...         "b": ["a", "b", "c", "c"],
...     }
... )

The function is applied to each element of column `'a'`:

>>> df.with_columns(  # doctest: +SKIP
...     pl.col("a")
...     .map_elements(lambda x: x * 2, return_dtype=pl.self_dtype())
...     .alias("a_times_2"),
... )
shape: (4, 3)
┌─────┬─────┬───────────┐
│ a   ┆ b   ┆ a_times_2 │
│ --- ┆ --- ┆ ---       │
│ i64 ┆ str ┆ i64       │
╞═════╪═════╪═══════════╡
│ 1   ┆ a   ┆ 2         │
│ 2   ┆ b   ┆ 4         │
│ 3   ┆ c   ┆ 6         │
│ 1   ┆ c   ┆ 2         │
└─────┴─────┴───────────┘

Tip: it is better to implement this with an expression:

>>> df.with_columns(
...     (pl.col("a") * 2).alias("a_times_2"),
... )  # doctest: +IGNORE_RESULT

>>> (
...     df.lazy()
...     .group_by("b")
...     .agg(
...         pl.col("a")
...         .implode()
...         .map_elements(lambda x: x.sum(), return_dtype=pl.Int64)
...     )
...     .collect()
... )  # doctest: +IGNORE_RESULT
shape: (3, 2)
┌─────┬─────┐
│ b   ┆ a   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a   ┆ 1   │
│ b   ┆ 2   │
│ c   ┆ 4   │
└─────┴─────┘

Tip: again, it is better to implement this with an expression:

>>> (
...     df.lazy()
...     .group_by("b", maintain_order=True)
...     .agg(pl.col("a").sum())
...     .collect()
... )  # doctest: +IGNORE_RESULT

Window function application using `over` will behave as a GroupBy
context, with your function receiving individual window groups:

>>> df = pl.DataFrame(
...     {
...         "key": ["x", "x", "y", "x", "y", "z"],
...         "val": [1, 1, 1, 1, 1, 1],
...     }
... )
>>> df.with_columns(
...     scaled=pl.col("val")
...     .implode()
...     .map_elements(lambda s: s * len(s), return_dtype=pl.List(pl.Int64))
...     .explode()
...     .over("key"),
... ).sort("key")
shape: (6, 3)
┌─────┬─────┬────────┐
│ key ┆ val ┆ scaled │
│ --- ┆ --- ┆ ---    │
│ str ┆ i64 ┆ i64    │
╞═════╪═════╪════════╡
│ x   ┆ 1   ┆ 3      │
│ x   ┆ 1   ┆ 3      │
│ x   ┆ 1   ┆ 3      │
│ y   ┆ 1   ┆ 2      │
│ y   ┆ 1   ┆ 2      │
│ z   ┆ 1   ┆ 1      │
└─────┴─────┴────────┘

Note that this function would *also* be better-implemented natively:

>>> df.with_columns(
...     scaled=(pl.col("val") * pl.col("val").count()).over("key"),
... ).sort("key")  # doctest: +IGNORE_RESULT
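
A sketch of the `@lru_cache` tip from the notes above (the function shown
is a stand-in for an expensive computation):

>>> from functools import lru_cache
>>> @lru_cache(maxsize=None)
... def expensive(x):
...     return x * 2
>>> df.with_columns(
...     doubled=pl.col("val").map_elements(expensive, return_dtype=pl.Int64)
... )  # doctest: +SKIP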

        """
        if strategy == "threading":
            issue_unstable_warning(
                "the 'threading' strategy for `map_elements` is considered unstable."
            )

        from polars._utils.udfs import warn_on_inefficient_map

        root_names = self.meta.root_names()
        if len(root_names) >= 1:
            warn_on_inefficient_map(function, columns=root_names, map_target="expr")

        if pass_name:

            def wrap_f(x: Series) -> Series:
                def inner(s: Series | Any) -> Series:
                    if isinstance(s, pl.Series):
                        s = s.rename(x.name)
                    return function(s)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", PolarsInefficientMapWarning)
                    return x.map_elements(
                        inner, return_dtype=return_dtype, skip_nulls=skip_nulls
                    )

        else:

            def wrap_f(x: Series) -> Series:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", PolarsInefficientMapWarning)
                    return x.map_elements(
                        function, return_dtype=return_dtype, skip_nulls=skip_nulls
                    )

        if strategy == "thread_local":
            return self.map_batches(
                wrap_f, return_dtype=return_dtype, is_elementwise=False
            )
        elif strategy == "threading":

            def wrap_threading(x: Series) -> Series:
                def get_lazy_promise(df: DataFrame) -> LazyFrame:
                    return df.lazy().select(
                        F.col("x").map_batches(
                            wrap_f, return_dtype=return_dtype, is_elementwise=False
                        )
                    )

                df = x.to_frame("x")
                if x.len() == 0:
                    return get_lazy_promise(df).collect().to_series()

                n_threads = thread_pool_size()
                chunk_size = x.len() // n_threads
                remainder = x.len() % n_threads
                if chunk_size == 0:
                    chunk_sizes = [1 for _ in range(remainder)]
                else:
                    chunk_sizes = [
                        chunk_size + 1 if i < remainder else chunk_size
                        for i in range(n_threads)
                    ]

                partitions = []
                b = 0
                for step in chunk_sizes:
                    a = b
                    b = a + step
                    partitions.append(get_lazy_promise(df[a:b, :]))

                out = [df.to_series() for df in F.collect_all(partitions)]
                return F.concat(out, rechunk=False)

            return self.map_batches(
                wrap_threading, return_dtype=return_dtype, is_elementwise=False
            )
        else:
            msg = f"strategy {strategy!r} is not supported"
            raise ValueError(msg)

    def flatten(self) -> Expr:
        """
Flatten a list or string column.

Alias for :func:`Expr.list.explode`.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group": ["a", "b", "b"],
...         "values": [[1, 2], [2, 3], [4]],
...     }
... )
>>> df.group_by("group").agg(pl.col("values").flatten())  # doctest: +SKIP
shape: (2, 2)
┌───────┬───────────┐
│ group ┆ values    │
│ ---   ┆ ---       │
│ str   ┆ list[i64] │
╞═══════╪═══════════╡
│ a     ┆ [1, 2]    │
│ b     ┆ [2, 3, 4] │
└───────┴───────────┘
        """
        return wrap_expr(self._pyexpr.explode())

    def explode(self) -> Expr:
        """
Explode a list expression.

This means that every item is expanded to a new row.

Returns
-------
Expr
    Expression with the data type of the list elements.

See Also
--------
Expr.list.explode : Explode a list column.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "group": ["a", "b"],
...         "values": [
...             [1, 2],
...             [3, 4],
...         ],
...     }
... )
>>> df.select(pl.col("values").explode())
shape: (4, 1)
┌────────┐
│ values │
│ ---    │
│ i64    │
╞════════╡
│ 1      │
│ 2      │
│ 3      │
│ 4      │
└────────┘
        """
        return wrap_expr(self._pyexpr.explode())

    def implode(self) -> Expr:
        """
Aggregate values into a list.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, 3],
...         "b": [4, 5, 6],
...     }
... )
>>> df.select(pl.all().implode())
shape: (1, 2)
┌───────────┬───────────┐
│ a         ┆ b         │
│ ---       ┆ ---       │
│ list[i64] ┆ list[i64] │
╞═══════════╪═══════════╡
│ [1, 2, 3] ┆ [4, 5, 6] │
└───────────┴───────────┘
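
`implode` is the inverse of :func:`explode`; a round trip restores the
original column (a sketch):

>>> df.select(pl.col("a").implode().explode())  # doctest: +IGNORE_RESULT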
        """
        return wrap_expr(self._pyexpr.implode())

    def gather_every(self, n: int, offset: int = 0) -> Expr:
        """
Take every nth value in the Series and return as a new Series.

Parameters
----------
n
    Gather every *n*-th row.
offset
    Starting index.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
>>> df.select(pl.col("foo").gather_every(3))
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 4   │
│ 7   │
└─────┘

>>> df.select(pl.col("foo").gather_every(3, offset=1))
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 2   │
│ 5   │
│ 8   │
└─────┘
        """
        return wrap_expr(self._pyexpr.gather_every(n, offset))

    def head(self, n: int | Expr = 10) -> Expr:
        """
Get the first `n` rows.

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5, 6, 7]})
>>> df.select(pl.col("foo").head(3))
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
└─────┘
        """
        return self.slice(0, n)

    def tail(self, n: int | Expr = 10) -> Expr:
        """
Get the last `n` rows.

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5, 6, 7]})
>>> df.select(pl.col("foo").tail(3))
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 5   │
│ 6   │
│ 7   │
└─────┘
        """
        offset = -wrap_expr(parse_into_expression(n)).cast(
            Int64, strict=False, wrap_numerical=True
        )
        return self.slice(offset, n)

    def limit(self, n: int | Expr = 10) -> Expr:
        """
Get the first `n` rows (alias for :func:`Expr.head`).

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5, 6, 7]})
>>> df.select(pl.col("foo").limit(3))
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
└─────┘
        """
        return self.head(n)

    def and_(self, *others: Any) -> Expr:
        """
Method equivalent of bitwise "and" operator `expr & other & ...`.

Parameters
----------
*others
    One or more integer or boolean expressions to evaluate/combine.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [5, 6, 7, 4, 8],
...         "y": [1.5, 2.5, 1.0, 4.0, -5.75],
...         "z": [-9, 2, -1, 4, 8],
...     }
... )
>>> df.select(
...     (pl.col("x") >= pl.col("z"))
...     .and_(
...         pl.col("y") >= pl.col("z"),
...         pl.col("y") == pl.col("y"),
...         pl.col("z") <= pl.col("x"),
...         pl.col("y") != pl.col("x"),
...     )
...     .alias("all")
... )
shape: (5, 1)
┌───────┐
│ all   │
│ ---   │
│ bool  │
╞═══════╡
│ true  │
│ true  │
│ true  │
│ false │
│ false │
└───────┘
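
The call above is shorthand for chaining the `&` operator (a sketch):

>>> df.select(
...     (
...         (pl.col("x") >= pl.col("z"))
...         & (pl.col("y") >= pl.col("z"))
...         & (pl.col("y") == pl.col("y"))
...         & (pl.col("z") <= pl.col("x"))
...         & (pl.col("y") != pl.col("x"))
...     ).alias("all")
... )  # doctest: +IGNORE_RESULT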
        """
        return reduce(operator.and_, (self, *others))

    def or_(self, *others: Any) -> Expr:
        """
Method equivalent of bitwise "or" operator `expr | other | ...`.

Parameters
----------
*others
    One or more integer or boolean expressions to evaluate/combine.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [5, 6, 7, 4, 8],
...         "y": [1.5, 2.5, 1.0, 4.0, -5.75],
...         "z": [-9, 2, -1, 4, 8],
...     }
... )
>>> df.select(
...     (pl.col("x") == pl.col("y"))
...     .or_(
...         pl.col("x") == pl.col("y"),
...         pl.col("y") == pl.col("z"),
...         pl.col("y").cast(int) == pl.col("z"),
...     )
...     .alias("any")
... )
shape: (5, 1)
┌───────┐
│ any   │
│ ---   │
│ bool  │
╞═══════╡
│ false │
│ true  │
│ false │
│ true  │
│ false │
└───────┘
        """
        return reduce(operator.or_, (self, *others))

    def eq(self, other: Any) -> Expr:
        """
Method equivalent of equality operator `expr == other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [1.0, 2.0, float("nan"), 4.0],
...         "y": [2.0, 2.0, float("nan"), 4.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").eq(pl.col("y")).alias("x == y"),
... )
shape: (4, 3)
┌─────┬─────┬────────┐
│ x   ┆ y   ┆ x == y │
│ --- ┆ --- ┆ ---    │
│ f64 ┆ f64 ┆ bool   │
╞═════╪═════╪════════╡
│ 1.0 ┆ 2.0 ┆ false  │
│ 2.0 ┆ 2.0 ┆ true   │
│ NaN ┆ NaN ┆ true   │
│ 4.0 ┆ 4.0 ┆ true   │
└─────┴─────┴────────┘
        """
        return self.__eq__(other)

    def eq_missing(self, other: Any) -> Expr:
        """
Method equivalent of equality operator `expr == other` where `None == None`.

This differs from default `eq` where null values are propagated.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [1.0, 2.0, float("nan"), 4.0, None, None],
...         "y": [2.0, 2.0, float("nan"), 4.0, 5.0, None],
...     }
... )
>>> df.with_columns(
...     pl.col("x").eq(pl.col("y")).alias("x eq y"),
...     pl.col("x").eq_missing(pl.col("y")).alias("x eq_missing y"),
... )
shape: (6, 4)
┌──────┬──────┬────────┬────────────────┐
│ x    ┆ y    ┆ x eq y ┆ x eq_missing y │
│ ---  ┆ ---  ┆ ---    ┆ ---            │
│ f64  ┆ f64  ┆ bool   ┆ bool           │
╞══════╪══════╪════════╪════════════════╡
│ 1.0  ┆ 2.0  ┆ false  ┆ false          │
│ 2.0  ┆ 2.0  ┆ true   ┆ true           │
│ NaN  ┆ NaN  ┆ true   ┆ true           │
│ 4.0  ┆ 4.0  ┆ true   ┆ true           │
│ null ┆ 5.0  ┆ null   ┆ false          │
│ null ┆ null ┆ null   ┆ true           │
└──────┴──────┴────────┴────────────────┘
        """
        other = parse_into_expression(other, str_as_lit=True)
        return wrap_expr(self._pyexpr.eq_missing(other))

    def ge(self, other: Any) -> Expr:
        """
Method equivalent of "greater than or equal" operator `expr >= other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [5.0, 4.0, float("nan"), 2.0],
...         "y": [5.0, 3.0, float("nan"), 1.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").ge(pl.col("y")).alias("x >= y"),
... )
shape: (4, 3)
┌─────┬─────┬────────┐
│ x   ┆ y   ┆ x >= y │
│ --- ┆ --- ┆ ---    │
│ f64 ┆ f64 ┆ bool   │
╞═════╪═════╪════════╡
│ 5.0 ┆ 5.0 ┆ true   │
│ 4.0 ┆ 3.0 ┆ true   │
│ NaN ┆ NaN ┆ true   │
│ 2.0 ┆ 1.0 ┆ true   │
└─────┴─────┴────────┘
        """
        return self.__ge__(other)

    def gt(self, other: Any) -> Expr:
        """
Method equivalent of "greater than" operator `expr > other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [5.0, 4.0, float("nan"), 2.0],
...         "y": [5.0, 3.0, float("nan"), 1.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").gt(pl.col("y")).alias("x > y"),
... )
shape: (4, 3)
┌─────┬─────┬───────┐
│ x   ┆ y   ┆ x > y │
│ --- ┆ --- ┆ ---   │
│ f64 ┆ f64 ┆ bool  │
╞═════╪═════╪═══════╡
│ 5.0 ┆ 5.0 ┆ false │
│ 4.0 ┆ 3.0 ┆ true  │
│ NaN ┆ NaN ┆ false │
│ 2.0 ┆ 1.0 ┆ true  │
└─────┴─────┴───────┘
        """
        return self.__gt__(other)

    def le(self, other: Any) -> Expr:
        """
Method equivalent of "less than or equal" operator `expr <= other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [5.0, 4.0, float("nan"), 0.5],
...         "y": [5.0, 3.5, float("nan"), 2.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").le(pl.col("y")).alias("x <= y"),
... )
shape: (4, 3)
┌─────┬─────┬────────┐
│ x   ┆ y   ┆ x <= y │
│ --- ┆ --- ┆ ---    │
│ f64 ┆ f64 ┆ bool   │
╞═════╪═════╪════════╡
│ 5.0 ┆ 5.0 ┆ true   │
│ 4.0 ┆ 3.5 ┆ false  │
│ NaN ┆ NaN ┆ true   │
│ 0.5 ┆ 2.0 ┆ true   │
└─────┴─────┴────────┘
        """
        return self.__le__(other)

    def lt(self, other: Any) -> Expr:
        """
Method equivalent of "less than" operator `expr < other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [1.0, 2.0, float("nan"), 3.0],
...         "y": [2.0, 2.0, float("nan"), 4.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").lt(pl.col("y")).alias("x < y"),
... )
shape: (4, 3)
┌─────┬─────┬───────┐
│ x   ┆ y   ┆ x < y │
│ --- ┆ --- ┆ ---   │
│ f64 ┆ f64 ┆ bool  │
╞═════╪═════╪═══════╡
│ 1.0 ┆ 2.0 ┆ true  │
│ 2.0 ┆ 2.0 ┆ false │
│ NaN ┆ NaN ┆ false │
│ 3.0 ┆ 4.0 ┆ true  │
└─────┴─────┴───────┘
        """
        return self.__lt__(other)

    def ne(self, other: Any) -> Expr:
        """
Method equivalent of inequality operator `expr != other`.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [1.0, 2.0, float("nan"), 4.0],
...         "y": [2.0, 2.0, float("nan"), 4.0],
...     }
... )
>>> df.with_columns(
...     pl.col("x").ne(pl.col("y")).alias("x != y"),
... )
shape: (4, 3)
┌─────┬─────┬────────┐
│ x   ┆ y   ┆ x != y │
│ --- ┆ --- ┆ ---    │
│ f64 ┆ f64 ┆ bool   │
╞═════╪═════╪════════╡
│ 1.0 ┆ 2.0 ┆ true   │
│ 2.0 ┆ 2.0 ┆ false  │
│ NaN ┆ NaN ┆ false  │
│ 4.0 ┆ 4.0 ┆ false  │
└─────┴─────┴────────┘
        """
        return self.__ne__(other)

    def ne_missing(self, other: Any) -> Expr:
        """
Method equivalent of equality operator `expr != other` where `None == None`.

This differs from default `ne` where null values are propagated.

Parameters
----------
other
    A literal or expression value to compare with.

Examples
--------
>>> df = pl.DataFrame(
...     data={
...         "x": [1.0, 2.0, float("nan"), 4.0, None, None],
...         "y": [2.0, 2.0, float("nan"), 4.0, 5.0, None],
...     }
... )
>>> df.with_columns(
...     pl.col("x").ne(pl.col("y")).alias("x ne y"),
...     pl.col("x").ne_missing(pl.col("y")).alias("x ne_missing y"),
... )
shape: (6, 4)
┌──────┬──────┬────────┬────────────────┐
│ x    ┆ y    ┆ x ne y ┆ x ne_missing y │
│ ---  ┆ ---  ┆ ---    ┆ ---            │
│ f64  ┆ f64  ┆ bool   ┆ bool           │
╞══════╪══════╪════════╪════════════════╡
│ 1.0  ┆ 2.0  ┆ true   ┆ true           │
│ 2.0  ┆ 2.0  ┆ false  ┆ false          │
│ NaN  ┆ NaN  ┆ false  ┆ false          │
│ 4.0  ┆ 4.0  ┆ false  ┆ false          │
│ null ┆ 5.0  ┆ null   ┆ true           │
│ null ┆ null ┆ null   ┆ false          │
└──────┴──────┴────────┴────────────────┘
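
The same result can be emulated with `ne` plus explicit null handling; a
minimal sketch (the `emulated` name is illustrative; output omitted):

>>> df.with_columns(
...     emulated=pl.col("x").ne(pl.col("y")).fill_null(
...         pl.col("x").is_null() != pl.col("y").is_null()
...     )
... )  # doctest: +SKIP
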
Method equivalent of addition operator `expr + other`.

Parameters
----------
other
    Numeric or string value; accepts expression input.

Examples
--------
>>> df = pl.DataFrame({"x": [1, 2, 3, 4, 5]})
>>> df.with_columns(
...     pl.col("x").add(2).alias("x+int"),
...     pl.col("x").add(pl.col("x").cum_prod()).alias("x+expr"),
... )
shape: (5, 3)
┌─────┬───────┬────────┐
│ x   ┆ x+int ┆ x+expr │
│ --- ┆ ---   ┆ ---    │
│ i64 ┆ i64   ┆ i64    │
╞═════╪═══════╪════════╡
│ 1   ┆ 3     ┆ 2      │
│ 2   ┆ 4     ┆ 4      │
│ 3   ┆ 5     ┆ 9      │
│ 4   ┆ 6     ┆ 28     │
│ 5   ┆ 7     ┆ 125    │
└─────┴───────┴────────┘

>>> df = pl.DataFrame(
...     {"x": ["a", "d", "g"], "y": ["b", "e", "h"], "z": ["c", "f", "i"]}
... )
>>> df.with_columns(pl.col("x").add(pl.col("y")).add(pl.col("z")).alias("xyz"))
shape: (3, 4)
┌─────┬─────┬─────┬─────┐
│ x   ┆ y   ┆ z   ┆ xyz │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ str ┆ str ┆ str │
╞═════╪═════╪═════╪═════╡
│ a   ┆ b   ┆ c   ┆ abc │
│ d   ┆ e   ┆ f   ┆ def │
│ g   ┆ h   ┆ i   ┆ ghi │
└─────┴─────┴─────┴─────┘
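
Since `add` mirrors the `+` operator, the concatenation above can also be
written with operators; a minimal sketch (same result as above):

>>> df.with_columns((pl.col("x") + pl.col("y") + pl.col("z")).alias("xyz"))  # doctest: +SKIP
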
Method equivalent of integer division operator `expr // other`.

Parameters
----------
other
    Numeric literal or expression value.

See Also
--------
truediv

Examples
--------
>>> df = pl.DataFrame({"x": [1, 2, 3, 4, 5]})
>>> df.with_columns(
...     pl.col("x").truediv(2).alias("x/2"),
...     pl.col("x").floordiv(2).alias("x//2"),
... )
shape: (5, 3)
┌─────┬─────┬──────┐
│ x   ┆ x/2 ┆ x//2 │
│ --- ┆ --- ┆ ---  │
│ i64 ┆ f64 ┆ i64  │
╞═════╪═════╪══════╡
│ 1   ┆ 0.5 ┆ 0    │
│ 2   ┆ 1.0 ┆ 1    │
│ 3   ┆ 1.5 ┆ 1    │
│ 4   ┆ 2.0 ┆ 2    │
│ 5   ┆ 2.5 ┆ 2    │
└─────┴─────┴──────┘

Note that Polars' `floordiv` is subtly different from Python's floor division.
For example, consider 6.0 floor-divided by 0.1.
Python gives:

>>> 6.0 // 0.1
59.0

because `0.1` is not represented internally as that exact value,
but a slightly larger value.
So the result of the division is slightly less than 60,
meaning the flooring operation returns 59.0.

Polars instead first does the floating-point division,
resulting in a floating-point value of 60.0,
and then performs the flooring operation using :any:`floor`:

>>> df = pl.DataFrame({"x": [6.0, 6.03]})
>>> df.with_columns(
...     pl.col("x").truediv(0.1).alias("x/0.1"),
... ).with_columns(
...     pl.col("x/0.1").floor().alias("x/0.1 floor"),
... )
shape: (2, 3)
┌──────┬───────┬─────────────┐
│ x    ┆ x/0.1 ┆ x/0.1 floor │
│ ---  ┆ ---   ┆ ---         │
│ f64  ┆ f64   ┆ f64         │
╞══════╪═══════╪═════════════╡
│ 6.0  ┆ 60.0  ┆ 60.0        │
│ 6.03 ┆ 60.3  ┆ 60.0        │
└──────┴───────┴─────────────┘

yielding the more intuitive result 60.0.
The row with x = 6.03 is included to demonstrate
the effect of the flooring operation.

`floordiv` combines those two steps
to give the same result with one expression:

>>> df.with_columns(
...     pl.col("x").floordiv(0.1).alias("x//0.1"),
... )
shape: (2, 2)
┌──────┬────────┐
│ x    ┆ x//0.1 │
│ ---  ┆ ---    │
│ f64  ┆ f64    │
╞══════╪════════╡
│ 6.0  ┆ 60.0   │
│ 6.03 ┆ 60.0   │
└──────┴────────┘
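
The two explicit steps described above can also be spelled out directly; a
minimal sketch (same result as the `floordiv` call):

>>> df.with_columns(pl.col("x").truediv(0.1).floor().alias("x//0.1"))  # doctest: +SKIP
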
Method equivalent of modulus operator `expr % other`.

Parameters
----------
other
    Numeric literal or expression value.

Examples
--------
>>> df = pl.DataFrame({"x": [0, 1, 2, 3, 4]})
>>> df.with_columns(pl.col("x").mod(2).alias("x%2"))
shape: (5, 2)
┌─────┬─────┐
│ x   ┆ x%2 │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 0   ┆ 0   │
│ 1   ┆ 1   │
│ 2   ┆ 0   │
│ 3   ┆ 1   │
│ 4   ┆ 0   │
└─────┴─────┘

Method equivalent of multiplication operator `expr * other`.

Parameters
----------
other
    Numeric literal or expression value.

Examples
--------
>>> df = pl.DataFrame({"x": [1, 2, 4, 8, 16]})
>>> df.with_columns(
...     pl.col("x").mul(2).alias("x*2"),
...     pl.col("x").mul(pl.col("x").log(2)).alias("x * xlog2"),
... )
shape: (5, 3)
┌─────┬─────┬───────────┐
│ x   ┆ x*2 ┆ x * xlog2 │
│ --- ┆ --- ┆ ---       │
│ i64 ┆ i64 ┆ f64       │
╞═════╪═════╪═══════════╡
│ 1   ┆ 2   ┆ 0.0       │
│ 2   ┆ 4   ┆ 2.0       │
│ 4   ┆ 8   ┆ 8.0       │
│ 8   ┆ 16  ┆ 24.0      │
│ 16  ┆ 32  ┆ 64.0      │
└─────┴─────┴───────────┘

Method equivalent of subtraction operator `expr - other`.

Parameters
----------
other
    Numeric literal or expression value.

Examples
--------
>>> df = pl.DataFrame({"x": [0, 1, 2, 3, 4]})
>>> df.with_columns(
...     pl.col("x").sub(2).alias("x-2"),
...     pl.col("x").sub(pl.col("x").cum_sum()).alias("x-expr"),
... )
shape: (5, 3)
┌─────┬─────┬────────┐
│ x   ┆ x-2 ┆ x-expr │
│ --- ┆ --- ┆ ---    │
│ i64 ┆ i64 ┆ i64    │
╞═════╪═════╪════════╡
│ 0   ┆ -2  ┆ 0      │
│ 1   ┆ -1  ┆ 0      │
│ 2   ┆ 0   ┆ -1     │
│ 3   ┆ 1   ┆ -3     │
│ 4   ┆ 2   ┆ -6     │
└─────┴─────┴────────┘

Method equivalent of unary minus operator `-expr`.

Examples
--------
>>> df = pl.DataFrame({"a": [-1, 0, 2, None]})
>>> df.with_columns(pl.col("a").neg())
shape: (4, 1)
┌──────┐
│ a    │
│ ---  │
│ i64  │
╞══════╡
│ 1    │
│ 0    │
│ -2   │
│ null │
└──────┘

Method equivalent of float division operator `expr / other`.

Parameters
----------
other
    Numeric literal or expression value.

Notes
-----
Zero-division behaviour follows IEEE-754:

0/0: Invalid operation - mathematically undefined, returns NaN.
n/0: On finite operands gives an exact infinite result, e.g. ±infinity.
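
For contrast, Python scalar division raises instead of returning an IEEE-754
special value:

>>> 1.0 / 0.0
Traceback (most recent call last):
    ...
ZeroDivisionError: float division by zero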

See Also
--------
floordiv

Examples
--------
>>> df = pl.DataFrame(
...     data={"x": [-2, -1, 0, 1, 2], "y": [0.5, 0.0, 0.0, -4.0, -0.5]}
... )
>>> df.with_columns(
...     pl.col("x").truediv(2).alias("x/2"),
...     pl.col("x").truediv(pl.col("y")).alias("x/y"),
... )
shape: (5, 4)
┌─────┬──────┬──────┬───────┐
│ x   ┆ y    ┆ x/2  ┆ x/y   │
│ --- ┆ ---  ┆ ---  ┆ ---   │
│ i64 ┆ f64  ┆ f64  ┆ f64   │
╞═════╪══════╪══════╪═══════╡
│ -2  ┆ 0.5  ┆ -1.0 ┆ -4.0  │
│ -1  ┆ 0.0  ┆ -0.5 ┆ -inf  │
│ 0   ┆ 0.0  ┆ 0.0  ┆ NaN   │
│ 1   ┆ -4.0 ┆ 0.5  ┆ -0.25 │
│ 2   ┆ -0.5 ┆ 1.0  ┆ -4.0  │
└─────┴──────┴──────┴───────┘

Method equivalent of exponentiation operator `expr ** exponent`.

If the exponent is a float, the result follows the dtype of the exponent.
Otherwise, it follows the dtype of the base.

Parameters
----------
exponent
    Numeric literal or expression exponent value.

Examples
--------
>>> df = pl.DataFrame({"x": [1, 2, 4, 8]})
>>> df.with_columns(
...     pl.col("x").pow(3).alias("cube"),
...     pl.col("x").pow(pl.col("x").log(2)).alias("x ** xlog2"),
... )
shape: (4, 3)
┌─────┬──────┬────────────┐
│ x   ┆ cube ┆ x ** xlog2 │
│ --- ┆ ---  ┆ ---        │
│ i64 ┆ i64  ┆ f64        │
╞═════╪══════╪════════════╡
│ 1   ┆ 1    ┆ 1.0        │
│ 2   ┆ 8    ┆ 2.0        │
│ 4   ┆ 64   ┆ 16.0       │
│ 8   ┆ 512  ┆ 512.0      │
└─────┴──────┴────────────┘

Raising an integer to a positive integer power yields an integer. To raise
an integer to a negative power, cast either the base or the exponent to
float first:

>>> df.with_columns(
...     x_squared=pl.col("x").pow(2),
...     x_inverse=pl.col("x").pow(-1.0),
... )
shape: (4, 3)
┌─────┬───────────┬───────────┐
│ x   ┆ x_squared ┆ x_inverse │
│ --- ┆ ---       ┆ ---       │
│ i64 ┆ i64       ┆ f64       │
╞═════╪═══════════╪═══════════╡
│ 1   ┆ 1         ┆ 1.0       │
│ 2   ┆ 4         ┆ 0.5       │
│ 4   ┆ 16        ┆ 0.25      │
│ 8   ┆ 64        ┆ 0.125     │
└─────┴───────────┴───────────┘
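
Casting the base instead of the exponent works the same way; a minimal
sketch (output omitted):

>>> df.with_columns(x_inverse=pl.col("x").cast(pl.Float64).pow(-1))  # doctest: +SKIP
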
Method equivalent of bitwise exclusive-or operator `expr ^ other`.

Parameters
----------
other
    Integer or boolean value; accepts expression input.

Examples
--------
>>> df = pl.DataFrame(
...     {"x": [True, False, True, False], "y": [True, True, False, False]}
... )
>>> df.with_columns(pl.col("x").xor(pl.col("y")).alias("x ^ y"))
shape: (4, 3)
┌───────┬───────┬───────┐
│ x     ┆ y     ┆ x ^ y │
│ ---   ┆ ---   ┆ ---   │
│ bool  ┆ bool  ┆ bool  │
╞═══════╪═══════╪═══════╡
│ true  ┆ true  ┆ false │
│ false ┆ true  ┆ true  │
│ true  ┆ false ┆ true  │
│ false ┆ false ┆ false │
└───────┴───────┴───────┘

>>> def binary_string(n: int) -> str:
...     return bin(n)[2:].zfill(8)
>>>
>>> df = pl.DataFrame(
...     data={"x": [10, 8, 250, 66], "y": [1, 2, 3, 4]},
...     schema={"x": pl.UInt8, "y": pl.UInt8},
... )
>>> df.with_columns(
...     pl.col("x")
...     .map_elements(binary_string, return_dtype=pl.String)
...     .alias("bin_x"),
...     pl.col("y")
...     .map_elements(binary_string, return_dtype=pl.String)
...     .alias("bin_y"),
...     pl.col("x").xor(pl.col("y")).alias("xor_xy"),
...     pl.col("x")
...     .xor(pl.col("y"))
...     .map_elements(binary_string, return_dtype=pl.String)
...     .alias("bin_xor_xy"),
... )
shape: (4, 6)
┌─────┬─────┬──────────┬──────────┬────────┬────────────┐
│ x   ┆ y   ┆ bin_x    ┆ bin_y    ┆ xor_xy ┆ bin_xor_xy │
│ --- ┆ --- ┆ ---      ┆ ---      ┆ ---    ┆ ---        │
│ u8  ┆ u8  ┆ str      ┆ str      ┆ u8     ┆ str        │
╞═════╪═════╪══════════╪══════════╪════════╪════════════╡
│ 10  ┆ 1   ┆ 00001010 ┆ 00000001 ┆ 11     ┆ 00001011   │
│ 8   ┆ 2   ┆ 00001000 ┆ 00000010 ┆ 10     ┆ 00001010   │
│ 250 ┆ 3   ┆ 11111010 ┆ 00000011 ┆ 249    ┆ 11111001   │
│ 66  ┆ 4   ┆ 01000010 ┆ 00000100 ┆ 70     ┆ 01000110   │
└─────┴─────┴──────────┴──────────┴────────┴────────────┘
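
The `xor_xy` column can be cross-checked against Python's built-in integer
XOR:

>>> [a ^ b for a, b in [(10, 1), (8, 2), (250, 3), (66, 4)]]
[11, 10, 249, 70]
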
Check if elements of this expression are present in the other Series.

Parameters
----------
other
    Series or sequence of primitive type.
nulls_equal : bool, default False
    If True, treat null as a distinct value. Null values will not propagate.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Examples
--------
>>> df = pl.DataFrame(
...     {"sets": [[1, 2, 3], [1, 2], [9, 10]], "optional_members": [1, 2, 3]}
... )
>>> df.with_columns(contains=pl.col("optional_members").is_in("sets"))
shape: (3, 3)
┌───────────┬──────────────────┬──────────┐
│ sets      ┆ optional_members ┆ contains │
│ ---       ┆ ---              ┆ ---      │
│ list[i64] ┆ i64              ┆ bool     │
╞═══════════╪══════════════════╪══════════╡
│ [1, 2, 3] ┆ 1                ┆ true     │
│ [1, 2]    ┆ 2                ┆ true     │
│ [9, 10]   ┆ 3                ┆ false    │
└───────────┴──────────────────┴──────────┘
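
A minimal sketch of the `nulls_equal` flag described above (the frame `df2`
is illustrative; output omitted):

>>> df2 = pl.DataFrame({"a": [1, None]})
>>> df2.with_columns(pl.col("a").is_in([1, None], nulls_equal=True))  # doctest: +SKIP
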
Repeat the elements in this Series as specified in the given expression.

The repeated elements are expanded into a `List`.

Parameters
----------
by
    Numeric column that determines how often the values will be repeated.
    The column will be coerced to UInt32. Give this dtype to make the coercion a
    no-op.

Returns
-------
Expr
    Expression of data type :class:`List`, where the inner data type is equal
    to the original data type.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": ["x", "y", "z"],
...         "n": [1, 2, 3],
...     }
... )
>>> df.select(pl.col("a").repeat_by("n"))
shape: (3, 1)
┌─────────────────┐
│ a               │
│ ---             │
│ list[str]       │
╞═════════════════╡
│ ["x"]           │
│ ["y", "y"]      │
│ ["z", "z", "z"] │
└─────────────────┘

Check if this expression is between the given lower and upper bounds.

Parameters
----------
lower_bound
    Lower bound value. Accepts expression input. Strings are parsed as column
    names, other non-expression inputs are parsed as literals.
upper_bound
    Upper bound value. Accepts expression input. Strings are parsed as column
    names, other non-expression inputs are parsed as literals.
closed : {'both', 'left', 'right', 'none'}
    Define which sides of the interval are closed (inclusive).

Notes
-----
If the value of the `lower_bound` is greater than that of the `upper_bound`
then the result will be False, as no value can satisfy the condition.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Examples
--------
>>> df = pl.DataFrame({"num": [1, 2, 3, 4, 5]})
>>> df.with_columns(pl.col("num").is_between(2, 4).alias("is_between"))
shape: (5, 2)
┌─────┬────────────┐
│ num ┆ is_between │
│ --- ┆ ---        │
│ i64 ┆ bool       │
╞═════╪════════════╡
│ 1   ┆ false      │
│ 2   ┆ true       │
│ 3   ┆ true       │
│ 4   ┆ true       │
│ 5   ┆ false      │
└─────┴────────────┘

Use the `closed` argument to include or exclude the values at the bounds:

>>> df.with_columns(
...     pl.col("num").is_between(2, 4, closed="left").alias("is_between")
... )
shape: (5, 2)
┌─────┬────────────┐
│ num ┆ is_between │
│ --- ┆ ---        │
│ i64 ┆ bool       │
╞═════╪════════════╡
│ 1   ┆ false      │
│ 2   ┆ true       │
│ 3   ┆ true       │
│ 4   ┆ false      │
│ 5   ┆ false      │
└─────┴────────────┘

You can also use strings as well as numeric/temporal values (note: ensure that
string literals are wrapped with `lit` so as not to conflate them with
column names):

>>> df = pl.DataFrame({"a": ["a", "b", "c", "d", "e"]})
>>> df.with_columns(
...     pl.col("a")
...     .is_between(pl.lit("a"), pl.lit("c"), closed="both")
...     .alias("is_between")
... )
shape: (5, 2)
┌─────┬────────────┐
│ a   ┆ is_between │
│ --- ┆ ---        │
│ str ┆ bool       │
╞═════╪════════════╡
│ a   ┆ true       │
│ b   ┆ true       │
│ c   ┆ true       │
│ d   ┆ false      │
│ e   ┆ false      │
└─────┴────────────┘

Use column expressions as lower/upper bounds, comparing to a literal value:

>>> df = pl.DataFrame({"a": [1, 2, 3, 4, 5], "b": [5, 4, 3, 2, 1]})
>>> df.with_columns(
...     pl.lit(3).is_between(pl.col("a"), pl.col("b")).alias("between_ab")
... )
shape: (5, 3)
┌─────┬─────┬────────────┐
│ a   ┆ b   ┆ between_ab │
│ --- ┆ --- ┆ ---        │
│ i64 ┆ i64 ┆ bool       │
╞═════╪═════╪════════════╡
│ 1   ┆ 5   ┆ true       │
│ 2   ┆ 4   ┆ true       │
│ 3   ┆ 3   ┆ true       │
│ 4   ┆ 2   ┆ false      │
│ 5   ┆ 1   ┆ false      │
└─────┴─────┴────────────┘
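
With the default `closed="both"`, the call above is equivalent to a pair of
explicit comparisons; a minimal sketch (same result as above):

>>> df.with_columns(
...     ((pl.lit(3) >= pl.col("a")) & (pl.lit(3) <= pl.col("b"))).alias("between_ab")
... )  # doctest: +SKIP
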
Check if this expression is close, i.e. almost equal, to the other expression.

Two values `a` and `b` are considered close if the following condition holds:

.. math::
    |a-b| \le \max \{ \text{rel_tol} \cdot \max \{ |a|, |b| \}, \text{abs_tol} \}

Parameters
----------
abs_tol
    Absolute tolerance. This is the maximum allowed absolute difference between
    two values. Must be non-negative.
rel_tol
    Relative tolerance. This is the maximum allowed difference between two
    values, relative to the larger absolute value. Must be non-negative.
nans_equal
    Whether NaN values should be considered equal.

Returns
-------
Expr
    Expression of data type :class:`Boolean`.

Notes
-----
    The implementation of this method is symmetric and mirrors the behavior of
    :meth:`math.isclose`. Specifically note that this behavior differs
    from :func:`numpy.isclose`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.5, 2.0, 2.5], "b": [1.55, 2.2, 3.0]})
>>> df.with_columns(pl.col("a").is_close("b", abs_tol=0.1).alias("is_close"))
shape: (3, 3)
┌─────┬──────┬──────────┐
│ a   ┆ b    ┆ is_close │
│ --- ┆ ---  ┆ ---      │
│ f64 ┆ f64  ┆ bool     │
╞═════╪══════╪══════════╡
│ 1.5 ┆ 1.55 ┆ true     │
│ 2.0 ┆ 2.2  ┆ false    │
│ 2.5 ┆ 3.0  ┆ false    │
└─────┴──────┴──────────┘
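
Because the condition mirrors :meth:`math.isclose`, the column above can be
cross-checked directly (`rel_tol=1e-09` is passed explicitly here):

>>> import math
>>> [math.isclose(a, b, rel_tol=1e-09, abs_tol=0.1) for a, b in [(1.5, 1.55), (2.0, 2.2), (2.5, 3.0)]]
[True, False, False]
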
Hash the elements in the selection.

The hash value is of type `UInt64`.

Parameters
----------
seed
    Random seed parameter. Defaults to 0.
seed_1
    Random seed parameter. Defaults to `seed` if not set.
seed_2
    Random seed parameter. Defaults to `seed` if not set.
seed_3
    Random seed parameter. Defaults to `seed` if not set.

Notes
-----
This implementation of `hash` does not guarantee stable results
across different Polars versions. Its stability is only guaranteed within a
single version.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [1, 2, None],
...         "b": ["x", None, "z"],
...     }
... )
>>> df.with_columns(pl.all().hash(10, 20, 30, 40))  # doctest: +IGNORE_RESULT
shape: (3, 2)
┌──────────────────────┬──────────────────────┐
│ a                    ┆ b                    │
│ ---                  ┆ ---                  │
│ u64                  ┆ u64                  │
╞══════════════════════╪══════════════════════╡
│ 9774092659964970114  ┆ 13614470193936745724 │
│ 1101441246220388612  ┆ 11638928888656214026 │
│ 11638928888656214026 ┆ 13382926553367784577 │
└──────────────────────┴──────────────────────┘

Reinterpret the underlying bits as a signed/unsigned integer.

This operation is only allowed for 64-bit integers. For narrower integer
types, you can safely use the cast operation instead.

Parameters
----------
signed
    If True, reinterpret as `pl.Int64`. Otherwise, reinterpret as `pl.UInt64`.

Examples
--------
>>> s = pl.Series("a", [1, 1, 2], dtype=pl.UInt64)
>>> df = pl.DataFrame([s])
>>> df.select(
...     [
...         pl.col("a").reinterpret(signed=True).alias("reinterpreted"),
...         pl.col("a").alias("original"),
...     ]
... )
shape: (3, 2)
┌───────────────┬──────────┐
│ reinterpreted ┆ original │
│ ---           ┆ ---      │
│ i64           ┆ u64      │
╞═══════════════╪══════════╡
│ 1             ┆ 1        │
│ 1             ┆ 1        │
│ 2             ┆ 2        │
└───────────────┴──────────┘
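
For narrower integer types, a plain cast achieves the same effect, as noted
above; a minimal sketch (output omitted):

>>> pl.Series("a", [1, 1, 2], dtype=pl.UInt32).cast(pl.Int32)  # doctest: +SKIP
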
Print the value that this expression evaluates to and pass on the value.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 1, 2]})
>>> df.select(pl.col("foo").cum_sum().inspect("value is: {}").alias("bar"))
value is: shape: (3,)
Series: 'foo' [i64]
[
    1
    2
    4
]
shape: (3, 1)
┌─────┐
│ bar │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 4   │
└─────┘

Interpolate intermediate values.

Nulls at the beginning and end of the series remain null.

Parameters
----------
method : {'linear', 'nearest'}
    Interpolation method.

Examples
--------
Fill null values using linear interpolation.

>>> df = pl.DataFrame(
...     {
...         "a": [1, None, 3],
...         "b": [1.0, float("nan"), 3.0],
...     }
... )
>>> df.select(pl.all().interpolate())
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞═════╪═════╡
│ 1.0 ┆ 1.0 │
│ 2.0 ┆ NaN │
│ 3.0 ┆ 3.0 │
└─────┴─────┘

Fill null values using nearest interpolation.

>>> df.select(pl.all().interpolate("nearest"))
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1   ┆ 1.0 │
│ 3   ┆ NaN │
│ 3   ┆ 3.0 │
└─────┴─────┘

Regrid data to a new grid.

>>> df_original_grid = pl.DataFrame(
...     {
...         "grid_points": [1, 3, 10],
...         "values": [2.0, 6.0, 20.0],
...     }
... )  # Interpolate from this to the new grid
>>> df_new_grid = pl.DataFrame({"grid_points": range(1, 11)})
>>> df_new_grid.join(
...     df_original_grid, on="grid_points", how="left", coalesce=True
... ).with_columns(pl.col("values").interpolate())
shape: (10, 2)
┌─────────────┬────────┐
│ grid_points ┆ values │
│ ---         ┆ ---    │
│ i64         ┆ f64    │
╞═════════════╪════════╡
│ 1           ┆ 2.0    │
│ 2           ┆ 4.0    │
│ 3           ┆ 6.0    │
│ 4           ┆ 8.0    │
│ 5           ┆ 10.0   │
│ 6           ┆ 12.0   │
│ 7           ┆ 14.0   │
│ 8           ┆ 16.0   │
│ 9           ┆ 18.0   │
│ 10          ┆ 20.0   │
└─────────────┴────────┘

Fill null values using interpolation based on another column.

Nulls at the beginning and end of the series remain null.

Parameters
----------
by
    Column to interpolate values based on.

Examples
--------
Fill null values using linear interpolation.

>>> df = pl.DataFrame(
...     {
...         "a": [1, None, None, 3],
...         "b": [1, 2, 7, 8],
...     }
... )
>>> df.with_columns(a_interpolated=pl.col("a").interpolate_by("b"))
shape: (4, 3)
┌──────┬─────┬────────────────┐
│ a    ┆ b   ┆ a_interpolated │
│ ---  ┆ --- ┆ ---            │
│ i64  ┆ i64 ┆ f64            │
╞══════╪═════╪════════════════╡
│ 1    ┆ 1   ┆ 1.0            │
│ null ┆ 2   ┆ 1.285714       │
│ null ┆ 7   ┆ 2.714286       │
│ 3    ┆ 8   ┆ 3.0            │
└──────┴─────┴────────────────┘
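
For the frame above, `a` is fitted linearly in `b` between the non-null
endpoints (b=1, a=1) and (b=8, a=3), i.e. a = 1 + 2 * (b - 1) / 7, which
gives 1.285714 at b=2 and 2.714286 at b=7.
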
Apply a rolling min based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
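
A minimal sketch of that alternative, using the frame-level `rolling`
context and assuming the `df_temporal` frame built in the examples below
(output omitted):

>>> df_temporal.rolling(index_column="date", period="2h").agg(
...     pl.col("index").min().alias("rolling_row_min"),
...     pl.col("index").max().alias("rolling_row_max"),
... )  # doctest: +SKIP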

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling min with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_min=pl.col("index").rolling_min_by("date", window_size="2h")
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_min │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ u32             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0               │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0               │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1               │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 2               │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 3               │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 19              │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 20              │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 21              │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 22              │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 23              │
└───────┴─────────────────────┴─────────────────┘

Apply a rolling max based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling max with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_max=pl.col("index").rolling_max_by("date", window_size="2h")
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_max │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ u32             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0               │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 1               │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 2               │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 3               │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 4               │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 20              │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 21              │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 22              │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 23              │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 24              │
└───────┴─────────────────────┴─────────────────┘

Compute the rolling max with the closure of windows on both sides

>>> df_temporal.with_columns(
...     rolling_row_max=pl.col("index").rolling_max_by(
...         "date", window_size="2h", closed="both"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_max │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ u32             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0               │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 1               │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 2               │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 3               │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 4               │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 20              │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 21              │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 22              │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 23              │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 24              │
└───────┴─────────────────────┴─────────────────┘

Apply a rolling mean based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling mean with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_mean=pl.col("index").rolling_mean_by(
...         "date", window_size="2h"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬──────────────────┐
│ index ┆ date                ┆ rolling_row_mean │
│ ---   ┆ ---                 ┆ ---              │
│ u32   ┆ datetime[μs]        ┆ f64              │
╞═══════╪═════════════════════╪══════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0.0              │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.5              │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.5              │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 2.5              │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 3.5              │
│ …     ┆ …                   ┆ …                │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 19.5             │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 20.5             │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 21.5             │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 22.5             │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 23.5             │
└───────┴─────────────────────┴──────────────────┘

Compute the rolling mean with the closure of windows on both sides

>>> df_temporal.with_columns(
...     rolling_row_mean=pl.col("index").rolling_mean_by(
...         "date", window_size="2h", closed="both"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬──────────────────┐
│ index ┆ date                ┆ rolling_row_mean │
│ ---   ┆ ---                 ┆ ---              │
│ u32   ┆ datetime[μs]        ┆ f64              │
╞═══════╪═════════════════════╪══════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0.0              │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.5              │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.0              │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 2.0              │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 3.0              │
│ …     ┆ …                   ┆ …                │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 19.0             │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 20.0             │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 21.0             │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 22.0             │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 23.0             │
└───────┴─────────────────────┴──────────────────┘

Apply a rolling sum based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
by
    Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling sum with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_sum=pl.col("index").rolling_sum_by("date", window_size="2h")
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_sum │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ u32             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0               │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 1               │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 3               │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 5               │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 7               │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 39              │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 41              │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 43              │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 45              │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 47              │
└───────┴─────────────────────┴─────────────────┘

Compute the rolling sum with the closure of windows on both sides

>>> df_temporal.with_columns(
...     rolling_row_sum=pl.col("index").rolling_sum_by(
...         "date", window_size="2h", closed="both"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_sum │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ u32             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0               │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 1               │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 3               │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 6               │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 9               │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 57              │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 60              │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 63              │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 66              │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 69              │
└───────┴─────────────────────┴─────────────────┘

Compute a rolling standard deviation based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.
ddof
    "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling std with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_std=pl.col("index").rolling_std_by("date", window_size="2h")
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_std │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ f64             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ null            │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.707107        │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 0.707107        │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 0.707107        │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 0.707107        │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 0.707107        │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 0.707107        │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 0.707107        │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 0.707107        │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 0.707107        │
└───────┴─────────────────────┴─────────────────┘

Compute the rolling std with the closure of windows on both sides

>>> df_temporal.with_columns(
...     rolling_row_std=pl.col("index").rolling_std_by(
...         "date", window_size="2h", closed="both"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_std │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ f64             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ null            │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.707107        │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.0             │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 1.0             │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 1.0             │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 1.0             │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 1.0             │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 1.0             │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 1.0             │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 1.0             │
└───────┴─────────────────────┴─────────────────┘
        """
        by = parse_into_expression(by)
        window_size = parse_as_duration_string(window_size)
        return wrap_expr(self._pyexpr.rolling_std_by(by, window_size, min_samples, closed, ddof))

    @unstable()
    def rolling_var_by(self, by: IntoExpr, window_size: timedelta | str, *,
                       min_samples: int = 1, closed: ClosedInterval = "right", ddof: int = 1) -> Expr:
        """
Compute a rolling variance based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``Datetime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.
ddof
    "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
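
As a quick sanity check (a sketch, not a doctest), the rolling variance should
match the square of :meth:`rolling_std_by` over the same window::

    df_temporal.with_columns(
        var=pl.col("index").rolling_var_by("date", window_size="2h"),
        std_sq=pl.col("index").rolling_std_by("date", window_size="2h") ** 2,
    )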

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling var with the temporal windows closed on the right (default)

>>> df_temporal.with_columns(
...     rolling_row_var=pl.col("index").rolling_var_by("date", window_size="2h")
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_var │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ f64             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ null            │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.5             │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 0.5             │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 0.5             │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 0.5             │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 0.5             │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 0.5             │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 0.5             │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 0.5             │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 0.5             │
└───────┴─────────────────────┴─────────────────┘

Compute the rolling var with the closure of windows on both sides

>>> df_temporal.with_columns(
...     rolling_row_var=pl.col("index").rolling_var_by(
...         "date", window_size="2h", closed="both"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬─────────────────┐
│ index ┆ date                ┆ rolling_row_var │
│ ---   ┆ ---                 ┆ ---             │
│ u32   ┆ datetime[μs]        ┆ f64             │
╞═══════╪═════════════════════╪═════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ null            │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.5             │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.0             │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 1.0             │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 1.0             │
│ …     ┆ …                   ┆ …               │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 1.0             │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 1.0             │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 1.0             │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 1.0             │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 1.0             │
└───────┴─────────────────────┴─────────────────┘
        """
        by = parse_into_expression(by)
        window_size = parse_as_duration_string(window_size)
        return wrap_expr(self._pyexpr.rolling_var_by(by, window_size, min_samples, closed, ddof))

    @unstable()
    def rolling_median_by(self, by: IntoExpr, window_size: timedelta | str, *,
                          min_samples: int = 1, closed: ClosedInterval = "right") -> Expr:
        """
Compute a rolling median based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``Datetime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
window_size
    The length of the window. Can be a dynamic temporal
    size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
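
The `closed` parameter only changes which window endpoints are included; a
side-by-side comparison (a sketch, not a doctest)::

    df_temporal.with_columns(
        right=pl.col("index").rolling_median_by("date", window_size="2h"),
        both=pl.col("index").rolling_median_by(
            "date", window_size="2h", closed="both"
        ),
    )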

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling median with the temporal windows closed on the right:

>>> df_temporal.with_columns(
...     rolling_row_median=pl.col("index").rolling_median_by(
...         "date", window_size="2h"
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬────────────────────┐
│ index ┆ date                ┆ rolling_row_median │
│ ---   ┆ ---                 ┆ ---                │
│ u32   ┆ datetime[μs]        ┆ f64                │
╞═══════╪═════════════════════╪════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0.0                │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.5                │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.5                │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 2.5                │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 3.5                │
│ …     ┆ …                   ┆ …                  │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 19.5               │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 20.5               │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 21.5               │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 22.5               │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 23.5               │
└───────┴─────────────────────┴────────────────────┘
        """
        by = parse_into_expression(by)
        window_size = parse_as_duration_string(window_size)
        return wrap_expr(self._pyexpr.rolling_median_by(by, window_size, min_samples, closed))

    @unstable()
    def rolling_quantile_by(self, by: IntoExpr, window_size: timedelta | str, *,
                            quantile: float, interpolation: QuantileMethod = "nearest",
                            min_samples: int = 1, closed: ClosedInterval = "right") -> Expr:
        """
Compute a rolling quantile based on another column.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
(the default) means the windows will be:

    - (t_0 - window_size, t_0]
    - (t_1 - window_size, t_1]
    - ...
    - (t_n - window_size, t_n]

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
by
    Should be ``Datetime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
    or ``Int32`` data type (note that the integral ones require using `'i'`
    in `window_size`).
quantile
    Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
    Interpolation method.
window_size
    The length of the window. Can be a dynamic
    temporal size indicated by a timedelta or the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 calendar day)
    - 1w    (1 calendar week)
    - 1mo   (1 calendar month)
    - 1q    (1 calendar quarter)
    - 1y    (1 calendar year)
    - 1i    (1 index count)

    By "calendar day", we mean the corresponding time on the next day
    (which may not be 24 hours, due to daylight savings). Similarly for
    "calendar week", "calendar month", "calendar quarter", and
    "calendar year".
min_samples
    The number of values in the window that should be non-null before computing
    a result.
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive),
    defaults to `'right'`.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
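
With `quantile=0.5` and linear interpolation, the result should coincide with
:meth:`rolling_median_by` (a sketch, not a doctest)::

    df_temporal.with_columns(
        q50=pl.col("index").rolling_quantile_by(
            "date", window_size="2h", quantile=0.5, interpolation="linear"
        ),
        med=pl.col("index").rolling_median_by("date", window_size="2h"),
    )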

Examples
--------
Create a DataFrame with a datetime column and a row number column

>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> df_temporal = pl.DataFrame(
...     {"date": pl.datetime_range(start, stop, "1h", eager=True)}
... ).with_row_index()
>>> df_temporal
shape: (25, 2)
┌───────┬─────────────────────┐
│ index ┆ date                │
│ ---   ┆ ---                 │
│ u32   ┆ datetime[μs]        │
╞═══════╪═════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 │
│ 1     ┆ 2001-01-01 01:00:00 │
│ 2     ┆ 2001-01-01 02:00:00 │
│ 3     ┆ 2001-01-01 03:00:00 │
│ 4     ┆ 2001-01-01 04:00:00 │
│ …     ┆ …                   │
│ 20    ┆ 2001-01-01 20:00:00 │
│ 21    ┆ 2001-01-01 21:00:00 │
│ 22    ┆ 2001-01-01 22:00:00 │
│ 23    ┆ 2001-01-01 23:00:00 │
│ 24    ┆ 2001-01-02 00:00:00 │
└───────┴─────────────────────┘

Compute the rolling quantile with the temporal windows closed on the right:

>>> df_temporal.with_columns(
...     rolling_row_quantile=pl.col("index").rolling_quantile_by(
...         "date", window_size="2h", quantile=0.3
...     )
... )
shape: (25, 3)
┌───────┬─────────────────────┬──────────────────────┐
│ index ┆ date                ┆ rolling_row_quantile │
│ ---   ┆ ---                 ┆ ---                  │
│ u32   ┆ datetime[μs]        ┆ f64                  │
╞═══════╪═════════════════════╪══════════════════════╡
│ 0     ┆ 2001-01-01 00:00:00 ┆ 0.0                  │
│ 1     ┆ 2001-01-01 01:00:00 ┆ 0.0                  │
│ 2     ┆ 2001-01-01 02:00:00 ┆ 1.0                  │
│ 3     ┆ 2001-01-01 03:00:00 ┆ 2.0                  │
│ 4     ┆ 2001-01-01 04:00:00 ┆ 3.0                  │
│ …     ┆ …                   ┆ …                    │
│ 20    ┆ 2001-01-01 20:00:00 ┆ 19.0                 │
│ 21    ┆ 2001-01-01 21:00:00 ┆ 20.0                 │
│ 22    ┆ 2001-01-01 22:00:00 ┆ 21.0                 │
│ 23    ┆ 2001-01-01 23:00:00 ┆ 22.0                 │
│ 24    ┆ 2001-01-02 00:00:00 ┆ 23.0                 │
└───────┴─────────────────────┴──────────────────────┘
        """
        by = parse_into_expression(by)
        window_size = parse_as_duration_string(window_size)
        return wrap_expr(self._pyexpr.rolling_quantile_by(
            by, quantile, interpolation, window_size, min_samples, closed))

    def rolling_min(self, window_size: int, weights: list[float] | None = None,
                    min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Apply a rolling min (moving min) over the values in this array.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their min.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_min=pl.col("A").rolling_min(window_size=2),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_min │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.0         │
│ 3.0 ┆ 2.0         │
│ 4.0 ┆ 3.0         │
│ 5.0 ┆ 4.0         │
│ 6.0 ┆ 5.0         │
└─────┴─────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_min=pl.col("A").rolling_min(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_min │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 0.25        │
│ 3.0 ┆ 0.5         │
│ 4.0 ┆ 0.75        │
│ 5.0 ┆ 1.0         │
│ 6.0 ┆ 1.25        │
└─────┴─────────────┘
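
The weighted variant multiplies each window elementwise by `weights` before
taking the minimum; for the second row above that is (a sketch, not a
doctest)::

    vals, weights = [1.0, 2.0], [0.25, 0.75]
    min(v * w for v, w in zip(vals, weights))  # 1.0 * 0.25 = 0.25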

Center the values in the window

>>> df.with_columns(
...     rolling_min=pl.col("A").rolling_min(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_min │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.0         │
│ 3.0 ┆ 2.0         │
│ 4.0 ┆ 3.0         │
│ 5.0 ┆ 4.0         │
│ 6.0 ┆ null        │
└─────┴─────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_min(window_size, weights, min_samples, center))

    def rolling_max(self, window_size: int, weights: list[float] | None = None,
                    min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Apply a rolling max (moving max) over the values in this array.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their max.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
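
A rolling max is the mirror image of a rolling min, so the two columns below
should agree (a sketch, not a doctest)::

    df.with_columns(
        max_direct=pl.col("A").rolling_max(window_size=2),
        max_via_min=-((-pl.col("A")).rolling_min(window_size=2)),
    )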

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_max=pl.col("A").rolling_max(window_size=2),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_max │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 2.0         │
│ 3.0 ┆ 3.0         │
│ 4.0 ┆ 4.0         │
│ 5.0 ┆ 5.0         │
│ 6.0 ┆ 6.0         │
└─────┴─────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_max=pl.col("A").rolling_max(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_max │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.5         │
│ 3.0 ┆ 2.25        │
│ 4.0 ┆ 3.0         │
│ 5.0 ┆ 3.75        │
│ 6.0 ┆ 4.5         │
└─────┴─────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_max=pl.col("A").rolling_max(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_max │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 3.0         │
│ 3.0 ┆ 4.0         │
│ 4.0 ┆ 5.0         │
│ 5.0 ┆ 6.0         │
│ 6.0 ┆ null        │
└─────┴─────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_max(window_size, weights, min_samples, center))

    def rolling_mean(self, window_size: int, weights: list[float] | None = None,
                     min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Apply a rolling mean (moving mean) over the values in this array.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their mean. Weights
are normalized to sum to 1.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window, after being normalized to sum to
    1.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_mean=pl.col("A").rolling_mean(window_size=2),
... )
shape: (6, 2)
┌─────┬──────────────┐
│ A   ┆ rolling_mean │
│ --- ┆ ---          │
│ f64 ┆ f64          │
╞═════╪══════════════╡
│ 1.0 ┆ null         │
│ 2.0 ┆ 1.5          │
│ 3.0 ┆ 2.5          │
│ 4.0 ┆ 3.5          │
│ 5.0 ┆ 4.5          │
│ 6.0 ┆ 5.5          │
└─────┴──────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_mean=pl.col("A").rolling_mean(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬──────────────┐
│ A   ┆ rolling_mean │
│ --- ┆ ---          │
│ f64 ┆ f64          │
╞═════╪══════════════╡
│ 1.0 ┆ null         │
│ 2.0 ┆ 1.75         │
│ 3.0 ┆ 2.75         │
│ 4.0 ┆ 3.75         │
│ 5.0 ┆ 4.75         │
│ 6.0 ┆ 5.75         │
└─────┴──────────────┘
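
Because the weights are normalized to sum to 1, the second row above is a
plain weighted average of the first full window (a sketch, not a doctest)::

    vals, weights = [1.0, 2.0], [0.25, 0.75]  # these already sum to 1
    sum(v * w for v, w in zip(vals, weights))  # 0.25 + 1.5 = 1.75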

Center the values in the window

>>> df.with_columns(
...     rolling_mean=pl.col("A").rolling_mean(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬──────────────┐
│ A   ┆ rolling_mean │
│ --- ┆ ---          │
│ f64 ┆ f64          │
╞═════╪══════════════╡
│ 1.0 ┆ null         │
│ 2.0 ┆ 2.0          │
│ 3.0 ┆ 3.0          │
│ 4.0 ┆ 4.0          │
│ 5.0 ┆ 5.0          │
│ 6.0 ┆ null         │
└─────┴──────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_mean(window_size, weights, min_samples, center))

    def rolling_sum(self, window_size: int, weights: list[float] | None = None,
                    min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Apply a rolling sum (moving sum) over the values in this array.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their sum.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
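
For an unweighted window, a rolling sum is equivalent to a difference of
cumulative sums (a sketch, not a doctest)::

    df.with_columns(
        direct=pl.col("A").rolling_sum(window_size=2),
        via_cum_sum=pl.col("A").cum_sum() - pl.col("A").cum_sum().shift(2),
    )

The two columns agree once `cum_sum().shift(2)` is itself non-null.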

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_sum=pl.col("A").rolling_sum(window_size=2),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_sum │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 3.0         │
│ 3.0 ┆ 5.0         │
│ 4.0 ┆ 7.0         │
│ 5.0 ┆ 9.0         │
│ 6.0 ┆ 11.0        │
└─────┴─────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_sum=pl.col("A").rolling_sum(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_sum │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.75        │
│ 3.0 ┆ 2.75        │
│ 4.0 ┆ 3.75        │
│ 5.0 ┆ 4.75        │
│ 6.0 ┆ 5.75        │
└─────┴─────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_sum=pl.col("A").rolling_sum(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_sum │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 6.0         │
│ 3.0 ┆ 9.0         │
│ 4.0 ┆ 12.0        │
│ 5.0 ┆ 15.0        │
│ 6.0 ┆ null        │
└─────┴─────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_sum(window_size, weights, min_samples, center))

    def rolling_std(self, window_size: int, weights: list[float] | None = None,
                    min_samples: int | None = None, *, center: bool = False, ddof: int = 1) -> Expr:
        """
Compute a rolling standard deviation.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their std. Weights
are normalized to sum to 1.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window after being normalized to sum to
    1.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.
ddof
    "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.
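
`ddof` selects between the sample and the population estimator (a sketch, not
a doctest)::

    df.with_columns(
        sample=pl.col("A").rolling_std(window_size=2),  # ddof=1 (default)
        population=pl.col("A").rolling_std(window_size=2, ddof=0),
    )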

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_std=pl.col("A").rolling_std(window_size=2),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_std │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 0.707107    │
│ 3.0 ┆ 0.707107    │
│ 4.0 ┆ 0.707107    │
│ 5.0 ┆ 0.707107    │
│ 6.0 ┆ 0.707107    │
└─────┴─────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_std=pl.col("A").rolling_std(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_std │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 0.433013    │
│ 3.0 ┆ 0.433013    │
│ 4.0 ┆ 0.433013    │
│ 5.0 ┆ 0.433013    │
│ 6.0 ┆ 0.433013    │
└─────┴─────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_std=pl.col("A").rolling_std(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_std │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.0         │
│ 3.0 ┆ 1.0         │
│ 4.0 ┆ 1.0         │
│ 5.0 ┆ 1.0         │
│ 6.0 ┆ null        │
└─────┴─────────────┘
©r¨  rC  )r$   rd   Úrolling_std©r}   rŽ  r­  r‰  r¨  rC  s         ru   r»  ÚExpr.rolling_stdç  ó8   € ôT ØL‰L×$Ñ$ØØØØØð %ð ó
ð 	
rx   c          
     óN   • [        U R                  R                  UUUUUS95      $ )u  
Compute a rolling variance.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their var. Weights
are normalized to sum to 1.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window after being normalized to sum to
    1.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.
ddof
    "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_var=pl.col("A").rolling_var(window_size=2),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_var │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 0.5         │
│ 3.0 ┆ 0.5         │
│ 4.0 ┆ 0.5         │
│ 5.0 ┆ 0.5         │
│ 6.0 ┆ 0.5         │
└─────┴─────────────┘

Specify weights to multiply the values in the window with:

>>> df.with_columns(
...     rolling_var=pl.col("A").rolling_var(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_var │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 0.1875      │
│ 3.0 ┆ 0.1875      │
│ 4.0 ┆ 0.1875      │
│ 5.0 ┆ 0.1875      │
│ 6.0 ┆ 0.1875      │
└─────┴─────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_var=pl.col("A").rolling_var(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬─────────────┐
│ A   ┆ rolling_var │
│ --- ┆ ---         │
│ f64 ┆ f64         │
╞═════╪═════════════╡
│ 1.0 ┆ null        │
│ 2.0 ┆ 1.0         │
│ 3.0 ┆ 1.0         │
│ 4.0 ┆ 1.0         │
│ 5.0 ┆ 1.0         │
│ 6.0 ┆ null        │
└─────┴─────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_var(window_size, weights, min_samples, center, ddof))

    def rolling_median(self, window_size: int, weights: list[float] | None = None,
                       min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Compute a rolling median.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their median.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_median=pl.col("A").rolling_median(window_size=2),
... )
shape: (6, 2)
┌─────┬────────────────┐
│ A   ┆ rolling_median │
│ --- ┆ ---            │
│ f64 ┆ f64            │
╞═════╪════════════════╡
│ 1.0 ┆ null           │
│ 2.0 ┆ 1.5            │
│ 3.0 ┆ 2.5            │
│ 4.0 ┆ 3.5            │
│ 5.0 ┆ 4.5            │
│ 6.0 ┆ 5.5            │
└─────┴────────────────┘

Specify weights for the values in each window:

>>> df.with_columns(
...     rolling_median=pl.col("A").rolling_median(
...         window_size=2, weights=[0.25, 0.75]
...     ),
... )
shape: (6, 2)
┌─────┬────────────────┐
│ A   ┆ rolling_median │
│ --- ┆ ---            │
│ f64 ┆ f64            │
╞═════╪════════════════╡
│ 1.0 ┆ null           │
│ 2.0 ┆ 1.5            │
│ 3.0 ┆ 2.5            │
│ 4.0 ┆ 3.5            │
│ 5.0 ┆ 4.5            │
│ 6.0 ┆ 5.5            │
└─────┴────────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_median=pl.col("A").rolling_median(window_size=3, center=True),
... )
shape: (6, 2)
┌─────┬────────────────┐
│ A   ┆ rolling_median │
│ --- ┆ ---            │
│ f64 ┆ f64            │
╞═════╪════════════════╡
│ 1.0 ┆ null           │
│ 2.0 ┆ 2.0            │
│ 3.0 ┆ 3.0            │
│ 4.0 ┆ 4.0            │
│ 5.0 ┆ 5.0            │
│ 6.0 ┆ null           │
└─────┴────────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_median(window_size, weights, min_samples, center))

    def rolling_quantile(self, quantile: float, interpolation: QuantileMethod = "nearest",
                         window_size: int = 2, weights: list[float] | None = None,
                         min_samples: int | None = None, *, center: bool = False) -> Expr:
        """
Compute a rolling quantile.

A window of length `window_size` will traverse the array. The values that fill
this window will (optionally) be multiplied with the weights given by the
`weights` vector. The resulting values will be aggregated to their quantile.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
quantile
    Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
    Interpolation method.
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Notes
-----
If you want to compute multiple aggregation statistics over the same dynamic
window, consider using `rolling` - this method can cache the window size
computation.

Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
>>> df.with_columns(
...     rolling_quantile=pl.col("A").rolling_quantile(
...         quantile=0.25, window_size=4
...     ),
... )
shape: (6, 2)
┌─────┬──────────────────┐
│ A   ┆ rolling_quantile │
│ --- ┆ ---              │
│ f64 ┆ f64              │
╞═════╪══════════════════╡
│ 1.0 ┆ null             │
│ 2.0 ┆ null             │
│ 3.0 ┆ null             │
│ 4.0 ┆ 2.0              │
│ 5.0 ┆ 3.0              │
│ 6.0 ┆ 4.0              │
└─────┴──────────────────┘

Specify weights for the values in each window:

>>> df.with_columns(
...     rolling_quantile=pl.col("A").rolling_quantile(
...         quantile=0.25, window_size=4, weights=[0.2, 0.4, 0.4, 0.2]
...     ),
... )
shape: (6, 2)
┌─────┬──────────────────┐
│ A   ┆ rolling_quantile │
│ --- ┆ ---              │
│ f64 ┆ f64              │
╞═════╪══════════════════╡
│ 1.0 ┆ null             │
│ 2.0 ┆ null             │
│ 3.0 ┆ null             │
│ 4.0 ┆ 2.0              │
│ 5.0 ┆ 3.0              │
│ 6.0 ┆ 4.0              │
└─────┴──────────────────┘

Specify weights and interpolation method

>>> df.with_columns(
...     rolling_quantile=pl.col("A").rolling_quantile(
...         quantile=0.25,
...         window_size=4,
...         weights=[0.2, 0.4, 0.4, 0.2],
...         interpolation="linear",
...     ),
... )
shape: (6, 2)
┌─────┬──────────────────┐
│ A   ┆ rolling_quantile │
│ --- ┆ ---              │
│ f64 ┆ f64              │
╞═════╪══════════════════╡
│ 1.0 ┆ null             │
│ 2.0 ┆ null             │
│ 3.0 ┆ null             │
│ 4.0 ┆ 1.625            │
│ 5.0 ┆ 2.625            │
│ 6.0 ┆ 3.625            │
└─────┴──────────────────┘

Center the values in the window

>>> df.with_columns(
...     rolling_quantile=pl.col("A").rolling_quantile(
...         quantile=0.2, window_size=5, center=True
...     ),
... )
shape: (6, 2)
┌─────┬──────────────────┐
│ A   ┆ rolling_quantile │
│ --- ┆ ---              │
│ f64 ┆ f64              │
╞═════╪══════════════════╡
│ 1.0 ┆ null             │
│ 2.0 ┆ null             │
│ 3.0 ┆ 2.0              │
│ 4.0 ┆ 3.0              │
│ 5.0 ┆ null             │
│ 6.0 ┆ null             │
└─────┴──────────────────┘
        """
        return wrap_expr(self._pyexpr.rolling_quantile(
            quantile, interpolation, window_size, weights, min_samples, center))

    @unstable()
    def rolling_skew(self, window_size: int, *, bias: bool = True,
                     min_samples: int | None = None, center: bool = False) -> Expr:
        """
Compute a rolling skew.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

Parameters
----------
window_size
    Integer size of the rolling window.
bias
    If False, the calculations are corrected for statistical bias.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

See Also
--------
Expr.skew

Examples
--------
>>> df = pl.DataFrame({"a": [1, 4, 2, 9]})
>>> df.select(pl.col("a").rolling_skew(3))
shape: (4, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ null     │
│ null     │
│ 0.381802 │
│ 0.47033  │
└──────────┘

Note how the values match the following:

>>> pl.Series([1, 4, 2]).skew(), pl.Series([4, 2, 9]).skew()
(0.38180177416060584, 0.47033046033698594)
        """
        return wrap_expr(self._pyexpr.rolling_skew(window_size, bias, min_samples, center))

    @unstable()
    def rolling_kurtosis(self, window_size: int, *, fisher: bool = True, bias: bool = True,
                         min_samples: int | None = None, center: bool = False) -> Expr:
        """
Compute a rolling kurtosis.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

The window at a given row will include the row itself, and the `window_size - 1`
elements before it.

Parameters
----------
window_size
    Integer size of the rolling window.
fisher : bool, optional
    If True, Fisher's definition is used (normal ==> 0.0). If False,
    Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
    If False, the calculations are corrected for statistical bias.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

See Also
--------
Expr.kurtosis

Examples
--------
>>> df = pl.DataFrame({"a": [1, 4, 2, 9]})
>>> df.select(pl.col("a").rolling_kurtosis(3))
shape: (4, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ null │
│ null │
│ -1.5 │
│ -1.5 │
└──────┘
        """
        return wrap_expr(self._pyexpr.rolling_kurtosis(window_size, fisher, bias, min_samples, center))

    @unstable()
    def rolling_map(self, function: Callable[[Series], Any], window_size: int,
                    weights: list[float] | None = None, min_samples: int | None = None,
                    *, center: bool = False) -> Expr:
        """
Compute a custom rolling window function.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
function
    Custom aggregation function.
window_size
    The length of the window in number of elements.
weights
    An optional slice with the same length as the window that will be multiplied
    elementwise with the values in the window.
min_samples
    The number of values in the window that should be non-null before computing
    a result. If set to `None` (default), it will be set equal to `window_size`.
center
    Set the labels at the center of the window.

Warnings
--------
Computing custom functions is extremely slow. Use specialized rolling
functions such as :func:`Expr.rolling_sum` if at all possible.
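
For example, the specialized equivalent of a summing `rolling_map` is
(a sketch, not a doctest)::

    # slow: calls back into Python for every window
    pl.col("a").rolling_map(sum, window_size=3)
    # fast: stays in the native engine
    pl.col("a").rolling_sum(window_size=3)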

Examples
--------
>>> from numpy import nansum
>>> df = pl.DataFrame({"a": [11.0, 2.0, 9.0, float("nan"), 8.0]})
>>> df.select(pl.col("a").rolling_map(nansum, window_size=3))
shape: (5, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ null │
│ null │
│ 22.0 │
│ 11.0 │
│ 17.0 │
└──────┘
        """
        if min_samples is None:
            min_samples = window_size

        def _wrap(pyseries: PySeries) -> PySeries:
            # re-wrap the raw series, apply the user function, and coerce a
            # scalar result back into a single-element Series
            result = function(wrap_s(pyseries))
            if isinstance(result, Series):
                return result._s
            return Series([result])._s

        return wrap_expr(self._pyexpr.rolling_map(_wrap, window_size, weights, min_samples, center))

    def abs(self) -> Expr:
        """
Compute absolute values.

Same as `abs(expr)`.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "A": [-1.0, 0.0, 1.0, 2.0],
...     }
... )
>>> df.select(pl.col("A").abs())
shape: (4, 1)
┌─────┐
│ A   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
│ 0.0 │
│ 1.0 │
│ 2.0 │
└─────┘
        return wrap_expr(self._pyexpr.abs())

    def rank(self, method: RankMethod = "average", *, descending: bool = False,
             seed: int | None = None) -> Expr:
        """
Assign ranks to data, dealing with ties appropriately.

Parameters
----------
method : {'average', 'min', 'max', 'dense', 'ordinal', 'random'}
    The method used to assign ranks to tied elements.
    The following methods are available (default is 'average'):

    - 'average' : The average of the ranks that would have been assigned to
      all the tied values is assigned to each value.
    - 'min' : The minimum of the ranks that would have been assigned to all
      the tied values is assigned to each value. (This is also referred to
      as "competition" ranking.)
    - 'max' : The maximum of the ranks that would have been assigned to all
      the tied values is assigned to each value.
    - 'dense' : Like 'min', but the rank of the next highest element is
      assigned the rank immediately after those assigned to the tied
      elements.
    - 'ordinal' : All values are given a distinct rank, corresponding to
      the order that the values occur in the Series.
    - 'random' : Like 'ordinal', but the rank for ties is not dependent
      on the order that the values occur in the Series.
descending
    Rank in descending order.
seed
    If `method="random"`, use this as seed.

Examples
--------
The 'average' method:

>>> df = pl.DataFrame({"a": [3, 6, 1, 1, 6]})
>>> df.select(pl.col("a").rank())
shape: (5, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 3.0 │
│ 4.5 │
│ 1.5 │
│ 1.5 │
│ 4.5 │
└─────┘

The 'ordinal' method:

>>> df = pl.DataFrame({"a": [3, 6, 1, 1, 6]})
>>> df.select(pl.col("a").rank("ordinal"))
shape: (5, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 3   │
│ 4   │
│ 1   │
│ 2   │
│ 5   │
└─────┘

Use 'rank' with 'over' to rank within groups:

>>> df = pl.DataFrame({"a": [1, 1, 2, 2, 2], "b": [6, 7, 5, 14, 11]})
>>> df.with_columns(pl.col("b").rank().over("a").alias("rank"))
shape: (5, 3)
┌─────┬─────┬──────┐
│ a   ┆ b   ┆ rank │
│ --- ┆ --- ┆ ---  │
│ i64 ┆ i64 ┆ f64  │
╞═════╪═════╪══════╡
│ 1   ┆ 6   ┆ 1.0  │
│ 1   ┆ 7   ┆ 2.0  │
│ 2   ┆ 5   ┆ 1.0  │
│ 2   ┆ 14  ┆ 3.0  │
│ 2   ┆ 11  ┆ 2.0  │
└─────┴─────┴──────┘

Divide by the length or number of non-null values
to compute the percentile rank.

>>> df = pl.DataFrame({"a": [6, 7, None, 14, 11]})
>>> df.with_columns(
...     pct=pl.col("a").rank() / pl.len(),
...     pct_valid=pl.col("a").rank() / pl.count("a"),
... )
shape: (5, 3)
┌──────┬──────┬───────────┐
│ a    ┆ pct  ┆ pct_valid │
│ ---  ┆ ---  ┆ ---       │
│ i64  ┆ f64  ┆ f64       │
╞══════╪══════╪═══════════╡
│ 6    ┆ 0.2  ┆ 0.25      │
│ 7    ┆ 0.4  ┆ 0.5       │
│ null ┆ null ┆ null      │
│ 14   ┆ 0.8  ┆ 1.0       │
│ 11   ┆ 0.6  ┆ 0.75      │
└──────┴──────┴───────────┘

        """
        return wrap_expr(self._pyexpr.rank(method, descending, seed))

    def diff(self, n: int | IntoExprColumn = 1, null_behavior: NullBehavior = "ignore") -> Expr:
        """
Calculate the first discrete difference between shifted items.

Parameters
----------
n
    Number of slots to shift.
null_behavior : {'ignore', 'drop'}
    How to handle null values.
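
`diff(n)` is equivalent to subtracting a shifted copy of the column; using the
frame from the examples below (a sketch, not a doctest)::

    df.with_columns(
        direct=pl.col("int").diff(n=2),
        manual=pl.col("int") - pl.col("int").shift(2),
    )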

Examples
--------
>>> df = pl.DataFrame({"int": [20, 10, 30, 25, 35]})
>>> df.with_columns(change=pl.col("int").diff())
shape: (5, 2)
┌─────┬────────┐
│ int ┆ change │
│ --- ┆ ---    │
│ i64 ┆ i64    │
╞═════╪════════╡
│ 20  ┆ null   │
│ 10  ┆ -10    │
│ 30  ┆ 20     │
│ 25  ┆ -5     │
│ 35  ┆ 10     │
└─────┴────────┘

>>> df.with_columns(change=pl.col("int").diff(n=2))
shape: (5, 2)
┌─────┬────────┐
│ int ┆ change │
│ --- ┆ ---    │
│ i64 ┆ i64    │
╞═════╪════════╡
│ 20  ┆ null   │
│ 10  ┆ null   │
│ 30  ┆ 10     │
│ 25  ┆ 15     │
│ 35  ┆ 5      │
└─────┴────────┘

>>> df.select(pl.col("int").diff(n=2, null_behavior="drop").alias("diff"))
shape: (3, 1)
┌──────┐
│ diff │
│ ---  │
│ i64  │
╞══════╡
│ 10   │
│ 15   │
│ 5    │
└──────┘
        """
        n = parse_into_expression(n)
        return wrap_expr(self._pyexpr.diff(n, null_behavior))

    def pct_change(self, n: int | IntoExprColumn = 1) -> Expr:
        """
Computes percentage change between values.

Percentage change (as fraction) between current element and most-recent
non-null element at least `n` period(s) before the current element.

Computes the change from the previous row by default.

Parameters
----------
n
    periods to shift for forming percent change.
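
Equivalently, forward-fill the column and divide by its shifted copy
(a sketch, not a doctest)::

    filled = pl.col("a").fill_null(strategy="forward")
    df.with_columns(manual=filled / filled.shift(1) - 1)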

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "a": [10, 11, 12, None, 12],
...     }
... )
>>> df.with_columns(pl.col("a").pct_change().alias("pct_change"))
shape: (5, 2)
┌──────┬────────────┐
│ a    ┆ pct_change │
│ ---  ┆ ---        │
│ i64  ┆ f64        │
╞══════╪════════════╡
│ 10   ┆ null       │
│ 11   ┆ 0.1        │
│ 12   ┆ 0.090909   │
│ null ┆ 0.0        │
│ 12   ┆ 0.0        │
└──────┴────────────┘
        """
        n = parse_into_expression(n)
        return wrap_expr(self._pyexpr.pct_change(n))

    def skew(self, *, bias: bool = True) -> Expr:
        r"""
Compute the sample skewness of a data set.

For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.


See scipy.stats for more information.

Parameters
----------
bias : bool, optional
    If False, the calculations are corrected for statistical bias.

Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.

.. math:: g_1=\frac{m_3}{m_2^{3/2}}

where

.. math:: m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i

is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If `bias` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.

.. math::
    G_1 = \frac{k_3}{k_2^{3/2}} = \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 2, 1]})
>>> df.select(pl.col("a").skew())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.343622 │
└──────────┘
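
As a sanity check, the biased estimate above can be reproduced by hand from
the central moments defined in the Notes (a sketch, not part of the API):

>>> xs = [1, 2, 3, 2, 1]
>>> m = sum(xs) / len(xs)
>>> m2 = sum((x - m) ** 2 for x in xs) / len(xs)
>>> m3 = sum((x - m) ** 3 for x in xs) / len(xs)
>>> round(m3 / m2**1.5, 6)
0.343622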
        """
        return wrap_expr(self._pyexpr.skew(bias))

    def kurtosis(self, *, fisher: bool = True, bias: bool = True) -> Expr:
        """
Compute the kurtosis (Fisher or Pearson) of a dataset.

Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators.

See scipy.stats for more information.

Parameters
----------
fisher : bool, optional
    If True, Fisher's definition is used (normal ==> 0.0). If False,
    Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
    If False, the calculations are corrected for statistical bias.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 2, 1]})
>>> df.select(pl.col("a").kurtosis())
shape: (1, 1)
┌───────────┐
│ a         │
│ ---       │
│ f64       │
╞═══════════╡
│ -1.153061 │
└───────────┘
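
The Fisher definition above (fourth central moment over squared variance,
minus 3) can be reproduced by hand (an illustrative check of the biased
estimate):

>>> xs = [1, 2, 3, 2, 1]
>>> m = sum(xs) / len(xs)
>>> m2 = sum((x - m) ** 2 for x in xs) / len(xs)
>>> m4 = sum((x - m) ** 4 for x in xs) / len(xs)
>>> round(m4 / m2**2 - 3.0, 6)
-1.153061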
        """
        return wrap_expr(self._pyexpr.kurtosis(fisher, bias))

    def clip(
        self,
        lower_bound: NumericLiteral | TemporalLiteral | IntoExprColumn | None = None,
        upper_bound: NumericLiteral | TemporalLiteral | IntoExprColumn | None = None,
    ) -> Expr:
        """
Set values outside the given boundaries to the boundary value.

Parameters
----------
lower_bound
    Lower bound. Accepts expression input. Non-expression inputs are
    parsed as literals. Strings are parsed as column names.
upper_bound
    Upper bound. Accepts expression input. Non-expression inputs are
    parsed as literals. Strings are parsed as column names.

See Also
--------
when

Notes
-----
This method only works for numeric and temporal columns. To clip other data
types, consider writing a `when-then-otherwise` expression. See :func:`when`.

Examples
--------
Specifying both a lower and upper bound:

>>> df = pl.DataFrame({"a": [-50, 5, 50, None]})
>>> df.with_columns(clip=pl.col("a").clip(1, 10))
shape: (4, 2)
┌──────┬──────┐
│ a    ┆ clip │
│ ---  ┆ ---  │
│ i64  ┆ i64  │
╞══════╪══════╡
│ -50  ┆ 1    │
│ 5    ┆ 5    │
│ 50   ┆ 10   │
│ null ┆ null │
└──────┴──────┘

Specifying only a single bound:

>>> df.with_columns(clip=pl.col("a").clip(upper_bound=10))
shape: (4, 2)
┌──────┬──────┐
│ a    ┆ clip │
│ ---  ┆ ---  │
│ i64  ┆ i64  │
╞══════╪══════╡
│ -50  ┆ -50  │
│ 5    ┆ 5    │
│ 50   ┆ 10   │
│ null ┆ null │
└──────┴──────┘

Using columns as bounds:

>>> df = pl.DataFrame(
...     {"a": [-50, 5, 50, None], "low": [10, 1, 0, 0], "up": [20, 4, 3, 2]}
... )
>>> df.with_columns(clip=pl.col("a").clip("low", "up"))
shape: (4, 4)
┌──────┬─────┬─────┬──────┐
│ a    ┆ low ┆ up  ┆ clip │
│ ---  ┆ --- ┆ --- ┆ ---  │
│ i64  ┆ i64 ┆ i64 ┆ i64  │
╞══════╪═════╪═════╪══════╡
│ -50  ┆ 10  ┆ 20  ┆ 10   │
│ 5    ┆ 1   ┆ 4   ┆ 4    │
│ 50   ┆ 0   ┆ 3   ┆ 3    │
│ null ┆ 0   ┆ 2   ┆ null │
└──────┴─────┴─────┴──────┘
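
For non-numeric data, the same result can be expressed with the
`when-then-otherwise` chain mentioned in the Notes; a sketch of the
equivalent expression, assuming the same `low` and `up` columns:

>>> df.with_columns(
...     clip=pl.when(pl.col("a") < pl.col("low"))
...     .then(pl.col("low"))
...     .when(pl.col("a") > pl.col("up"))
...     .then(pl.col("up"))
...     .otherwise(pl.col("a"))
... )  # doctest: +SKIP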
N)r   r$   rd   Úclip)r}   r_  r`  ra  rb  s        ru   rë  Ú	Expr.clipÝ"  sN   € ðZ Ñ"Ü!6°{Ó!CÑà!%ÐØÑ"Ü!6°{Ó!CÑà!%ÐÜ˜Ÿ™×*Ñ*Ð+=ÓRÓSÐSrx   c                óH   • [        U R                  R                  5       5      $ )u;  
Calculate the lower bound.

Returns a unit Series with the lowest value possible for the dtype of this
expression.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 2, 1]})
>>> df.select(pl.col("a").lower_bound())
shape: (1, 1)
┌──────────────────────┐
│ a                    │
│ ---                  │
│ i64                  │
╞══════════════════════╡
│ -9223372036854775808 │
└──────────────────────┘
        """
        return wrap_expr(self._pyexpr.lower_bound())

    def upper_bound(self) -> Expr:
        """
Calculate the upper bound.

Returns a unit Series with the highest value possible for the dtype of this
expression.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 2, 1]})
>>> df.select(pl.col("a").upper_bound())
shape: (1, 1)
┌─────────────────────┐
│ a                   │
│ ---                 │
│ i64                 │
╞═════════════════════╡
│ 9223372036854775807 │
└─────────────────────┘
        """
        return wrap_expr(self._pyexpr.upper_bound())

    def sign(self) -> Expr:
        """
Compute the element-wise sign function on numeric types.

The returned value is computed as follows:

* -1 if x < 0.
*  1 if x > 0.
*  x otherwise (typically 0, but could be NaN if the input is).

Null values are preserved as-is, and the dtype of the input is preserved.

Examples
--------
>>> df = pl.DataFrame({"a": [-9.0, -0.0, 0.0, 4.0, float("nan"), None]})
>>> df.select(pl.col.a.sign())
shape: (6, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ -1.0 │
│ -0.0 │
│ 0.0  │
│ 1.0  │
│ NaN  │
│ null │
└──────┘
        """
        return wrap_expr(self._pyexpr.sign())

    def sin(self) -> Expr:
        """
Compute the element-wise value for the sine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").sin())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
└─────┘
        """
        return wrap_expr(self._pyexpr.sin())

    def cos(self) -> Expr:
        """
Compute the element-wise value for the cosine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").cos())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 1.0 │
└─────┘
        """
        return wrap_expr(self._pyexpr.cos())

    def tan(self) -> Expr:
        """
Compute the element-wise value for the tangent.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").tan().round(2))
shape: (1, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ 1.56 │
└──────┘
        """
        return wrap_expr(self._pyexpr.tan())

    def cot(self) -> Expr:
        """
Compute the element-wise value for the cotangent.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").cot().round(2))
shape: (1, 1)
┌──────┐
│ a    │
│ ---  │
│ f64  │
╞══════╡
│ 0.64 │
└──────┘
        """
        return wrap_expr(self._pyexpr.cot())

    def arcsin(self) -> Expr:
        """
Compute the element-wise value for the inverse sine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arcsin())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.570796 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.arcsin())

    def arccos(self) -> Expr:
        """
Compute the element-wise value for the inverse cosine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").arccos())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.570796 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.arccos())

    def arctan(self) -> Expr:
        """
Compute the element-wise value for the inverse tangent.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arctan())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.785398 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.arctan())

    def sinh(self) -> Expr:
        """
Compute the element-wise value for the hyperbolic sine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").sinh())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.175201 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.sinh())

    def cosh(self) -> Expr:
        """
Compute the element-wise value for the hyperbolic cosine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").cosh())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.543081 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.cosh())

    def tanh(self) -> Expr:
        """
Compute the element-wise value for the hyperbolic tangent.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").tanh())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.761594 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.tanh())

    def arcsinh(self) -> Expr:
        """
Compute the element-wise value for the inverse hyperbolic sine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arcsinh())
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.881374 │
└──────────┘
        """
        return wrap_expr(self._pyexpr.arcsinh())

    def arccosh(self) -> Expr:
        """
Compute the element-wise value for the inverse hyperbolic cosine.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arccosh())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
└─────┘
        """
        return wrap_expr(self._pyexpr.arccosh())

    def arctanh(self) -> Expr:
        """
Compute the element-wise value for the inverse hyperbolic tangent.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arctanh())
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ f64 │
╞═════╡
│ inf │
└─────┘
        """
        return wrap_expr(self._pyexpr.arctanh())

    def degrees(self) -> Expr:
        """
Convert from radians to degrees.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> import math
>>> df = pl.DataFrame({"a": [x * math.pi for x in range(-4, 5)]})
>>> df.select(pl.col("a").degrees())
shape: (9, 1)
┌────────┐
│ a      │
│ ---    │
│ f64    │
╞════════╡
│ -720.0 │
│ -540.0 │
│ -360.0 │
│ -180.0 │
│ 0.0    │
│ 180.0  │
│ 360.0  │
│ 540.0  │
│ 720.0  │
└────────┘
        """
        return wrap_expr(self._pyexpr.degrees())

    def radians(self) -> Expr:
        """
Convert from degrees to radians.

Returns
-------
Expr
    Expression of data type :class:`Float64`.

Examples
--------
>>> df = pl.DataFrame({"a": [-720, -540, -360, -180, 0, 180, 360, 540, 720]})
>>> df.select(pl.col("a").radians())
shape: (9, 1)
┌────────────┐
│ a          │
│ ---        │
│ f64        │
╞════════════╡
│ -12.566371 │
│ -9.424778  │
│ -6.283185  │
│ -3.141593  │
│ 0.0        │
│ 3.141593   │
│ 6.283185   │
│ 9.424778   │
│ 12.566371  │
└────────────┘
        """
        return wrap_expr(self._pyexpr.radians())

    def reshape(self, dimensions: tuple[int, ...]) -> Expr:
        """
Reshape this Expr to a flat column or an Array column.

Parameters
----------
dimensions
    Tuple of the dimension sizes. If a -1 is used in any of the dimensions, that
    dimension is inferred.

Returns
-------
Expr
    If a single dimension is given, results in an expression of the original
    data type.
    If multiple dimensions are given, results in an expression of data type
    :class:`Array` with shape `dimensions`.

Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
>>> square = df.select(pl.col("foo").reshape((3, 3)))
>>> square
shape: (3, 1)
┌───────────────┐
│ foo           │
│ ---           │
│ array[i64, 3] │
╞═══════════════╡
│ [1, 2, 3]     │
│ [4, 5, 6]     │
│ [7, 8, 9]     │
└───────────────┘
>>> square.select(pl.col("foo").reshape((9,)))
shape: (9, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
│ 4   │
│ 5   │
│ 6   │
│ 7   │
│ 8   │
│ 9   │
└─────┘

See Also
--------
Expr.list.explode : Explode a list column.
        """
        return wrap_expr(self._pyexpr.reshape(dimensions))

    def shuffle(self, seed: int | None = None) -> Expr:
        """
Shuffle the contents of this expression.

Note that this is shuffled independently of any other column or expression. If
you want each row to stay intact, use `df.sample(shuffle=True)` instead.

Parameters
----------
seed
    Seed for the random number generator. If set to None (default), a
    random seed is generated each time the shuffle is called.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").shuffle(seed=1))
shape: (3, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 2   │
│ 3   │
│ 1   │
└─────┘
        """
        return wrap_expr(self._pyexpr.shuffle(seed))

    def sample(
        self,
        n: int | IntoExprColumn | None = None,
        *,
        fraction: float | IntoExprColumn | None = None,
        with_replacement: bool = False,
        shuffle: bool = False,
        seed: int | None = None,
    ) -> Expr:
        """
Sample from this expression.

Parameters
----------
n
    Number of items to return. Cannot be used with `fraction`. Defaults to 1 if
    `fraction` is None.
fraction
    Fraction of items to return. Cannot be used with `n`.
with_replacement
    Allow values to be sampled more than once.
shuffle
    Shuffle the order of sampled data points.
seed
    Seed for the random number generator. If set to None (default), a
    random seed is generated for each sample operation.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").sample(fraction=1.0, with_replacement=True, seed=1))
shape: (3, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 3   │
│ 3   │
│ 1   │
└─────┘
        """
        if n is not None and fraction is not None:
            msg = "cannot specify both `n` and `fraction`"
            raise ValueError(msg)

        if fraction is not None:
            fraction_pyexpr = parse_into_expression(fraction)
            return wrap_expr(
                self._pyexpr.sample_frac(
                    fraction_pyexpr, with_replacement, shuffle, seed
                )
            )

        if n is None:
            n = 1
        n_pyexpr = parse_into_expression(n)
        return wrap_expr(
            self._pyexpr.sample_n(n_pyexpr, with_replacement, shuffle, seed)
        )

    def ewm_mean(
        self,
        *,
        com: float | None = None,
        span: float | None = None,
        half_life: float | None = None,
        alpha: float | None = None,
        adjust: bool = True,
        min_samples: int = 1,
        ignore_nulls: bool = False,
    ) -> Expr:
        """
Compute exponentially-weighted moving average.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
com
    Specify decay in terms of center of mass, :math:`\gamma`, with

        .. math::
            \alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
span
    Specify decay in terms of span, :math:`\theta`, with

        .. math::
            \alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
half_life
    Specify decay in terms of half-life, :math:`\tau`, with

        .. math::
            \alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \;
            \forall \; \tau > 0
alpha
    Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
adjust
    Divide by decaying adjustment factor in beginning periods to account for
    imbalance in relative weightings

        - When `adjust=True` (the default) the EW function is calculated
          using weights :math:`w_i = (1 - \alpha)^i`
        - When `adjust=False` the EW function is calculated
          recursively by

          .. math::
            y_0 &= x_0 \\
            y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
min_samples
    Minimum number of observations in window required to have a value
    (otherwise result is null).
ignore_nulls
    Ignore missing values when calculating weights.

        - When `ignore_nulls=False` (default), weights are based on absolute
          positions.
          For example, the weights of :math:`x_0` and :math:`x_2` used in
          calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are
          :math:`(1-\alpha)^2` and :math:`1` if `adjust=True`, and
          :math:`(1-\alpha)^2` and :math:`\alpha` if `adjust=False`.

        - When `ignore_nulls=True`, weights are based
          on relative positions. For example, the weights of
          :math:`x_0` and :math:`x_2` used in calculating the final weighted
          average of [:math:`x_0`, None, :math:`x_2`] are
          :math:`1-\alpha` and :math:`1` if `adjust=True`,
          and :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").ewm_mean(com=1, ignore_nulls=False))
shape: (3, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.0      │
│ 1.666667 │
│ 2.428571 │
└──────────┘
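
With `adjust=True`, the last value above is the weighted average with weights
`(1 - alpha)^i` over increasingly old observations; a hand check for `com=1`
(so `alpha = 0.5`), illustrative only:

>>> a = 0.5  # alpha = 1 / (1 + com)
>>> xs = [1, 2, 3]
>>> num = sum((1 - a) ** i * x for i, x in enumerate(reversed(xs)))
>>> den = sum((1 - a) ** i for i in range(len(xs)))
>>> round(num / den, 6)
2.428571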
        """
        alpha = _prepare_alpha(com, span, half_life, alpha)
        return wrap_expr(
            self._pyexpr.ewm_mean(alpha, adjust, min_samples, ignore_nulls)
        )

    def ewm_mean_by(
        self,
        by: str | IntoExpr,
        *,
        half_life: str | timedelta,
    ) -> Expr:
        """
Compute time-based exponentially weighted moving average.

Given observations :math:`x_0, x_1, \ldots, x_{n-1}` at times
:math:`t_0, t_1, \ldots, t_{n-1}`, the EWMA is calculated as

    .. math::

        y_0 &= x_0

        \alpha_i &= 1 - \exp \left\{ \frac{ -\ln(2)(t_i-t_{i-1}) }
            { \tau } \right\}

        y_i &= \alpha_i x_i + (1 - \alpha_i) y_{i-1}; \quad i > 0

where :math:`\tau` is the `half_life`.

Parameters
----------
by
    Times to calculate average by. Should be ``DateTime``, ``Date``, ``UInt64``,
    ``UInt32``, ``Int64``, or ``Int32`` data type.
half_life
    Unit over which observation decays to half its value.

    Can be created either from a timedelta, or
    by using the following string language:

    - 1ns   (1 nanosecond)
    - 1us   (1 microsecond)
    - 1ms   (1 millisecond)
    - 1s    (1 second)
    - 1m    (1 minute)
    - 1h    (1 hour)
    - 1d    (1 day)
    - 1w    (1 week)
    - 1i    (1 index count)

    Or combine them:
    "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

    Note that `half_life` is treated as a constant duration - calendar
    durations such as months (or even days in the time-zone-aware case)
    are not supported; please express your duration in an approximately
    equivalent number of hours (e.g. '370h' instead of '1mo').

Returns
-------
Expr
    Float32 if input is Float32, otherwise Float64.

Examples
--------
>>> from datetime import date, timedelta
>>> df = pl.DataFrame(
...     {
...         "values": [0, 1, 2, None, 4],
...         "times": [
...             date(2020, 1, 1),
...             date(2020, 1, 3),
...             date(2020, 1, 10),
...             date(2020, 1, 15),
...             date(2020, 1, 17),
...         ],
...     }
... ).sort("times")
>>> df.with_columns(
...     result=pl.col("values").ewm_mean_by("times", half_life="4d"),
... )
shape: (5, 3)
┌────────┬────────────┬──────────┐
│ values ┆ times      ┆ result   │
│ ---    ┆ ---        ┆ ---      │
│ i64    ┆ date       ┆ f64      │
╞════════╪════════════╪══════════╡
│ 0      ┆ 2020-01-01 ┆ 0.0      │
│ 1      ┆ 2020-01-03 ┆ 0.292893 │
│ 2      ┆ 2020-01-10 ┆ 1.492474 │
│ null   ┆ 2020-01-15 ┆ null     │
│ 4      ┆ 2020-01-17 ┆ 3.254508 │
└────────┴────────────┴──────────┘
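
The second result above can be verified by hand: two days elapse between the
first two observations, so the decay factor follows directly from the
half-life formula (an illustrative check):

>>> import math
>>> alpha = 1 - math.exp(-math.log(2) * 2 / 4)  # 2 days elapsed, half_life="4d"
>>> round(alpha * 1 + (1 - alpha) * 0.0, 6)
0.292893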
        """
        by = parse_into_expression(by)
        half_life = parse_as_duration_string(half_life)
        return wrap_expr(self._pyexpr.ewm_mean_by(by, half_life))

    def ewm_std(
        self,
        *,
        com: float | None = None,
        span: float | None = None,
        half_life: float | None = None,
        alpha: float | None = None,
        adjust: bool = True,
        bias: bool = False,
        min_samples: int = 1,
        ignore_nulls: bool = False,
    ) -> Expr:
        """
Compute exponentially-weighted moving standard deviation.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
com
    Specify decay in terms of center of mass, :math:`\gamma`, with

        .. math::
            \alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
span
    Specify decay in terms of span, :math:`\theta`, with

        .. math::
            \alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
half_life
    Specify decay in terms of half-life, :math:`\lambda`, with

        .. math::
            \alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \lambda } \right\} \;
            \forall \; \lambda > 0
alpha
    Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
adjust
    Divide by decaying adjustment factor in beginning periods to account for
    imbalance in relative weightings

        - When `adjust=True` (the default) the EW function is calculated
          using weights :math:`w_i = (1 - \alpha)^i`
        - When `adjust=False` the EW function is calculated
          recursively by

          .. math::
            y_0 &= x_0 \\
            y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
bias
    When `bias=False`, apply a correction to make the estimate statistically
    unbiased.
min_samples
    Minimum number of observations in window required to have a value
    (otherwise result is null).
ignore_nulls
    Ignore missing values when calculating weights.

        - When `ignore_nulls=False` (default), weights are based on absolute
          positions.
          For example, the weights of :math:`x_0` and :math:`x_2` used in
          calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are
          :math:`(1-\alpha)^2` and :math:`1` if `adjust=True`, and
          :math:`(1-\alpha)^2` and :math:`\alpha` if `adjust=False`.

        - When `ignore_nulls=True`, weights are based
          on relative positions. For example, the weights of
          :math:`x_0` and :math:`x_2` used in calculating the final weighted
          average of [:math:`x_0`, None, :math:`x_2`] are
          :math:`1-\alpha` and :math:`1` if `adjust=True`,
          and :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").ewm_std(com=1, ignore_nulls=False))
shape: (3, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.0      │
│ 0.707107 │
│ 0.963624 │
└──────────┘
        """
        alpha = _prepare_alpha(com, span, half_life, alpha)
        return wrap_expr(
            self._pyexpr.ewm_std(alpha, adjust, bias, min_samples, ignore_nulls)
        )

    def ewm_var(
        self,
        *,
        com: float | None = None,
        span: float | None = None,
        half_life: float | None = None,
        alpha: float | None = None,
        adjust: bool = True,
        bias: bool = False,
        min_samples: int = 1,
        ignore_nulls: bool = False,
    ) -> Expr:
        """
Compute exponentially-weighted moving variance.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
com
    Specify decay in terms of center of mass, :math:`\gamma`, with

        .. math::
            \alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
span
    Specify decay in terms of span, :math:`\theta`, with

        .. math::
            \alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
half_life
    Specify decay in terms of half-life, :math:`\lambda`, with

        .. math::
            \alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \lambda } \right\} \;
            \forall \; \lambda > 0
alpha
    Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
adjust
    Divide by decaying adjustment factor in beginning periods to account for
    imbalance in relative weightings

        - When `adjust=True` (the default) the EW function is calculated
          using weights :math:`w_i = (1 - \alpha)^i`
        - When `adjust=False` the EW function is calculated
          recursively by

          .. math::
            y_0 &= x_0 \\
            y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
bias
    When `bias=False`, apply a correction to make the estimate statistically
    unbiased.
min_samples
    Minimum number of observations in window required to have a value
    (otherwise result is null).
ignore_nulls
    Ignore missing values when calculating weights.

        - When `ignore_nulls=False` (default), weights are based on absolute
          positions.
          For example, the weights of :math:`x_0` and :math:`x_2` used in
          calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are
          :math:`(1-\alpha)^2` and :math:`1` if `adjust=True`, and
          :math:`(1-\alpha)^2` and :math:`\alpha` if `adjust=False`.

        - When `ignore_nulls=True`, weights are based
          on relative positions. For example, the weights of
          :math:`x_0` and :math:`x_2` used in calculating the final weighted
          average of [:math:`x_0`, None, :math:`x_2`] are
          :math:`1-\alpha` and :math:`1` if `adjust=True`,
          and :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").ewm_var(com=1, ignore_nulls=False))
shape: (3, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.0      │
│ 0.5      │
│ 0.928571 │
└──────────┘
        """
        alpha = _prepare_alpha(com, span, half_life, alpha)
        return wrap_expr(
            self._pyexpr.ewm_var(alpha, adjust, bias, min_samples, ignore_nulls)
        )

    def extend_constant(self, value: IntoExpr, n: int | IntoExprColumn) -> Expr:
        """
Extremely fast method for extending the Series with 'n' copies of a value.

Parameters
----------
value
    A constant literal value or a unit expression with which to extend the
    expression result Series; can pass None to extend with nulls.
n
    The number of additional values that will be added.

Examples
--------
>>> df = pl.DataFrame({"values": [1, 2, 3]})
>>> df.select((pl.col("values") - 1).extend_constant(99, n=2))
shape: (5, 1)
┌────────┐
│ values │
│ ---    │
│ i64    │
╞════════╡
│ 0      │
│ 1      │
│ 2      │
│ 99     │
│ 99     │
└────────┘
        """
        value_pyexpr = parse_into_expression(value, str_as_lit=True)
        n_pyexpr = parse_into_expression(n)
        return wrap_expr(self._pyexpr.extend_constant(value_pyexpr, n_pyexpr))

    def value_counts(
        self,
        *,
        sort: bool = False,
        parallel: bool = False,
        name: str | None = None,
        normalize: bool = False,
    ) -> Expr:
        """
  
Count the occurrence of unique values.

Parameters
----------
sort
    Sort the output by count, in descending order.
    If set to `False` (default), the order is non-deterministic.
parallel
    Execute the computation in parallel.

    .. note::
        This option should likely *not* be enabled in a `group_by` context,
        as the computation will already be parallelized per group.
name
    Give the resulting count column a specific name; if `normalize` is
    True this defaults to "proportion", otherwise defaults to "count".
normalize
    If True, the count is returned as the relative frequency of unique
    values normalized to 1.0.

Returns
-------
Expr
    Expression of type :class:`Struct`, mapping unique values to their
    count (or proportion).

Examples
--------
>>> df = pl.DataFrame(
...     {"color": ["red", "blue", "red", "green", "blue", "blue"]}
... )
>>> df_count = df.select(pl.col("color").value_counts())
>>> df_count  # doctest: +IGNORE_RESULT
shape: (3, 1)
┌─────────────┐
│ color       │
│ ---         │
│ struct[2]   │
╞═════════════╡
│ {"green",1} │
│ {"blue",3}  │
│ {"red",2}   │
└─────────────┘

>>> df_count.unnest("color")  # doctest: +IGNORE_RESULT
shape: (3, 2)
┌───────┬───────┐
│ color ┆ count │
│ ---   ┆ ---   │
│ str   ┆ u32   │
╞═══════╪═══════╡
│ green ┆ 1     │
│ blue  ┆ 3     │
│ red   ┆ 2     │
└───────┴───────┘

Sort the output by (descending) count, customize the field name,
and normalize the count to its relative proportion (of 1.0).

>>> df_count = df.select(
...     pl.col("color").value_counts(
...         name="fraction",
...         normalize=True,
...         sort=True,
...     )
... )
>>> df_count
shape: (3, 1)
┌────────────────────┐
│ color              │
│ ---                │
│ struct[2]          │
╞════════════════════╡
│ {"blue",0.5}       │
│ {"red",0.333333}   │
│ {"green",0.166667} │
└────────────────────┘

>>> df_count.unnest("color")
shape: (3, 2)
┌───────┬──────────┐
│ color ┆ fraction │
│ ---   ┆ ---      │
│ str   ┆ f64      │
╞═══════╪══════════╡
│ blue  ┆ 0.5      │
│ red   ┆ 0.333333 │
│ green ┆ 0.166667 │
└───────┴──────────┘
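
The normalized values are simply each count divided by the total number of
rows; a quick check for the six rows above (illustrative only):

>>> [round(c / 6, 6) for c in (3, 2, 1)]
[0.5, 0.333333, 0.166667]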
        """
        name = name or ("proportion" if normalize else "count")
        return wrap_expr(self._pyexpr.value_counts(sort, parallel, name, normalize))

    def unique_counts(self) -> Expr:
        """
Return a count of the unique values in the order of appearance.

This method differs from `value_counts` in that it does not return the
values, only the counts; it can also be faster.

Examples
--------
>>> df = pl.DataFrame(
...     {
...         "id": ["a", "b", "b", "c", "c", "c"],
...     }
... )
>>> df.select(
...     [
...         pl.col("id").unique_counts(),
...     ]
... )
shape: (3, 1)
┌─────┐
│ id  │
│ --- │
│ u32 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
└─────┘
        """
        return wrap_expr(self._pyexpr.unique_counts())

    def log(self, base: float | IntoExprColumn = math.e) -> Expr:
        """
Compute the logarithm to a given base.

Parameters
----------
base
    Given base, defaults to `e`

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").log(base=2))
shape: (3, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.0      │
│ 1.0      │
│ 1.584963 │
└──────────┘
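
For reference, the last value can be checked against the standard library
(an illustrative check):

>>> import math
>>> round(math.log(3, 2), 6)
1.584963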
        """
        base_pyexpr = parse_into_expression(base)
        return wrap_expr(self._pyexpr.log(base_pyexpr))

    def log1p(self) -> Expr:
        """
Compute the natural logarithm of each element plus one.

This computes `log(1 + x)` but is more numerically stable for `x` close to zero.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").log1p())
shape: (3, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 0.693147 │
│ 1.098612 │
│ 1.386294 │
└──────────┘
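
A quick check of the last value against the standard library (illustrative
only):

>>> import math
>>> round(math.log1p(3), 6)
1.386294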
        """
        return wrap_expr(self._pyexpr.log1p())

    def entropy(self, base: float = math.e, *, normalize: bool = True) -> Expr:
        """
Computes the entropy.

Uses the formula `-sum(pk * log(pk))` where `pk` are discrete probabilities.

Parameters
----------
base
    Given base, defaults to `e`
normalize
    Normalize pk if it doesn't sum to 1.

Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3]})
>>> df.select(pl.col("a").entropy(base=2))
shape: (1, 1)
┌──────────┐
│ a        │
│ ---      │
│ f64      │
╞══════════╡
│ 1.459148 │
└──────────┘
>>> df.select(pl.col("a").entropy(base=2, normalize=False))
shape: (1, 1)
┌───────────┐
│ a         │
│ ---       │
│ f64       │
╞═══════════╡
│ -6.754888 │
└───────────┘
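
The normalized result can be reproduced by hand from the formula above
(a sketch, not part of the API):

>>> import math
>>> vals = [1, 2, 3]
>>> pk = [v / sum(vals) for v in vals]
>>> round(-sum(p * math.log2(p) for p in pk), 6)
1.459148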
        """
        return wrap_expr(self._pyexpr.entropy(base, normalize))

    def cumulative_eval(self, expr: Expr, *, min_samples: int = 1) -> Expr:
        """
Run an expression over a sliding window that increases `1` slot every iteration.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

.. versionchanged:: 1.21.0
    The `min_periods` parameter was renamed `min_samples`.

Parameters
----------
expr
    Expression to evaluate
min_samples
    Number of valid values there should be in the window before the expression
    is evaluated. valid values = `length - null_count`

Warnings
--------
This can be really slow as it can have `O(n^2)` complexity. Don't use this
for operations that visit all elements.

Examples
--------
>>> df = pl.DataFrame({"values": [1, 2, 3, 4, 5]})
>>> df.select(
...     [
...         pl.col("values").cumulative_eval(
...             pl.element().first() - pl.element().last() ** 2
...         )
...     ]
... )
shape: (5, 1)
┌────────┐
│ values │
│ ---    │
│ i64    │
╞════════╡
│ 0      │
│ -3     │
│ -8     │
│ -15    │
│ -24    │
└────────┘
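
Each growing window keeps the same first element while the last element
advances, so the output above can be checked by hand:

>>> [1 - w**2 for w in [1, 2, 3, 4, 5]]
[0, -3, -8, -15, -24]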
        """
        return wrap_expr(self._pyexpr.cumulative_eval(expr._pyexpr, min_samples))

    def set_sorted(self, *, descending: bool = False) -> Expr:
        """
Flags the expression as 'sorted'.

Enables downstream code to use fast paths for sorted arrays.

Parameters
----------
descending
    Whether the `Series` order is descending.

Warnings
--------
This can lead to incorrect results if the data is NOT sorted!!
Use with care!

Examples
--------
>>> df = pl.DataFrame({"values": [1, 2, 3]})
>>> df.select(pl.col("values").set_sorted().max())
shape: (1, 1)
┌────────┐
│ values │
│ ---    │
│ i64    │
╞════════╡
│ 3      │
└────────┘
        """
        return wrap_expr(self._pyexpr.set_sorted_flag(descending))

    @deprecated(
        "`Expr.shrink_dtype` is deprecated and is a no-op; "
        "use `Series.shrink_dtype` instead."
    )
    def shrink_dtype(self) -> Expr:
        """
Shrink numeric columns to the minimal required datatype.

Shrink to the dtype needed to fit the extrema of this [`Series`].
This can be used to reduce memory pressure.

.. versionchanged:: 1.33.0
    Deprecated and turned into a no-op. The operation does not match the
    Polars data-model during lazy execution since the output datatype
    cannot be known without inspecting the data.

    Use `Series.shrink_dtype` instead.

Examples
--------
>>> pl.DataFrame(
...     {
...         "a": [1, 2, 3],
...         "b": [1, 2, 2 << 32],
...         "c": [-1, 2, 1 << 30],
...         "d": [-112, 2, 112],
...         "e": [-112, 2, 129],
...         "f": ["a", "b", "c"],
...         "g": [0.1, 1.32, 0.12],
...         "h": [True, None, False],
...     }
... ).select(pl.all().shrink_dtype())  # doctest: +SKIP
shape: (3, 8)
┌─────┬────────────┬────────────┬──────┬──────┬─────┬──────┬───────┐
│ a   ┆ b          ┆ c          ┆ d    ┆ e    ┆ f   ┆ g    ┆ h     │
│ --- ┆ ---        ┆ ---        ┆ ---  ┆ ---  ┆ --- ┆ ---  ┆ ---   │
│ i8  ┆ i64        ┆ i32        ┆ i8   ┆ i16  ┆ str ┆ f32  ┆ bool  │
╞═════╪════════════╪════════════╪══════╪══════╪═════╪══════╪═══════╡
│ 1   ┆ 1          ┆ -1         ┆ -112 ┆ -112 ┆ a   ┆ 0.1  ┆ true  │
│ 2   ┆ 2          ┆ 2          ┆ 2    ┆ 2    ┆ b   ┆ 1.32 ┆ null  │
│ 3   ┆ 8589934592 ┆ 1073741824 ┆ 112  ┆ 129  ┆ c   ┆ 0.12 ┆ false │
└─────┴────────────┴────────────┴──────┴──────┴─────┴──────┴───────┘
        """
        return self

    def hist(
        self,
        bins: list[float] | None = None,
        *,
        bin_count: int | None = None,
        include_category: bool = False,
        include_breakpoint: bool = False,
    ) -> Expr:
        """
Bin values into buckets and count their occurrences.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Parameters
----------
bins
    Bin edges. If None is given, the edges are determined based on the data.
bin_count
    If `bins` is not provided, `bin_count` uniform bins are created that fully
    encompass the data.
include_breakpoint
    Include a column that indicates the upper breakpoint.
include_category
    Include a column that shows the intervals as categories.

Returns
-------
DataFrame

Examples
--------
>>> df = pl.DataFrame({"a": [1, 3, 8, 8, 2, 1, 3]})
>>> df.select(pl.col("a").hist(bins=[1, 2, 3]))
shape: (2, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 3   │
│ 2   │
└─────┘
>>> df.select(
...     pl.col("a").hist(
...         bins=[1, 2, 3], include_breakpoint=True, include_category=True
...     )
... )
shape: (2, 1)
┌──────────────────────┐
│ a                    │
│ ---                  │
│ struct[3]            │
╞══════════════════════╡
│ {2.0,"[1.0, 2.0]",3} │
│ {3.0,"(2.0, 3.0]",2} │
└──────────────────────┘
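
A hand check of the two counts above, using the closed first interval
`[1.0, 2.0]` and the half-open interval `(2.0, 3.0]` (illustrative only):

>>> vals = [1, 3, 8, 8, 2, 1, 3]
>>> (sum(1 <= v <= 2 for v in vals), sum(2 < v <= 3 for v in vals))
(3, 2)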
N)r!  rj   r  rG   r   r$   rd   Úhist)r}   Úbinsre  rf  rg  Úbins_pyexprs         ru   ri  Ú	Expr.hist~(  sY   € ðx ÑÜ˜$¤×%Ñ%Ü—y’y “Ü/°Ó5‰KàˆKÜØL‰L×ÑØÐ(8óó
ð 	
rx   ©ÚdefaultrÏ  c               óú  • Ub
  [        SSS9  U[        La  [        SSS9  U R                  XX4S9$ U[        L aU  [        U[        5      (       d  Sn[        U5      e[        UR                  5       5      n[        UR                  5       5      nO [        U[        5      (       a;  [        U[        [        R                  45      (       d  [        R                  " U5      n[        U[        5      (       a;  [        U[        [        R                  45      (       d  [        R                  " U5      n[        USS9n[        USS9n[        U R                  R!                  Xg5      5      nUb  UR#                  U5      nU$ )	uê  
Replace the given values by different values of the same data type.

Parameters
----------
old
    Value or sequence of values to replace.
    Accepts expression input. Sequences are parsed as Series,
    other non-expression inputs are parsed as literals.
    Also accepts a mapping of values to their replacement as syntactic sugar for
    `replace(old=Series(mapping.keys()), new=Series(mapping.values()))`.
new
    Value or sequence of values to replace by.
    Accepts expression input. Sequences are parsed as Series,
    other non-expression inputs are parsed as literals.
    Length must match the length of `old` or have length 1.

default
    Set values that were not replaced to this value.
    Defaults to keeping the original value.
    Accepts expression input. Non-expression inputs are parsed as literals.

    .. deprecated:: 1.0.0
        Use :meth:`replace_strict` instead to set a default while replacing
        values.

return_dtype
    The data type of the resulting expression. If set to `None` (default),
    the data type of the original column is preserved.

    .. deprecated:: 1.0.0
        Use :meth:`replace_strict` instead to set a return data type while
        replacing values, or explicitly call :meth:`cast` on the output.

See Also
--------
replace_strict
str.replace

Notes
-----
The global string cache must be enabled when replacing categorical values.

Examples
--------
Replace a single value by another value. Values that were not replaced remain
unchanged.

>>> df = pl.DataFrame({"a": [1, 2, 2, 3]})
>>> df.with_columns(replaced=pl.col("a").replace(2, 100))
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ 1        │
│ 2   ┆ 100      │
│ 2   ┆ 100      │
│ 3   ┆ 3        │
└─────┴──────────┘

Replace multiple values by passing sequences to the `old` and `new` parameters.

>>> df.with_columns(replaced=pl.col("a").replace([2, 3], [100, 200]))
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ 1        │
│ 2   ┆ 100      │
│ 2   ┆ 100      │
│ 3   ┆ 200      │
└─────┴──────────┘

Passing a mapping with replacements is also supported as syntactic sugar.

>>> mapping = {2: 100, 3: 200}
>>> df.with_columns(replaced=pl.col("a").replace(mapping))
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ 1        │
│ 2   ┆ 100      │
│ 2   ┆ 100      │
│ 3   ┆ 200      │
└─────┴──────────┘
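
The mapping form is purely syntactic sugar for passing the mapping's keys and
values as `old` and `new`; a sketch of the equivalent call:

>>> df.with_columns(
...     replaced=pl.col("a").replace(
...         old=pl.Series(list(mapping.keys())),
...         new=pl.Series(list(mapping.values())),
...     )
... )  # doctest: +SKIP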

The original data type is preserved when replacing by values of a different
data type. Use :meth:`replace_strict` to replace and change the return data
type.

>>> df = pl.DataFrame({"a": ["x", "y", "z"]})
>>> mapping = {"x": 1, "y": 2, "z": 3}
>>> df.with_columns(replaced=pl.col("a").replace(mapping))
shape: (3, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ str ┆ str      │
╞═════╪══════════╡
│ x   ┆ 1        │
│ y   ┆ 2        │
│ z   ┆ 3        │
└─────┴──────────┘

Expression input is supported.

>>> df = pl.DataFrame({"a": [1, 2, 2, 3], "b": [1.5, 2.5, 5.0, 1.0]})
>>> df.with_columns(
...     replaced=pl.col("a").replace(
...         old=pl.col("a").max(),
...         new=pl.col("b").sum(),
...     )
... )
shape: (4, 3)
┌─────┬─────┬──────────┐
│ a   ┆ b   ┆ replaced │
│ --- ┆ --- ┆ ---      │
│ i64 ┆ f64 ┆ i64      │
╞═════╪═════╪══════════╡
│ 1   ┆ 1.5 ┆ 1        │
│ 2   ┆ 2.5 ┆ 2        │
│ 2   ┆ 5.0 ┆ 2        │
│ 3   ┆ 1.0 ┆ 10       │
└─────┴─────┴──────────┘
        """
        if return_dtype is not None:
            issue_deprecation_warning(
                "the `return_dtype` parameter for `replace` is deprecated."
                " Use `replace_strict` instead to set a return data type while"
                " replacing values.",
                version="1.0.0",
            )
        if default is not no_default:
            issue_deprecation_warning(
                "the `default` parameter for `replace` is deprecated."
                " Use `replace_strict` instead to set a default while replacing"
                " values.",
                version="1.0.0",
            )
            return self.replace_strict(
                old, new, default=default, return_dtype=return_dtype
            )

        if new is no_default:
            if not isinstance(old, Mapping):
                msg = "`new` argument is required if `old` argument is not a Mapping type"
                raise TypeError(msg)
            new = pl.Series(old.values())
            old = pl.Series(old.keys())
        else:
            if isinstance(old, Sequence) and not isinstance(old, (str, pl.Series)):
                old = pl.Series(old)
            if isinstance(new, Sequence) and not isinstance(new, (str, pl.Series)):
                new = pl.Series(new)

        old_pyexpr = parse_into_expression(old, str_as_lit=True)
        new_pyexpr = parse_into_expression(new, str_as_lit=True)

        result = wrap_expr(self._pyexpr.replace(old_pyexpr, new_pyexpr))
        if return_dtype is not None:
            result = result.cast(return_dtype)
        return result

    def replace_strict(
        self,
        old: IntoExpr | Sequence[Any] | Mapping[Any, Any],
        new: IntoExpr | Sequence[Any] | NoDefault = no_default,
        *,
        default: IntoExpr | NoDefault = no_default,
        return_dtype: PolarsDataType | None = None,
    ) -> Expr:
        """
Replace all values by different values.

Parameters
----------
old
    Value or sequence of values to replace.
    Accepts expression input. Sequences are parsed as Series,
    other non-expression inputs are parsed as literals.
    Also accepts a mapping of values to their replacement as syntactic sugar for
    `replace_strict(old=Series(mapping.keys()), new=Series(mapping.values()))`.
new
    Value or sequence of values to replace by.
    Accepts expression input. Sequences are parsed as Series,
    other non-expression inputs are parsed as literals.
    Length must match the length of `old` or have length 1.
default
    Set values that were not replaced to this value. If no default is specified
    (the default), an error is raised if any values were not replaced.
    Accepts expression input. Non-expression inputs are parsed as literals.
return_dtype
    The data type of the resulting expression. If set to `None` (default),
    the data type is determined automatically based on the other inputs.

Raises
------
InvalidOperationError
    If any non-null values in the original column were not replaced, and no
    `default` was specified.

See Also
--------
replace
str.replace

Notes
-----
The global string cache must be enabled when replacing categorical values.

Examples
--------
Replace values by passing sequences to the `old` and `new` parameters.

>>> df = pl.DataFrame({"a": [1, 2, 2, 3]})
>>> df.with_columns(
...     replaced=pl.col("a").replace_strict([1, 2, 3], [100, 200, 300])
... )
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ 100      │
│ 2   ┆ 200      │
│ 2   ┆ 200      │
│ 3   ┆ 300      │
└─────┴──────────┘

Passing a mapping with replacements is also supported as syntactic sugar.

>>> mapping = {1: 100, 2: 200, 3: 300}
>>> df.with_columns(replaced=pl.col("a").replace_strict(mapping))
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ 100      │
│ 2   ┆ 200      │
│ 2   ┆ 200      │
│ 3   ┆ 300      │
└─────┴──────────┘

By default, an error is raised if any non-null values were not replaced.
Specify a default to set all values that were not matched.

>>> mapping = {2: 200, 3: 300}
>>> df.with_columns(
...     replaced=pl.col("a").replace_strict(mapping)
... )  # doctest: +SKIP
Traceback (most recent call last):
...
polars.exceptions.InvalidOperationError: incomplete mapping specified for `replace_strict`
>>> df.with_columns(replaced=pl.col("a").replace_strict(mapping, default=-1))
shape: (4, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ i64 ┆ i64      │
╞═════╪══════════╡
│ 1   ┆ -1       │
│ 2   ┆ 200      │
│ 2   ┆ 200      │
│ 3   ┆ 300      │
└─────┴──────────┘

Replacing by values of a different data type sets the return type based on
a combination of the `new` data type and the `default` data type.

>>> df = pl.DataFrame({"a": ["x", "y", "z"]})
>>> mapping = {"x": 1, "y": 2, "z": 3}
>>> df.with_columns(replaced=pl.col("a").replace_strict(mapping))
shape: (3, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ str ┆ i64      │
╞═════╪══════════╡
│ x   ┆ 1        │
│ y   ┆ 2        │
│ z   ┆ 3        │
└─────┴──────────┘
>>> df.with_columns(replaced=pl.col("a").replace_strict(mapping, default="x"))
shape: (3, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ str ┆ str      │
╞═════╪══════════╡
│ x   ┆ 1        │
│ y   ┆ 2        │
│ z   ┆ 3        │
└─────┴──────────┘

Set the `return_dtype` parameter to control the resulting data type directly.

>>> df.with_columns(
...     replaced=pl.col("a").replace_strict(mapping, return_dtype=pl.UInt8)
... )
shape: (3, 2)
┌─────┬──────────┐
│ a   ┆ replaced │
│ --- ┆ ---      │
│ str ┆ u8       │
╞═════╪══════════╡
│ x   ┆ 1        │
│ y   ┆ 2        │
│ z   ┆ 3        │
└─────┴──────────┘

Expression input is supported for all parameters.

>>> df = pl.DataFrame({"a": [1, 2, 2, 3], "b": [1.5, 2.5, 5.0, 1.0]})
>>> df.with_columns(
...     replaced=pl.col("a").replace_strict(
...         old=pl.col("a").max(),
...         new=pl.col("b").sum(),
...         default=pl.col("b"),
...     )
... )
shape: (4, 3)
┌─────┬─────┬──────────┐
│ a   ┆ b   ┆ replaced │
│ --- ┆ --- ┆ ---      │
│ i64 ┆ f64 ┆ f64      │
╞═════╪═════╪══════════╡
│ 1   ┆ 1.5 ┆ 1.5      │
│ 2   ┆ 2.5 ┆ 2.5      │
│ 2   ┆ 5.0 ┆ 5.0      │
│ 3   ┆ 1.0 ┆ 10.0     │
└─────┴─────┴──────────┘
        """
        if new is no_default:
            if not isinstance(old, Mapping):
                msg = "`new` argument is required if `old` argument is not a Mapping type"
                raise TypeError(msg)
            new = pl.Series(old.values())
            old = pl.Series(old.keys())

        old_pyexpr = parse_into_expression(old, str_as_lit=True)
        new_pyexpr = parse_into_expression(new, str_as_lit=True)

        dtype_pyexpr = (
            parse_into_datatype_expr(return_dtype) if return_dtype is not None else None
        )
        default_pyexpr = (
            None
            if default is no_default
            else parse_into_expression(default, str_as_lit=True)
        )
        return wrap_expr(
            self._pyexpr.replace_strict(
                old_pyexpr, new_pyexpr, default_pyexpr, dtype_pyexpr
            )
        )

    def bitwise_count_ones(self) -> Expr:
        """Evaluate the number of set bits."""
        return wrap_expr(self._pyexpr.bitwise_count_ones())

    def bitwise_count_zeros(self) -> Expr:
        """Evaluate the number of unset bits."""
        return wrap_expr(self._pyexpr.bitwise_count_zeros())

    def bitwise_leading_ones(self) -> Expr:
        """Evaluate the number of most-significant set bits before seeing an unset bit."""
        return wrap_expr(self._pyexpr.bitwise_leading_ones())

    def bitwise_leading_zeros(self) -> Expr:
        """Evaluate the number of most-significant unset bits before seeing a set bit."""
        return wrap_expr(self._pyexpr.bitwise_leading_zeros())

    def bitwise_trailing_ones(self) -> Expr:
        """Evaluate the number of least-significant set bits before seeing an unset bit."""
        return wrap_expr(self._pyexpr.bitwise_trailing_ones())

    def bitwise_trailing_zeros(self) -> Expr:
        """Evaluate the number of least-significant unset bits before seeing a set bit."""
        return wrap_expr(self._pyexpr.bitwise_trailing_zeros())

    def bitwise_and(self) -> Expr:
        """Perform an aggregation of bitwise ANDs.
Examples
--------
>>> df = pl.DataFrame({"n": [-1, 0, 1]})
>>> df.select(pl.col("n").bitwise_and())
shape: (1, 1)
┌─────┐
│ n   │
│ --- │
│ i64 │
╞═════╡
│ 0   │
└─────┘
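
In two's complement, `-1` has every bit set, so AND-ing it with `0` clears
every bit: the aggregate over `[-1, 0, 1]` is `0`.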
>>> df = pl.DataFrame(
...     {"grouper": ["a", "a", "a", "b", "b"], "n": [-1, 0, 1, -1, 1]}
... )
>>> df.group_by("grouper", maintain_order=True).agg(pl.col("n").bitwise_and())
shape: (2, 2)
┌─────────┬─────┐
│ grouper ┆ n   │
│ ---     ┆ --- │
│ str     ┆ i64 │
╞═════════╪═════╡
│ a       ┆ 0   │
│ b       ┆ 1   │
└─────────┴─────┘
        """
        return wrap_expr(self._pyexpr.bitwise_and())

    def bitwise_or(self) -> Expr:
        """Perform an aggregation of bitwise ORs.

Examples
--------
>>> df = pl.DataFrame({"n": [-1, 0, 1]})
>>> df.select(pl.col("n").bitwise_or())
shape: (1, 1)
┌─────┐
│ n   │
│ --- │
│ i64 │
╞═════╡
│ -1  │
└─────┘
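
`-1` already has every bit set, so OR-ing further values cannot change it:
the aggregate stays `-1`.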
>>> df = pl.DataFrame(
...     {"grouper": ["a", "a", "a", "b", "b"], "n": [-1, 0, 1, -1, 1]}
... )
>>> df.group_by("grouper", maintain_order=True).agg(pl.col("n").bitwise_or())
shape: (2, 2)
┌─────────┬─────┐
│ grouper ┆ n   │
│ ---     ┆ --- │
│ str     ┆ i64 │
╞═════════╪═════╡
│ a       ┆ -1  │
│ b       ┆ -1  │
└─────────┴─────┘
        """
        return wrap_expr(self._pyexpr.bitwise_or())

    def bitwise_xor(self) -> Expr:
        """Perform an aggregation of bitwise XORs.

Examples
--------
>>> df = pl.DataFrame({"n": [-1, 0, 1]})
>>> df.select(pl.col("n").bitwise_xor())
shape: (1, 1)
┌─────┐
│ n   │
│ --- │
│ i64 │
╞═════╡
│ -2  │
└─────┘
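
`-1` is all ones; XOR with `0` leaves it unchanged and XOR with `1` flips
the lowest bit, giving `...1110`, i.e. `-2`.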
>>> df = pl.DataFrame(
...     {"grouper": ["a", "a", "a", "b", "b"], "n": [-1, 0, 1, -1, 1]}
... )
>>> df.group_by("grouper", maintain_order=True).agg(pl.col("n").bitwise_xor())
shape: (2, 2)
┌─────────┬─────┐
│ grouper ┆ n   │
│ ---     ┆ --- │
│ str     ┆ i64 │
╞═════════╪═════╡
│ a       ┆ -2  │
│ b       ┆ -2  │
└─────────┴─────┘
        """
        return wrap_expr(self._pyexpr.bitwise_xor())

    @deprecated(
        "`register_plugin` is deprecated; "
        "use `polars.plugins.register_plugin_function` instead."
    )
    def register_plugin(
        self,
        *,
        lib: str,
        symbol: str,
        args: list[IntoExpr] | None = None,
        kwargs: dict[Any, Any] | None = None,
        is_elementwise: bool = False,
        input_wildcard_expansion: bool = False,
        returns_scalar: bool = False,
        cast_to_supertypes: bool = False,
        pass_name_to_apply: bool = False,
        changes_length: bool = False,
    ) -> Expr:
        """
        Register a plugin function.
Register a plugin function.

.. deprecated:: 0.20.16
    Use :func:`polars.plugins.register_plugin_function` instead.

See the `user guide <https://docs.pola.rs/user-guide/plugins/>`_
for more information about plugins.
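
For new code, prefer the module-level function. A minimal sketch (the
plugin path and symbol name below are placeholders, hence the skip):

>>> from polars.plugins import register_plugin_function
>>> expr = register_plugin_function(  # doctest: +SKIP
...     plugin_path="path/to/my_plugin_lib.so",
...     function_name="my_symbol",
...     args=[pl.col("a")],
...     is_elementwise=True,
... )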

Warnings
--------
This method is deprecated. Use the new `polars.plugins.register_plugin_function`
function instead.

This is highly unsafe, as it calls the C function loaded by
`lib::symbol`.

The parameters you set dictate how Polars will handle the function.
Make sure they are correct!

Parameters
----------
lib
    Library to load.
symbol
    Function to load.
args
    Arguments (other than self) passed to this function.
    These arguments have to be expressions.
kwargs
    Non-expression arguments. They must be JSON serializable.
is_elementwise
    If the function only operates on scalars,
    this will trigger fast paths.
input_wildcard_expansion
    Expand expressions as input of this function.
returns_scalar
    Automatically explode on unit length if it ran as final aggregation.
    This is the case for aggregations like `sum`, `min`, `covariance`, etc.
cast_to_supertypes
    Cast the input datatypes to their supertype.
pass_name_to_apply
    If set, the `Series` passed to the function in the group_by operation
    will have its name set. This is an extra heap allocation per group.
changes_length
    Set this if the function can change the length of the input;
    for example, a `unique` or a `slice`.
        """
        from polars.plugins import register_plugin_function

        if args is None:
            args = [self]
        else:
            args = [self, *list(args)]

        return register_plugin_function(
            plugin_path=lib,
            function_name=symbol,
            args=args,
            kwargs=kwargs,
            is_elementwise=is_elementwise,
            changes_length=changes_length,
            returns_scalar=returns_scalar,
            cast_to_supertype=cast_to_supertypes,
            input_wildcard_expansion=input_wildcard_expansion,
            pass_name_to_apply=pass_name_to_apply,
        )

    def _row_encode(
        self,
        *,
        unordered: bool,
        descending: bool | None = None,
        nulls_last: bool | None = None,
    ) -> Expr:
        return F._row_encode(
            [self],
            unordered=unordered,
            descending=None if descending is None else [descending],
            nulls_last=None if nulls_last is None else [nulls_last],
        )

    def _row_decode(
        self,
        names: Sequence[str],
        dtypes: Sequence[pl.DataTypeExpr | PolarsDataType],
        *,
        unordered: bool,
        descending: Sequence[bool] | None = None,
        nulls_last: Sequence[bool] | None = None,
    ) -> Expr:
        dtypes_pyexprs = [
            parse_into_datatype_expr(dtype)._pydatatype_expr for dtype in dtypes
        ]
        if unordered:
            assert descending is None
            assert nulls_last is None
            result = self._pyexpr.row_decode_unordered(names, dtypes_pyexprs)
        else:
            result = self._pyexpr.row_decode_ordered(
                names, dtypes_pyexprs, descending, nulls_last
            )
        return wrap_expr(result)

    @classmethod
    def from_json(cls, value: str) -> Expr:
        """
Read an expression from a JSON encoded string to construct an Expression.

.. deprecated:: 0.20.11
    This method has been renamed to :meth:`deserialize`.
    Note that the new method operates on file-like inputs rather than strings.
    Enclose your input in `io.StringIO` to keep the same behavior.

Parameters
----------
value
    JSON encoded string value.
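
Examples
--------
A round-trip sketch of the recommended replacement (serialization format
support varies by version, hence the skip directives):

>>> from io import StringIO
>>> expr = pl.col("a").sum()
>>> json_str = expr.meta.serialize(format="json")  # doctest: +SKIP
>>> pl.Expr.deserialize(StringIO(json_str), format="json")  # doctest: +SKIP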
        """
        issue_deprecation_warning(
            "`Expr.from_json` is deprecated. It has been renamed to `Expr.deserialize`."
            " Note that the new method operates on file-like inputs rather than strings."
            " Enclose your input in `io.StringIO` to keep the same behavior.",
            version="0.20.11",
        )
        return cls.deserialize(StringIO(value), format="json")

    @property
    def bin(self) -> ExprBinaryNameSpace:
        """
Create an object namespace of all binary related methods.

See the individual method pages for full details.
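
Examples
--------
A brief sketch of the namespace in use (`bin.encode` converts binary
data to its hex or base64 representation):

>>> df = pl.DataFrame({"data": [b"a", b"b"]})
>>> df.select(pl.col("data").bin.encode("hex"))
shape: (2, 1)
┌──────┐
│ data │
│ ---  │
│ str  │
╞══════╡
│ 61   │
│ 62   │
└──────┘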
        """
        return ExprBinaryNameSpace(self)

    @property
    def cat(self) -> ExprCatNameSpace:
        """
Create an object namespace of all categorical related methods.

See the individual method pages for full details.

Examples
--------
>>> df = pl.DataFrame({"values": ["a", "b"]}).select(
...     pl.col("values").cast(pl.Categorical)
... )
>>> df.select(pl.col("values").cat.get_categories())
shape: (2, 1)
┌────────┐
│ values │
│ ---    │
│ str    │
╞════════╡
│ a      │
│ b      │
└────────┘
        """
        return ExprCatNameSpace(self)

    @property
    def dt(self) -> ExprDateTimeNameSpace:
        """Create an object namespace of all datetime related methods."""
        return ExprDateTimeNameSpace(self)

    @property
    def list(self) -> ExprListNameSpace:
        """
Create an object namespace of all list related methods.

See the individual method pages for full details.
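
Examples
--------
A brief sketch (`list.len` counts the elements in each row's list):

>>> df = pl.DataFrame({"a": [[1, 2], [3]]})
>>> df.select(pl.col("a").list.len())
shape: (2, 1)
┌─────┐
│ a   │
│ --- │
│ u32 │
╞═════╡
│ 2   │
│ 1   │
└─────┘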
        """
        return ExprListNameSpace(self)

    @property
    def arr(self) -> ExprArrayNameSpace:
        """
Create an object namespace of all array related methods.

See the individual method pages for full details.
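
Examples
--------
A brief sketch on a fixed-width `Array` column (`arr.max` reduces each
row's array to its maximum):

>>> df = pl.DataFrame(
...     {"a": [[1, 2], [4, 3]]},
...     schema={"a": pl.Array(pl.Int64, 2)},
... )
>>> df.select(pl.col("a").arr.max())
shape: (2, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 2   │
│ 4   │
└─────┘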
        """
        return ExprArrayNameSpace(self)

    @property
    def meta(self) -> ExprMetaNameSpace:
        """
Create an object namespace of all meta related expression methods.

This can be used to modify and traverse existing expressions.
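
Examples
--------
A brief sketch (`meta.output_name` inspects the name an expression
would produce):

>>> e = pl.col("foo").alias("bar")
>>> e.meta.output_name()
'bar'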
        """
        return ExprMetaNameSpace(self)

    @property
    def name(self) -> ExprNameNameSpace:
        """
Create an object namespace of all expressions that modify expression names.

See the individual method pages for full details.
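
Examples
--------
A brief sketch (`name.suffix` appends to the output column name):

>>> df = pl.DataFrame({"a": [1, 2]})
>>> df.select(pl.col("a").max().name.suffix("_max"))
shape: (1, 1)
┌───────┐
│ a_max │
│ ---   │
│ i64   │
╞═══════╡
│ 2     │
└───────┘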
        """
        return ExprNameNameSpace(self)

    @property
    def str(self) -> ExprStringNameSpace:
        """
Create an object namespace of all string related methods.

See the individual method pages for full details.

Examples
--------
>>> df = pl.DataFrame({"letters": ["a", "b"]})
>>> df.select(pl.col("letters").str.to_uppercase())
shape: (2, 1)
┌─────────┐
│ letters │
│ ---     │
│ str     │
╞═════════╡
│ A       │
│ B       │
└─────────┘
        """
        return ExprStringNameSpace(self)

    @property
    def struct(self) -> ExprStructNameSpace:
        """
Create an object namespace of all struct related methods.

See the individual method pages for full details.

Examples
--------
>>> df = (
...     pl.DataFrame(
...         {
...             "int": [1, 2],
...             "str": ["a", "b"],
...             "bool": [True, None],
...             "list": [[1, 2], [3]],
...         }
...     )
...     .to_struct("my_struct")
...     .to_frame()
... )
>>> df.select(pl.col("my_struct").struct.field("str"))
shape: (2, 1)
┌─────┐
│ str │
│ --- │
│ str │
╞═════╡
│ a   │
│ b   │
└─────┘
        """
        return ExprStructNameSpace(self)

    def _skip_batch_predicate(self, schema: SchemaDict) -> Expr | None:
        pyexpr = self._pyexpr.skip_batch_predicate(schema)
        if pyexpr is None:
            return None
        return wrap_expr(pyexpr)


def _prepare_alpha(
    com: float | int | None = None,
    span: float | int | None = None,
    half_life: float | int | None = None,
    alpha: float | int | None = None,
) -> float:
    """Normalise EWM decay specification in terms of smoothing factor 'alpha'."""
    if sum(param is not None for param in (com, span, half_life, alpha)) > 1:
        msg = "parameters `com`, `span`, `half_life`, and `alpha` are mutually exclusive"
        raise ValueError(msg)
    if com is not None:
        if com < 0.0:
            msg = f"require `com` >= 0 (found {com})"
            raise ValueError(msg)
        alpha = 1.0 / (1.0 + com)
    elif span is not None:
        if span < 1.0:
            msg = f"require `span` >= 1 (found {span})"
            raise ValueError(msg)
        alpha = 2.0 / (span + 1.0)
    elif half_life is not None:
        if half_life <= 0.0:
            msg = f"require `half_life` > 0 (found {half_life})"
            raise ValueError(msg)
        alpha = 1.0 - math.exp(-math.log(2.0) / half_life)
    elif alpha is None:
        msg = "one of `com`, `span`, `half_life`, or `alpha` must be set"
        raise ValueError(msg)
    elif not (0 < alpha <= 1):
        msg = f"require 0 < `alpha` <= 1 (found {alpha})"
        raise ValueError(msg)
    return alpha


def _prepare_rolling_by_window_args(window_size: timedelta | str) -> str:
    if isinstance(window_size, timedelta):
        window_size = parse_as_duration_string(window_size)
    return window_size
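
# A quick sanity sketch for `_prepare_alpha` (illustrative comments only;
# the equivalences follow directly from the formulas above):
#
#   _prepare_alpha(com=1.0)        -> 0.5   # 1 / (1 + com)
#   _prepare_alpha(span=3.0)       -> 0.5   # 2 / (span + 1)
#   _prepare_alpha(half_life=1.0)  -> 0.5   # 1 - exp(-ln(2) / half_life)
#   _prepare_alpha(alpha=0.25)     -> 0.25  # validated pass-through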