
from __future__ import annotations

import contextlib
import io
import sys
import warnings
from collections.abc import Collection, Iterable, Mapping
from datetime import date, datetime, time, timedelta
from functools import lru_cache, partial, reduce
from io import BytesIO, StringIO
from operator import and_
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ClassVar, NoReturn, TypeVar, overload

from polars import functions as F
from polars._utils.various import (
    issue_warning,
    normalize_filepath,
    parse_percentiles,
    qualified_type_name,
)
from polars.datatypes import (
    N_INFER_DEFAULT, Boolean, Categorical, Date, Datetime, Duration, Enum,
    Float32, Float64, Int8, Int16, Int32, Int64, Int128, Null, Object, String,
    Time, UInt8, UInt16, UInt32, UInt64, Unknown, is_polars_dtype, parse_into_dtype,
)
from polars.dependencies import _PYARROW_AVAILABLE, import_optional, subprocess
from polars.dependencies import pyarrow as pa
from polars.exceptions import PerformanceWarning
from polars.io.partition import PartitioningScheme
from polars.lazyframe.engine_config import GPUEngine
from polars.lazyframe.group_by import LazyGroupBy
from polars.schema import Schema
from polars.selectors import by_dtype, expand_selector

with contextlib.suppress(ImportError):  # rust extension module
    from polars.polars import PyLazyFrame, get_engine_affinity

if sys.version_info >= (3, 10):
    from typing import Concatenate, ParamSpec
else:
    from typing_extensions import Concatenate, ParamSpec

if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self

if TYPE_CHECKING:
    from polars import DataFrame, DataType, Expr
    from polars._typing import (
        EngineType,
        FrameInitTypes,
        Orientation,
        SchemaDefinition,
        SchemaDict,
        SerializationFormat,
    )

T = TypeVar("T")
P = ParamSpec("P")


def _select_engine(engine: EngineType) -> EngineType:
    # "auto" defers to the engine configured via the POLARS_ENGINE_AFFINITY
    # environment variable.
    return get_engine_affinity() if engine == "auto" else engine


def _to_sink_target(path: Any) -> Any:
    # Normalise a sink destination into a file path, an open file-like object,
    # or a native partitioning scheme.
    if isinstance(path, (str, Path)):
        return normalize_filepath(path)
    if isinstance(path, io.IOBase):
        return path
    if isinstance(path, PartitioningScheme):
        return path._py_partitioning
    if callable(getattr(path, "write", None)):
        return path
    msg = f"`path` argument has invalid type {qualified_type_name(path)!r}, and cannot be turned into a sink target"
    raise TypeError(msg)


def _gpu_engine_callback(
    engine: EngineType,
    *,
    streaming: bool,
    background: bool,
    new_streaming: bool,
    _eager: bool,
) -> Callable[..., Any] | None:
    is_config_obj = isinstance(engine, GPUEngine)
    if not (is_config_obj or engine in ("auto", "cpu", "gpu", "in-memory", "streaming")):
        msg = f"Invalid engine argument {engine=}"
        raise ValueError(msg)

    is_gpu = is_config_obj or engine == "gpu"
    if (streaming or background or new_streaming) and is_gpu:
        issue_warning(
            "GPU engine does not support streaming or background collection, "
            "disabling GPU engine.",
            category=UserWarning,
        )
        is_gpu = False
    if _eager:
        is_gpu = False
    if not is_gpu:
        return None

    cudf_polars = import_optional(
        "cudf_polars",
        err_prefix="GPU engine requested, but required package",
        install_message=(
            "Please install using the command `pip install cudf-polars-cu12` "
            "(CUDA 12 is required for RAPIDS cuDF v25.08 and later). If your "
            "system has a CUDA 11 driver, install with "
            "`pip install cudf-polars-cu11==25.06`."
        ),
    )
    if not is_config_obj:
        engine = GPUEngine()
    return partial(cudf_polars.execute_with_cudf, config=engine)


class LazyFrame:
    """
Representation of a Lazy computation graph/query against a DataFrame.

This allows for whole-query optimisation in addition to parallelism, and
is the preferred (and highest-performance) mode of operation for polars.

Parameters
----------
data : dict, Sequence, ndarray, Series, or pandas.DataFrame
    Two-dimensional data in various forms; dict input must contain Sequences,
    Generators, or a `range`. Sequence may contain Series or other Sequences.
schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
    The LazyFrame schema may be declared in several ways:

    * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
    * As a list of column names; in this case types are automatically inferred.
    * As a list of (name,type) pairs; this is equivalent to the dictionary form.

    If you supply a list of column names that does not match the names in the
    underlying data, the names given here will overwrite them. The number
    of names given in the schema should match the underlying data dimensions.
schema_overrides : dict, default None
    Support type specification or override of one or more columns; note that
    any dtypes inferred from the schema param will be overridden.

    The number of entries in the schema should match the underlying data
    dimensions, unless a sequence of dictionaries is being passed, in which case
    a *partial* schema can be declared to prevent specific fields from being loaded.
strict : bool, default True
    Throw an error if any `data` value does not exactly match the given or inferred
    data type for that column. If set to `False`, values that do not match the data
    type are cast to that data type or, if casting is not possible, set to null
    instead.
orient : {'col', 'row'}, default None
    Whether to interpret two-dimensional data as columns or as rows. If None,
    the orientation is inferred by matching the columns and data dimensions. If
    this does not yield conclusive results, column orientation is used.
infer_schema_length : int or None
    The maximum number of rows to scan for schema inference. If set to `None`, the
    full data may be scanned *(this can be slow)*. This parameter only applies if
    the input data is a sequence or generator of rows; other input is read as-is.
nan_to_null : bool, default False
    If the data comes from one or more numpy arrays, can optionally convert input
    data np.nan values to null instead. This is a no-op for all other input data.

Notes
-----
Initialising `LazyFrame(...)` directly is equivalent to `DataFrame(...).lazy()`.

Examples
--------
Constructing a LazyFrame directly from a dictionary:

>>> data = {"a": [1, 2], "b": [3, 4]}
>>> lf = pl.LazyFrame(data)
>>> lf.collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 3   │
│ 2   ┆ 4   │
└─────┴─────┘

Notice that the dtypes are automatically inferred as Polars Int64:

>>> lf.collect_schema().dtypes()
[Int64, Int64]

To specify a more detailed/specific frame schema you can supply the `schema`
parameter with a dictionary of (name,dtype) pairs...

>>> data = {"col1": [0, 2], "col2": [3, 7]}
>>> lf2 = pl.LazyFrame(data, schema={"col1": pl.Float32, "col2": pl.Int64})
>>> lf2.collect()
shape: (2, 2)
┌──────┬──────┐
│ col1 ┆ col2 │
│ ---  ┆ ---  │
│ f32  ┆ i64  │
╞══════╪══════╡
│ 0.0  ┆ 3    │
│ 2.0  ┆ 7    │
└──────┴──────┘

...a sequence of (name,dtype) pairs...

>>> data = {"col1": [1, 2], "col2": [3, 4]}
>>> lf3 = pl.LazyFrame(data, schema=[("col1", pl.Float32), ("col2", pl.Int64)])
>>> lf3.collect()
shape: (2, 2)
┌──────┬──────┐
│ col1 ┆ col2 │
│ ---  ┆ ---  │
│ f32  ┆ i64  │
╞══════╪══════╡
│ 1.0  ┆ 3    │
│ 2.0  ┆ 4    │
└──────┴──────┘

...or a list of typed Series.

>>> data = [
...     pl.Series("col1", [1, 2], dtype=pl.Float32),
...     pl.Series("col2", [3, 4], dtype=pl.Int64),
... ]
>>> lf4 = pl.LazyFrame(data)
>>> lf4.collect()
shape: (2, 2)
┌──────┬──────┐
│ col1 ┆ col2 │
│ ---  ┆ ---  │
│ f32  ┆ i64  │
╞══════╪══════╡
│ 1.0  ┆ 3    │
│ 2.0  ┆ 4    │
└──────┴──────┘

Constructing a LazyFrame from a numpy ndarray, specifying column names:

>>> import numpy as np
>>> data = np.array([(1, 2), (3, 4)], dtype=np.int64)
>>> lf5 = pl.LazyFrame(data, schema=["a", "b"], orient="col")
>>> lf5.collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 3   │
│ 2   ┆ 4   │
└─────┴─────┘
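
When constructing from a NumPy array, `nan_to_null=True` converts any `np.nan`
values to nulls during construction; a small illustrative sketch, with output
omitted:

>>> nan_data = np.array([[1.0, np.nan], [3.0, 4.0]])
>>> lf_nan = pl.LazyFrame(nan_data, schema=["a", "b"], orient="row", nan_to_null=True)
>>> lf_nan.collect()  # doctest: +SKIP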

Constructing a LazyFrame from a list of lists, row orientation specified:

>>> data = [[1, 2, 3], [4, 5, 6]]
>>> lf6 = pl.LazyFrame(data, schema=["a", "b", "c"], orient="row")
>>> lf6.collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1   ┆ 2   ┆ 3   │
│ 4   ┆ 5   ┆ 6   │
└─────┴─────┴─────┘
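
Column dtypes can also be overridden selectively while the rest of the schema is
still inferred; an illustrative sketch using `schema_overrides` (output not
verified here):

>>> data = {"col1": [0, 2], "col2": [3, 7]}
>>> lf7 = pl.LazyFrame(data, schema_overrides={"col2": pl.Float32})
>>> lf7.collect_schema()  # doctest: +SKIP
Schema({'col1': Int64, 'col2': Float32})
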
    """

    _ldf: PyLazyFrame
    _accessors: ClassVar[set[str]] = set()

    def __init__(
        self,
        data: FrameInitTypes | None = None,
        schema: SchemaDefinition | None = None,
        *,
        schema_overrides: SchemaDict | None = None,
        strict: bool = True,
        orient: Orientation | None = None,
        infer_schema_length: int | None = N_INFER_DEFAULT,
        nan_to_null: bool = False,
    ) -> None:
        from polars.dataframe import DataFrame

        self._ldf = (
            DataFrame(
                data=data,
                schema=schema,
                schema_overrides=schema_overrides,
                strict=strict,
                orient=orient,
                infer_schema_length=infer_schema_length,
                nan_to_null=nan_to_null,
            )
            .lazy()
            ._ldf
        )

    @classmethod
    def _from_pyldf(cls, ldf: PyLazyFrame) -> LazyFrame:
        self = cls.__new__(cls)
        self._ldf = ldf
        return self

    def __getstate__(self) -> bytes:
        return self.serialize()

    def __setstate__(self, state: bytes) -> None:
        self._ldf = self.deserialize(BytesIO(state))._ldf

    @classmethod
    def _scan_python_function(
        cls,
        schema: Any,
        scan_fn: Any,
        *,
        pyarrow: bool = False,
        validate_schema: bool = False,
        is_pure: bool = False,
    ) -> LazyFrame:
        self = cls.__new__(cls)
        if isinstance(schema, Mapping):
            self._ldf = PyLazyFrame.scan_from_python_function_pl_schema(
                list(schema.items()), scan_fn, pyarrow,
                validate_schema=validate_schema, is_pure=is_pure,
            )
        elif _PYARROW_AVAILABLE and isinstance(schema, pa.Schema):
            self._ldf = PyLazyFrame.scan_from_python_function_arrow_schema(
                list(schema), scan_fn, pyarrow,
                validate_schema=validate_schema, is_pure=is_pure,
            )
        else:
            self._ldf = PyLazyFrame.scan_from_python_function_schema_function(
                schema, scan_fn, validate_schema, is_pure
            )
        return self

    @classmethod
    def deserialize(
        cls,
        source: str | Path | io.IOBase | bytes,
        *,
        format: SerializationFormat = "binary",
    ) -> LazyFrame:
        """
Read a logical plan from a file to construct a LazyFrame.

Parameters
----------
source
    Path to a file or a file-like object (by file-like object, we refer to
    objects that have a `read()` method, such as a file handler (e.g.
    via builtin `open` function) or `BytesIO`).
format
    The format with which the LazyFrame was serialized. Options:

    - `"binary"`: Deserialize from binary format (bytes). This is the default.
    - `"json"`: Deserialize from JSON format (string).

Warnings
--------
This function uses :mod:`pickle` if the logical plan contains Python UDFs,
and as such inherits the security implications. Deserializing can execute
arbitrary code, so it should only be attempted on trusted data.

See Also
--------
LazyFrame.serialize

Notes
-----
Serialization is not stable across Polars versions: a LazyFrame serialized
in one Polars version may not be deserializable in another Polars version.

Examples
--------
>>> import io
>>> lf = pl.LazyFrame({"a": [1, 2, 3]}).sum()
>>> bytes = lf.serialize()
>>> pl.LazyFrame.deserialize(io.BytesIO(bytes)).collect()
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 6   │
└─────┘
r   json0`format` must be one of {'binary', 'json'}, got )r   r   r   getvalueencoder   r   r3   rd   deserialize_binarydeserialize_jsonr   r   )r   sourcer   deserializerr   s        r   r   LazyFrame.deserialize  s    b fh''V__.5578Fd,,'/FX&99Lv&77LFvjQCS/!|F344r   c                Z    [        S[        S9  U R                  5       R                  5       $ )a  
Get the column names.

Returns
-------
list of str
    A list containing the name of each column in order.

Warnings
--------
Determining the column names of a LazyFrame requires resolving its schema,
which is a potentially expensive operation.
Using :meth:`collect_schema` is the idiomatic way of resolving the schema.
This property exists only for symmetry with the DataFrame class.

See Also
--------
collect_schema
Schema.names

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6, 7, 8],
...         "ham": ["a", "b", "c"],
...     }
... ).select("foo", "bar")
>>> lf.columns  # doctest: +SKIP
['foo', 'bar']
zDetermining the column names of a LazyFrame requires resolving its schema, which is a potentially expensive operation. Use `LazyFrame.collect_schema().names()` to get the column names without this warning.r   )r2   rZ   collect_schemanamesr   s    r   columnsLazyFrame.columns  s1    D 	= (		
 ""$**,,r   c                Z    [        S[        S9  U R                  5       R                  5       $ )a  
Get the column data types.

Returns
-------
list of DataType
    A list containing the data type of each column in order.

Warnings
--------
Determining the data types of a LazyFrame requires resolving its schema,
which is a potentially expensive operation.
Using :meth:`collect_schema` is the idiomatic way to resolve the schema.
This property exists only for symmetry with the DataFrame class.

See Also
--------
collect_schema
Schema.dtypes

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.dtypes  # doctest: +SKIP
[Int64, Float64, String]
zDetermining the data types of a LazyFrame requires resolving its schema, which is a potentially expensive operation. Use `LazyFrame.collect_schema().dtypes()` to get the data types without this warning.r   )r2   rZ   r   dtypesr   s    r   r   LazyFrame.dtypes<  s1    D 	; (		
 ""$++--r   c                >    [        S[        S9  U R                  5       $ )aL  
Get an ordered mapping of column names to their data type.

Warnings
--------
Resolving the schema of a LazyFrame is a potentially expensive operation.
Using :meth:`collect_schema` is the idiomatic way to resolve the schema.
This property exists only for symmetry with the DataFrame class.

See Also
--------
collect_schema
Schema

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.schema  # doctest: +SKIP
Schema({'foo': Int64, 'bar': Float64, 'ham': String})
zResolving the schema of a LazyFrame is a potentially expensive operation. Use `LazyFrame.collect_schema()` to get the schema without this warning.r   )r2   rZ   r   r   s    r   r   LazyFrame.schemaf  s&    8 	X'	

 ""$$r   c                Z    [        S[        S9  U R                  5       R                  5       $ )a  
Get the number of columns.

Returns
-------
int

Warnings
--------
Determining the width of a LazyFrame requires resolving its schema,
which is a potentially expensive operation.
Using :meth:`collect_schema` is the idiomatic way to resolve the schema.
This property exists only for symmetry with the DataFrame class.

See Also
--------
collect_schema
Schema.len

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [4, 5, 6],
...     }
... )
>>> lf.width  # doctest: +SKIP
2
zdetermining the width of a LazyFrame requires resolving its schema, which is a potentially expensive operation. Use `LazyFrame.collect_schema().len()` to get the width without this warning.r   )r2   rZ   r   lenr   s    r   widthLazyFrame.width  s1    @ 	6 (		
 ""$((**r   c                    Sn[        U5      e)Nztthe truth value of a LazyFrame is ambiguous

LazyFrames cannot be used in boolean context with and/or/not operators.r   )r   r   s     r   __bool__LazyFrame.__bool__  s    Z 	 nr   c                &    SU< S3n[        U5      e)N"z0" comparison not supported for LazyFrame objectsr	  )r   operatorr   s      r   _comparison_errorLazyFrame._comparison_error  s    (MNnr   c                &    U R                  S5        g )Nz==r  r   others     r   __eq__LazyFrame.__eq__      t$r   c                &    U R                  S5        g )Nz!=r  r  s     r   __ne__LazyFrame.__ne__  r  r   c                &    U R                  S5        g )N>r  r  s     r   __gt__LazyFrame.__gt__      s#r   c                &    U R                  S5        g )N<r  r  s     r   __lt__LazyFrame.__lt__  r  r   c                &    U R                  S5        g )Nz>=r  r  s     r   __ge__LazyFrame.__ge__  r  r   c                &    U R                  S5        g )Nz<=r  r  s     r   __le__LazyFrame.__le__  r  r   c                &    XR                  5       ;   $ r   )r   )r   keys     r   __contains__LazyFrame.__contains__  s    ))+++r   c                "    U R                  5       $ r   cloner   s    r   __copy__LazyFrame.__copy__      zz|r   c                "    U R                  5       $ r   r/  )r   memos     r   __deepcopy__LazyFrame.__deepcopy__  r3  r   c                z    [        U[        5      (       d  Sn[        U5      e[        U 5      R	                  U5      $ )u  
Support slice syntax, returning a new LazyFrame.

All other forms of subscripting are currently unsupported here; use `select`,
`filter`, or other standard methods instead.

Notes
-----
LazyFrame is designed primarily for efficient computation and does not know
its own length so, unlike DataFrame, certain slice patterns (such as those
requiring negative stop/step) may not be supported.

Examples
--------
>>> lf = pl.LazyFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> lf[:2].collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 4   │
│ 2   ┆ 5   │
└─────┴─────┘
>>> lf[::2].collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 4   │
│ 3   ┆ 6   │
└─────┴─────┘
zZLazyFrame is not subscriptable (aside from slicing)

Use `select()` or `filter()` instead.)r   slicer   r)   apply)r   itemr   s      r   __getitem__LazyFrame.__getitem__  s>    J $&&<  C. t$**400r   c                &    SU R                  SS9 3$ )NzOnaive plan: (run LazyFrame.explain(optimized=True) to see the optimized plan)

F	optimized)explainr   s    r   __str__LazyFrame.__str__  s$       	r   c                P    SU R                   R                   S[        U 5      S S3$ )Nr!  z at 0xXr  )	__class____name__idr   s    r   __repr__LazyFrame.__repr__  s(    4>>**+6"T(1Q??r   c                    U R                   R                  SS9n[        R                  " / SQU R	                  5       S9nSUR                  5        3$ ! [         a(    U R                  SS9R                  SS5      nSU S	3s $ f = f)
NFr?  )dotz-Nshape=boxz-Tsvg)inputz^<h4>NAIVE QUERY PLAN</h4><p>run <b>LazyFrame.show_graph()</b> to see the optimized version</p>
z<p></p>zq<i>naive plan: (run <b>LazyFrame.explain(optimized=True)</b> to see the optimized plan)</i>
    <p></p>
    <div>z</div>)	r   to_dotrW   check_outputr   decode	ExceptionrA  replace)r   rL  svginserts       r   _repr_html_LazyFrame._repr_html_  s    	))""U"3C))/#7HC--0ZZ\N<  	\\E\2::4KF
   	s   AA /BB.c                   g r    r   filer   s      r   r   LazyFrame.serialize"  s     r   c                   g r   rY  rZ  s      r   r   r\  '  s    NQr   c                   g r   rY  rZ  s      r   r   r\  *  s     r   c                   US:X  a  U R                   R                  nOMUS:X  a6  Sn[        R                  " U[	        5       S9  U R                   R
                  nOSU< 3n[        U5      e[        X1U5      $ )u  
Serialize the logical plan of this LazyFrame to a file or string.

Parameters
----------
file
    File path to which the result should be written. If set to `None`
    (default), the output is returned as a string instead.
format
    The format in which to serialize. Options:

    - `"binary"`: Serialize to binary format (bytes). This is the default.
    - `"json"`: Serialize to JSON format (string) (deprecated).

See Also
--------
LazyFrame.deserialize

Notes
-----
Serialization is not stable across Polars versions: a LazyFrame serialized
in one Polars version may not be deserializable in another Polars version.

Examples
--------
Serialize the logical plan into a binary representation.

>>> lf = pl.LazyFrame({"a": [1, 2, 3]}).sum()
>>> bytes = lf.serialize()

The bytes can later be deserialized back into a LazyFrame.

>>> import io
>>> pl.LazyFrame.deserialize(io.BytesIO(bytes)).collect()
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 6   │
└─────┘
r   r   z6'json' serialization format of LazyFrame is deprecated)
stacklevelr   )r   serialize_binarywarningswarnr/   serialize_jsonr   r(   )r   r[  r   
serializerr   s        r   r   r\  /  sr    b X33JvJCMM*, 11JFvjQCS/!&z@@r   c                    U" U /UQ70 UD6$ )uC  
Offers a structured way to apply a sequence of user-defined functions (UDFs).

Parameters
----------
function
    Callable; will receive the frame as the first parameter,
    followed by any given args/kwargs.
*args
    Arguments to pass to the UDF.
**kwargs
    Keyword arguments to pass to the UDF.

See Also
--------
pipe_with_schema

Examples
--------
>>> def cast_str_to_int(lf: pl.LazyFrame, col_name: str) -> pl.LazyFrame:
...     return lf.with_columns(pl.col(col_name).cast(pl.Int64))
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": ["10", "20", "30", "40"],
...     }
... )
>>> lf.pipe(cast_str_to_int, col_name="b").collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 10  │
│ 2   ┆ 20  │
│ 3   ┆ 30  │
│ 4   ┆ 40  │
└─────┴─────┘

>>> lf = pl.LazyFrame(
...     {
...         "b": [1, 2],
...         "a": [3, 4],
...     }
... )
>>> lf.collect()
shape: (2, 2)
┌─────┬─────┐
│ b   ┆ a   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 3   │
│ 2   ┆ 4   │
└─────┴─────┘
>>> lf.pipe(lambda lf: lf.select(sorted(lf.collect_schema()))).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 3   ┆ 1   │
│ 4   ┆ 2   │
└─────┴─────┘
rY  )r   functionargskwargss       r   pipeLazyFrame.pipeo  s    R .t.v..r   c                d   ^ ^ T R                  T R                  R                  UU 4S j5      5      $ )u'  
Allows altering the lazy frame during the plan stage with the resolved schema.

In contrast to `pipe`, this method does not execute `function` immediately but
only during the plan stage. This makes it possible to use the resolved schema of
the input to dynamically alter the lazy frame. It also means that any exceptions
raised by `function` are only emitted during the plan stage.

.. warning::
    This functionality is considered **unstable**. It may be changed at any
    point without it being considered a breaking change.

Parameters
----------
function
    Callable; will receive the frame as the first parameter and the resolved
    schema as the second parameter.

See Also
--------
pipe

Examples
--------
>>> def cast_to_float_if_necessary(
...     lf: pl.LazyFrame, schema: pl.Schema
... ) -> pl.LazyFrame:
...     required_casts = [
...         pl.col(name).cast(pl.Float64)
...         for name, dtype in schema.items()
...         if not dtype.is_float()
...     ]
...     return lf.with_columns(required_casts)
>>> lf = pl.LazyFrame(
...     {"a": [1.0, 2.0], "b": ["1.0", "2.5"], "c": [2.0, 3.0]},
...     schema={"a": pl.Float64, "b": pl.String, "c": pl.Float32},
... )
>>> lf.pipe_with_schema(cast_to_float_if_necessary).collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ f32 │
╞═════╪═════╪═════╡
│ 1.0 ┆ 1.0 ┆ 2.0 │
│ 2.0 ┆ 2.5 ┆ 3.0 │
└─────┴─────┴─────┘
c                T   > T" TR                  U S   5      U S   5      R                  $ )Nr      )r   r   )lf_and_schemarg  r   s    r   <lambda>,LazyFrame.pipe_with_schema.<locals>.<lambda>  s1    h$$]1%56!!$' $'r   )r   r   pipe_with_schema)r   rg  s   ``r   rr  LazyFrame.pipe_with_schema  s0    j II&&
 	
r   nearest)interpolationc          	     	   SSK Jn  U R                  5       nU(       d  Sn[        U5      e/ SQn[	        U5      =n(       a  UR                  S U 5       5        UR                  S5        [        SS j5       n[        5       [        5       p/ n[        R                  " S5      nUR                  5        GH  u  pUR                  5       nU(       + =(       a    UR                  5       n[        R                  " U5      R                  5       R                   R#                  S	5      [        R                  " U5      R%                  5       R                   R#                  S
5      /nU(       d  U(       d
  U[&        :X  a$  [        R                  " U5      R)                  5       OUnU(       a$  [        R                  " U5      R+                  5       OUnU" U5      (       d$  [        R                  " U5      R-                  5       OUnU" U5      (       d$  [        R                  " U5      R/                  5       OUn/ nU H  nU(       d  U(       a  U(       aC  [        R                  " U5      R1                  5       R3                  UU5      R5                  U5      O%[        R                  " U5      R3                  UU5      nU
R7                  U5        OUnUR                  UR9                  U SU 35      5        M     U(       d%  UR;                  5       (       d  U[<        [&        4;   a  U	R7                  U5        UR                  / UQUR9                  SU 35      PUR9                  SU 35      PUR9                  SU 35      PUQUR9                  SU 35      P5        GM     U
(       a  U R?                  S U
 5       5      OU R@                  " U6 RC                  5       n[E        U5      n[G        URE                  5       5       Vs/ s H   nURI                  S5      UU-  US-   U-   PM"     nn[K        [M        UU5      5      nU HO  nUU    Vs/ s H8  nUb  [O        U[J        5      (       a  SOX;   a  [Q        U5      O
[S        U5      PM:     snUU'   MQ     U" U5      nURU                  S[V        RX                  " SU5      5        U$ s  snf s  snf )u  
Creates a summary of statistics for a LazyFrame, returning a DataFrame.

Parameters
----------
percentiles
    One or more percentiles to include in the summary statistics.
    All values must be in the range `[0, 1]`.

interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
    Interpolation method used when calculating percentiles.

Returns
-------
DataFrame

Notes
-----
The median is included by default as the 50% percentile.

Warnings
--------
* This method does *not* maintain the laziness of the frame, and will `collect`
  the final result. This could potentially be an expensive operation.
* We do not guarantee the output of `describe` to be stable. It will show
  statistics that we deem informative, and may be updated in the future.
  Using `describe` programmatically (versus interactive exploration) is
  not recommended for this reason.

Examples
--------
>>> from datetime import date, time
>>> lf = pl.LazyFrame(
...     {
...         "float": [1.0, 2.8, 3.0],
...         "int": [40, 50, None],
...         "bool": [True, False, True],
...         "str": ["zz", "xx", "yy"],
...         "date": [date(2020, 1, 1), date(2021, 7, 5), date(2022, 12, 31)],
...         "time": [time(10, 20, 30), time(14, 45, 50), time(23, 15, 10)],
...     }
... )

Show default frame statistics:

>>> lf.describe()
shape: (9, 7)
┌────────────┬──────────┬──────────┬──────────┬──────┬─────────────────────┬──────────┐
│ statistic  ┆ float    ┆ int      ┆ bool     ┆ str  ┆ date                ┆ time     │
│ ---        ┆ ---      ┆ ---      ┆ ---      ┆ ---  ┆ ---                 ┆ ---      │
│ str        ┆ f64      ┆ f64      ┆ f64      ┆ str  ┆ str                 ┆ str      │
╞════════════╪══════════╪══════════╪══════════╪══════╪═════════════════════╪══════════╡
│ count      ┆ 3.0      ┆ 2.0      ┆ 3.0      ┆ 3    ┆ 3                   ┆ 3        │
│ null_count ┆ 0.0      ┆ 1.0      ┆ 0.0      ┆ 0    ┆ 0                   ┆ 0        │
│ mean       ┆ 2.266667 ┆ 45.0     ┆ 0.666667 ┆ null ┆ 2021-07-02 16:00:00 ┆ 16:07:10 │
│ std        ┆ 1.101514 ┆ 7.071068 ┆ null     ┆ null ┆ null                ┆ null     │
│ min        ┆ 1.0      ┆ 40.0     ┆ 0.0      ┆ xx   ┆ 2020-01-01          ┆ 10:20:30 │
│ 25%        ┆ 2.8      ┆ 40.0     ┆ null     ┆ null ┆ 2021-07-05          ┆ 14:45:50 │
│ 50%        ┆ 2.8      ┆ 50.0     ┆ null     ┆ null ┆ 2021-07-05          ┆ 14:45:50 │
│ 75%        ┆ 3.0      ┆ 50.0     ┆ null     ┆ null ┆ 2022-12-31          ┆ 23:15:10 │
│ max        ┆ 3.0      ┆ 50.0     ┆ 1.0      ┆ zz   ┆ 2022-12-31          ┆ 23:15:10 │
└────────────┴──────────┴──────────┴──────────┴──────┴─────────────────────┴──────────┘

Customize which percentiles are displayed, applying linear interpolation:

>>> with pl.Config(tbl_rows=12):
...     lf.describe(
...         percentiles=[0.1, 0.3, 0.5, 0.7, 0.9],
...         interpolation="linear",
...     )
shape: (11, 7)
┌────────────┬──────────┬──────────┬──────────┬──────┬─────────────────────┬──────────┐
│ statistic  ┆ float    ┆ int      ┆ bool     ┆ str  ┆ date                ┆ time     │
│ ---        ┆ ---      ┆ ---      ┆ ---      ┆ ---  ┆ ---                 ┆ ---      │
│ str        ┆ f64      ┆ f64      ┆ f64      ┆ str  ┆ str                 ┆ str      │
╞════════════╪══════════╪══════════╪══════════╪══════╪═════════════════════╪══════════╡
│ count      ┆ 3.0      ┆ 2.0      ┆ 3.0      ┆ 3    ┆ 3                   ┆ 3        │
│ null_count ┆ 0.0      ┆ 1.0      ┆ 0.0      ┆ 0    ┆ 0                   ┆ 0        │
│ mean       ┆ 2.266667 ┆ 45.0     ┆ 0.666667 ┆ null ┆ 2021-07-02 16:00:00 ┆ 16:07:10 │
│ std        ┆ 1.101514 ┆ 7.071068 ┆ null     ┆ null ┆ null                ┆ null     │
│ min        ┆ 1.0      ┆ 40.0     ┆ 0.0      ┆ xx   ┆ 2020-01-01          ┆ 10:20:30 │
│ 10%        ┆ 1.36     ┆ 41.0     ┆ null     ┆ null ┆ 2020-04-20          ┆ 11:13:34 │
│ 30%        ┆ 2.08     ┆ 43.0     ┆ null     ┆ null ┆ 2020-11-26          ┆ 12:59:42 │
│ 50%        ┆ 2.8      ┆ 45.0     ┆ null     ┆ null ┆ 2021-07-05          ┆ 14:45:50 │
│ 70%        ┆ 2.88     ┆ 47.0     ┆ null     ┆ null ┆ 2022-02-07          ┆ 18:09:34 │
│ 90%        ┆ 2.96     ┆ 49.0     ┆ null     ┆ null ┆ 2022-09-13          ┆ 21:33:18 │
│ max        ┆ 3.0      ┆ 50.0     ┆ 1.0      ┆ zz   ┆ 2022-12-31          ┆ 23:15:10 │
└────────────┴──────────┴──────────┴──────────┴──────┴─────────────────────┴──────────┘
r   )	from_dictz/cannot describe a LazyFrame that has no columns)count
null_countmeanstdminc              3  2   #    U  H  oS -  S S3v   M     g7f)d   g%NrY  ).0qs     r   	<genexpr>%LazyFrame.describe.<locals>.<genexpr>b  s     >Iq#ga[?Is   maxc                l    U R                  5       =(       d    U [        [        [        [        [
        4;   $ r   )	is_nestedr<   r@   rH   rI   rP   )dts    r   skip_minmax'LazyFrame.describe.<locals>.skip_minmaxe  s#    <<>URKtVW+U%UUr   Nzcount:znull_count::zmean:zstd:zmin:zmax:c              3  j   #    U  H)  n[         R                  " U5      R                  5       v   M+     g 7fr   )Fcolsort)r  cs     r   r  r    s      !E9a!%%(--//9s   13rn  	statistic)r  r   returnbool)-polars.convertrw  r   r   r4   extendappendr   setr  litr   
is_numericis_temporalr  rx  nameprefixry  r;   rz  r{  r|  r  to_physicalquantilecastaddaliasr  rH   with_columnsselectcollectr  rangerowdictzipr   floatr   insert_columnplSeries) r   percentilesru  rw  r   r   metrics	quantilesr  has_numeric_result	sort_colsmetric_exprsnullr  dtyper  r  count_exprs	mean_exprexpr_stdmin_exprmax_expr	pct_exprsppct_expr
df_metrics	n_metricsncolumn_metricssummaryv
df_summarys                                    r   describeLazyFrame.describe  s'   ~ 	-$$&CCC.  @)+6696NN>I>>u		V 
	V
 ),suI#%uuT{HA))+J(.@U->->-@K a %%,,X6a##%**11-@K *0@ a  *4quuQx||~H-8-?-?quuQx||~TH-8-?-?quuQx||~TH I ' a,,.77=INNuUUU1X..q-@ 
 MM!$#H  1#Qqc
!;<  U__..%D'?2J"&&q)  OOeA3K0 NNT!:. NNT!:.	
  NNT!:.	O 'p  !!!E9!EEV "# WY 	 L	 6::<(
( NN1q9}Q)0CD( 	 
 s6>23 A ! $A 	Z4%8%8 '('>%(SVM $GAJ  w'
  BIIk7$CD)
s   ='S?Splainr   )r   r@  type_coercionpredicate_pushdownprojection_pushdownsimplify_expressionslice_pushdowncomm_subplan_elimcomm_subexpr_elimcluster_with_columnscollapse_joinsr   r   tree_formatoptimizationsc                  Ub  [        SSS9  U(       a  Sn[        U5      nUS:X  a  [        S5        U(       ao  UR                  5       nUS:H  UR                  l        U R                  R                  UR                  5      nUS:X  a  UR                  5       $ UR                  5       $ US:X  a  U R                  R                  5       $ U R                  R                  5       $ )a"  
Create a string representation of the query plan.

Different optimizations can be turned on or off.

Parameters
----------
format : {'plain', 'tree'}
    The format to use for displaying the logical plan.
optimized
    Return an optimized query plan. Defaults to `True`.
    If this is set to `True` the subsequent
    optimization flags control which optimizations
    run.
type_coercion
    Do type coercion optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
predicate_pushdown
    Do predicate pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
projection_pushdown
    Do projection pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
simplify_expression
    Run simplify expressions optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
slice_pushdown
    Slice pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subplan_elim
    Will try to cache branching subplans that occur on self-joins or unions.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subexpr_elim
    Common subexpressions will be cached and reused.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
cluster_with_columns
    Combine sequential independent calls to with_columns

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
collapse_joins
    Collapse a join and filters into a faster join

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query
    is run using the polars in-memory engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars in-memory
    engine. If set to `"gpu"`, the GPU engine is used. Fine-grained
    control over the GPU engine, for example which device to use
    on a system with multiple devices, is possible by providing a
    :class:`~.GPUEngine` object with configuration options.

    .. note::
       GPU mode is considered **unstable**. Not all queries will run
       successfully on the GPU, however, they should fall back transparently
       to the default engine if execution is not supported.

       Running with `POLARS_VERBOSE=1` will provide information if a query
       falls back (and why).

    .. note::
       The GPU engine does not support streaming, if streaming
       is enabled then GPU execution is switched off.
optimizations
    The optimization passes done during query optimization.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
tree_format
    Format the output as a tree.

    .. deprecated:: 0.20.30
        Use `format="tree"` instead.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [1, 2, 3, 4, 5, 6],
...         "c": [6, 5, 4, 3, 2, 1],
...     }
... )
>>> lf.group_by("a", maintain_order=True).agg(pl.all().sum()).sort(
...     "a"
... ).explain()  # doctest: +SKIP
zethe `tree_format` parameter for `LazyFrame.explain` is deprecated Use the `format` parameter instead.z0.20.30versiontreer   &streaming mode is considered unstable.)r#   r   r*   r1  _pyoptflagsr   r   with_optimizationsdescribe_optimized_plan_treedescribe_optimized_plandescribe_plan_treedescribe_plan)r   r   r@  r  r  r  r  r  r  r  r  r  r   r   r  r  r   s                    r   rA  LazyFrame.explain  s    @ "%7!
 '[ "#KL)224M28K2GM%%/))..}/H/HIC77992244V99//1199**,,r   )g      0@g      (@ir)r@  showoutput_path
raw_outputfigsizer  _type_checkr  r  r  r  r  r  r  r  r   
plan_stage_check_orderr  c                  [        U5      nUS:X  a  [        S5        UR                  5       nUS:H  UR                  l        U R
                  R                  UR                  5      nUS:X  a  UR                  U5      nOAUS:X  a*  US:X  a  UR                  U5      nO#UR                  U5      nOSU S3n[        U5      e[        UUUUUS9$ )a  
Show a plot of the query plan.

Note that Graphviz must be installed to render the visualization (if not
already present, you can download it here: `<https://graphviz.org/download>`_).

Parameters
----------
optimized
    Optimize the query plan.
show
    Show the figure.
output_path
    Write the figure to disk.
raw_output
    Return dot syntax. This cannot be combined with `show` and/or `output_path`.
figsize
    Passed to matplotlib if `show == True`.
type_coercion
    Do type coercion optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
predicate_pushdown
    Do predicate pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
projection_pushdown
    Do projection pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
simplify_expression
    Run simplify expressions optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
slice_pushdown
    Slice pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subplan_elim
    Will try to cache branching subplans that occur on self-joins or unions.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subexpr_elim
    Common subexpressions will be cached and reused.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
cluster_with_columns
    Combine sequential independent calls to with_columns.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
collapse_joins
    Collapse a join and filters into a faster join.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query
    is run using the polars in-memory engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars in-memory
    engine. If set to `"gpu"`, the GPU engine is used. Fine-grained
    control over the GPU engine, for example which device to use
    on a system with multiple devices, is possible by providing a
    :class:`~.GPUEngine` object with configuration options.

    .. note::
       GPU mode is considered **unstable**. Not all queries will run
       successfully on the GPU, however, they should fall back transparently
       to the default engine if execution is not supported.

       Running with `POLARS_VERBOSE=1` will provide information if a query
       falls back (and why).

    .. note::
       The GPU engine does not support streaming, if streaming
       is enabled then GPU execution is switched off.
plan_stage : {'ir', 'physical'}
    Select the stage to display. Currently only the streaming engine has a
    separate physical stage, for the other engines both IR and physical are the
    same.


Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [1, 2, 3, 4, 5, 6],
...         "c": [6, 5, 4, 3, 2, 1],
...     }
... )
>>> lf.group_by("a", maintain_order=True).agg(pl.all().sum()).sort(
...     "a"
... ).show_graph()  # doctest: +SKIP
r   r  r  physicalzinvalid plan stage '')rL  r  r  r  r  )r   r*   r1  r  r   r   r  rO  to_dot_streaming_physr   r-   )r   r@  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  r  r  r   rL  	error_msgs                          r   
show_graphLazyFrame.show_graphb  s    D  '[ "#KL%..0.4.C!!+yy++M,E,EF++i(C:%$00;kk),.zl!<II&& #!
 	
r   c                6   ^ SU4S jjnU R                  USSS9$ )a  
Inspect a node in the computation graph.

Print the value that this node in the computation graph evaluates to and pass on
the value.

Examples
--------
>>> lf = pl.LazyFrame({"foo": [1, 1, -2, 3]})
>>> (
...     lf.with_columns(pl.col("foo").cum_sum().alias("bar"))
...     .inspect()  # print the node before the filter
...     .filter(pl.col("bar") == pl.col("foo"))
... )
<LazyFrame at ...>
c                <   > [        TR                  U 5      5        U $ r   )printr   )sfmts    r   inspect"LazyFrame.inspect.<locals>.inspect  s    #**Q- Hr   T)r  r  )r  ro   r  ro   )map_batches)r   r  r  s    ` r   r  LazyFrame.inspect   s)    $	 $   
 	
r   )
descending
nulls_lastmaintain_ordermultithreadedbyr  c          	        [        U[        5      (       a]  U(       dV  [        U[        5      (       aA  [        U[        5      (       a,  U R                  U R                  R                  XX4U5      5      $ [        U/UQ76 n[        U[        U5      SS5      n[        U[        U5      SS5      nU R                  U R                  R                  XX4U5      5      $ )uB  
Sort the LazyFrame by the given columns.

Parameters
----------
by
    Column(s) to sort by. Accepts expression input, including selectors. Strings
    are parsed as column names.
*more_by
    Additional columns to sort by, specified as positional arguments.
descending
    Sort in descending order. When sorting by multiple columns, can be specified
    per column by passing a sequence of booleans.
nulls_last
    Place null values last; can specify a single boolean applying to all columns
    or a sequence of booleans for per-column control.
maintain_order
    Whether the order should be maintained if elements are equal.
    Note that if `true` streaming is not possible and performance might be
    worse since this requires a stable search.
multithreaded
    Sort using multiple threads.

Examples
--------
Pass a single column name to sort by that column.

>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, None],
...         "b": [6.0, 5.0, 4.0],
...         "c": ["a", "c", "b"],
...     }
... )
>>> lf.sort("a").collect()
shape: (3, 3)
┌──────┬─────┬─────┐
│ a    ┆ b   ┆ c   │
│ ---  ┆ --- ┆ --- │
│ i64  ┆ f64 ┆ str │
╞══════╪═════╪═════╡
│ null ┆ 4.0 ┆ b   │
│ 1    ┆ 6.0 ┆ a   │
│ 2    ┆ 5.0 ┆ c   │
└──────┴─────┴─────┘

Sorting by expressions is also supported.

>>> lf.sort(pl.col("a") + pl.col("b") * 2, nulls_last=True).collect()
shape: (3, 3)
┌──────┬─────┬─────┐
│ a    ┆ b   ┆ c   │
│ ---  ┆ --- ┆ --- │
│ i64  ┆ f64 ┆ str │
╞══════╪═════╪═════╡
│ 2    ┆ 5.0 ┆ c   │
│ 1    ┆ 6.0 ┆ a   │
│ null ┆ 4.0 ┆ b   │
└──────┴─────┴─────┘

Sort by multiple columns by passing a list of columns.

>>> lf.sort(["c", "a"], descending=True).collect()
shape: (3, 3)
┌──────┬─────┬─────┐
│ a    ┆ b   ┆ c   │
│ ---  ┆ --- ┆ --- │
│ i64  ┆ f64 ┆ str │
╞══════╪═════╪═════╡
│ 2    ┆ 5.0 ┆ c   │
│ null ┆ 4.0 ┆ b   │
│ 1    ┆ 6.0 ┆ a   │
└──────┴─────┴─────┘

Or use positional arguments to sort by multiple columns in the same way.

>>> lf.sort("c", "a", descending=[False, True]).collect()
shape: (3, 3)
┌──────┬─────┬─────┐
│ a    ┆ b   ┆ c   │
│ ---  ┆ --- ┆ --- │
│ i64  ┆ f64 ┆ str │
╞══════╪═════╪═════╡
│ 1    ┆ 6.0 ┆ a   │
│ null ┆ 4.0 ┆ b   │
│ 2    ┆ 5.0 ┆ c   │
└──────┴─────┴─────┘
r  r  r  )
r   r   r  r   r   r  r&   r.   r  sort_by_exprs)r   r  r  r  r  r  more_bys          r   r  LazyFrame.sort  s    F r3:t,,:t,,##		J  ,B99 SWlDI
 SWlDI
II##
M
 	
r   r   )
table_namec                   SSK Jn  [        S5        U" SSS9 nU(       a  UOSnUR                  XPS9  UR	                  U5      sSSS5        $ ! , (       d  f       g= f)	u 	  
Execute a SQL query against the LazyFrame.

.. versionadded:: 0.20.23

.. warning::
    This functionality is considered **unstable**, although it is close to
    being considered stable. It may be changed at any point without it being
    considered a breaking change.

Parameters
----------
query
    SQL query to execute.
table_name
    Optionally provide an explicit name for the table that represents the
    calling frame (defaults to "self").

Notes
-----
* The calling frame is automatically registered as a table in the SQL context
  under the name "self". If you want access to the DataFrames and LazyFrames
  found in the current globals, use the top-level :meth:`pl.sql <polars.sql>`.
* More control over registration and execution behaviour is available by
  using the :class:`SQLContext` object.

See Also
--------
SQLContext

Examples
--------
>>> lf1 = pl.LazyFrame({"a": [1, 2, 3], "b": [6, 7, 8], "c": ["z", "y", "x"]})
>>> lf2 = pl.LazyFrame({"a": [3, 2, 1], "d": [125, -654, 888]})

Query the LazyFrame using SQL:

>>> lf1.sql("SELECT c, b FROM self WHERE a > 1").collect()
shape: (2, 2)
┌─────┬─────┐
│ c   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ y   ┆ 7   │
│ x   ┆ 8   │
└─────┴─────┘

Apply SQL transforms (aliasing "self" to "frame") then filter
natively (you can freely mix SQL and native operations):

>>> lf1.sql(
...     query='''
...         SELECT
...             a,
...             (a % 2 == 0) AS a_is_even,
...             (b::float4 / 2) AS "b/2",
...             CONCAT_WS(':', c, c, c) AS c_c_c
...         FROM frame
...         ORDER BY a
...     ''',
...     table_name="frame",
... ).filter(~pl.col("c_c_c").str.starts_with("x")).collect()
shape: (2, 4)
┌─────┬───────────┬─────┬───────┐
│ a   ┆ a_is_even ┆ b/2 ┆ c_c_c │
│ --- ┆ ---       ┆ --- ┆ ---   │
│ i64 ┆ bool      ┆ f32 ┆ str   │
╞═════╪═══════════╪═════╪═══════╡
│ 1   ┆ false     ┆ 3.0 ┆ z:z:z │
│ 2   ┆ true      ┆ 3.5 ┆ y:y:y │
└─────┴───────────┴─────┴───────┘
r   )
SQLContextzS`sql` is considered **unstable** (although it is close to being considered stable).F)register_globalseagerr   )r  frameN)
polars.sqlr  r*   registerexecute)r   queryr  r  ctxr  s         r   sqlLazyFrame.sql  sP    T 	*a	
 e<!+:DLLdL/;;u% =<<s   +A
Areversez1.0.0r  )r  c                   [        U5      n[        U[        U5      SS5      nU R                  U R                  R                  XUS95      $ )ug  
Return the `k` largest rows.

Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order; call :func:`sort` after this function if you want the
output to be sorted.

.. versionchanged:: 1.0.0
    The `descending` parameter was renamed `reverse`.

Parameters
----------
k
    Number of rows to return.
by
    Column(s) used to determine the top rows.
    Accepts expression input. Strings are parsed as column names.
reverse
    Consider the `k` smallest elements of the `by` column(s) (instead of the `k`
    largest). This can be specified per column by passing a sequence of
    booleans.

See Also
--------
bottom_k

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [2, 1, 1, 3, 2, 1],
...     }
... )

Get the rows which contain the 4 largest values in column b.

>>> lf.top_k(4, by="b").collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ b   ┆ 3   │
│ a   ┆ 2   │
│ b   ┆ 2   │
│ b   ┆ 1   │
└─────┴─────┘

Get the rows which contain the 4 largest values when sorting on column b and a.

>>> lf.top_k(4, by=["b", "a"]).collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ b   ┆ 3   │
│ b   ┆ 2   │
│ a   ┆ 2   │
│ c   ┆ 1   │
└─────┴─────┘
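
Pass `reverse=True` to select the rows containing the `k` smallest values
of the `by` column(s) instead (for this frame, equivalent to :meth:`bottom_k`):

>>> lf.top_k(4, by="b", reverse=True).collect()  # doctest: +SKIP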
r  r  r  r  )r&   r.   r  r   r   top_kr   kr  r  s       r   r  LazyFrame.top_k  sE    T ,B/gs2w	4@		' JKKr   c                   [        U5      n[        U[        U5      SS5      nU R                  U R                  R                  XUS95      $ )up  
Return the `k` smallest rows.

Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order; call :func:`sort` after this function if you want the
output to be sorted.

.. versionchanged:: 1.0.0
    The `descending` parameter was renamed `reverse`.

Parameters
----------
k
    Number of rows to return.
by
    Column(s) used to determine the bottom rows.
    Accepts expression input. Strings are parsed as column names.
reverse
    Consider the `k` largest elements of the `by` column(s) (instead of the `k`
    smallest). This can be specified per column by passing a sequence of
    booleans.

See Also
--------
top_k

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [2, 1, 1, 3, 2, 1],
...     }
... )

Get the rows which contain the 4 smallest values in column b.

>>> lf.bottom_k(4, by="b").collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ b   ┆ 1   │
│ a   ┆ 1   │
│ c   ┆ 1   │
│ a   ┆ 2   │
└─────┴─────┘

Get the rows which contain the 4 smallest values when sorting on column a and b.

>>> lf.bottom_k(4, by=["a", "b"]).collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a   ┆ 1   │
│ a   ┆ 2   │
│ b   ┆ 1   │
│ b   ┆ 2   │
└─────┴─────┘
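
Likewise, pass `reverse=True` to select the rows containing the `k` largest
values of the `by` column(s) instead (for this frame, equivalent to :meth:`top_k`):

>>> lf.bottom_k(4, by="b", reverse=True).collect()  # doctest: +SKIP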
r  r  r  )r&   r.   r  r   r   bottom_kr  s       r   r  LazyFrame.bottom_k4  sH    T ,B/gs2w	4@		 2 21W 2 MNNr   r   )      )r  r  r  r  no_optimizationr  r  r  r  r  	show_plottruncate_nodesr  r   r  c               J   U H  nUS;  d  M  SU S3n[        U5      e   [        U5      nUR                  5       nU R                  R	                  UR
                  5      n[        USSSSS9nUR                  S5      b  UR                  S5      nUR                  U5      u  nn[        U5      [        U5      nnU(       Ga^  [        SS	S
9  SSKJn  UR                  SUS9u  nnUS   S   nUR                  5       nUS:  a-  SnUR                  [         R"                  " SS/5      S-  5      nO5US:  a-  SnUR                  [         R"                  " SS/5      S-  5      nOSnUS:  aB  UR                  [         R"                  " S5      R$                  R'                  SU5      S-   5      nUS   S   nUR)                  US   US   US   -
  US   S9  UR+                  S5        UR-                  SU SU U 35        UR/                  S5        UR1                  5         UU4$ )u  
Profile a LazyFrame.

This will run the query and return a tuple containing the materialized
DataFrame and a DataFrame with profiling information for each node that
is executed.

The units of the timings are microseconds.

Parameters
----------
type_coercion
    Do type coercion optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
predicate_pushdown
    Do predicate pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
projection_pushdown
    Do projection pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
simplify_expression
    Run simplify expressions optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
no_optimization
    Turn off (certain) optimizations.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
slice_pushdown
    Slice pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subplan_elim
    Will try to cache branching subplans that occur on self-joins or unions.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subexpr_elim
    Common subexpressions will be cached and reused.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
cluster_with_columns
    Combine sequential independent calls to with_columns

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
collapse_joins
    Collapse a join and filters into a faster join

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
show_plot
    Show a Gantt chart of the profiling result.
truncate_nodes
    Truncate the label lengths in the Gantt chart to this number of
    characters.
figsize
    matplotlib figsize of the profiling plot
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query
    is run using the polars in-memory engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars in-memory
    engine. If set to `"gpu"`, the GPU engine is used. Fine-grained
    control over the GPU engine, for example which device to use
    on a system with multiple devices, is possible by providing a
    :class:`~.GPUEngine` object with configuration options.

    .. note::
       GPU mode is considered **unstable**. Not all queries will run
       successfully on the GPU, however, they should fall back transparently
       to the default engine if execution is not supported.

       Running with `POLARS_VERBOSE=1` will provide information if a query
       falls back (and why).

    .. note::
       The GPU engine does not support streaming; if streaming is
       enabled, GPU execution is switched off.
optimizations
    The optimization passes done during query optimization.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.


Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [1, 2, 3, 4, 5, 6],
...         "c": [6, 5, 4, 3, 2, 1],
...     }
... )
>>> lf.group_by("a", maintain_order=True).agg(pl.all().sum()).sort(
...     "a"
... ).profile()  # doctest: +SKIP
(shape: (3, 3)
 ┌─────┬─────┬─────┐
 │ a   ┆ b   ┆ c   │
 │ --- ┆ --- ┆ --- │
 │ str ┆ i64 ┆ i64 │
 ╞═════╪═════╪═════╡
 │ a   ┆ 4   ┆ 10  │
 │ b   ┆ 11  ┆ 10  │
 │ c   ┆ 6   ┆ 1   │
 └─────┴─────┴─────┘,
 shape: (3, 3)
 ┌─────────────────────────┬───────┬──────┐
 │ node                    ┆ start ┆ end  │
 │ ---                     ┆ ---   ┆ ---  │
 │ str                     ┆ u64   ┆ u64  │
 ╞═════════════════════════╪═══════╪══════╡
 │ optimization            ┆ 0     ┆ 5    │
 │ group_by_partitioned(a) ┆ 5     ┆ 470  │
 │ sort(a)                 ┆ 475   ┆ 1964 │
 └─────────────────────────┴───────┴──────┘)
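
The per-node timings can also be rendered as a Gantt chart via the
`show_plot` and `truncate_nodes` parameters described above (requires
matplotlib to be installed):

>>> lf.group_by("a", maintain_order=True).agg(pl.all().sum()).sort(
...     "a"
... ).profile(show_plot=True, truncate_nodes=20)  # doctest: +SKIP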
)post_opt_callbackz.profile() got an unexpected keyword argument 'r  Fr   r   r   r   r  N
matplotlibz+should be installed to show profiling plots)
err_suffixr   rn  )r  endg    eAr  starti@B g    .Amsi  usnodez...)r  leftzProfiling resultznode duration in [z	], total nodes)r   r   r1  r   r  r  r   getprofiler7   rV   matplotlib.pyplotpyplotsubplotsr  r  r  r  r   r9  barhtitle
set_xlabel
set_ylabelr  ) r   r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  _kwargsr  r  r   callbackdf_py
timings_pydftimingsplt_figaxmax_valtimings_unitmax_in_units                                    r   r)  LazyFrame.profile  s=   r A   MQCqQ		**   '%..0ii**=+D+DE'
 ;;*+7{{#67HKK1z
(;WH ,||Aw|7HD"enR(G(H}#007G1H91TU3#007G1H41OP!#00EE&M%%++A~>F #5/!,KGG uo(99g&   II()MM.tfIk]4&QRMM'"HHJ7{r   )r  r  r  r  r  r  r  r  r  r  r   r  c                   g r   rY  r   r  r  r  r  r  r  r  r  r  r  r   r   r  s                 r   r  LazyFrame.collectZ  s    " r   )r  r  r  r  r  r  r  r  r  r  r   r   r  c                   g r   rY  r@  s                 r   r  rA  m  s    " r   c               B   U H  nUS;  d  M  SU S3n[        U5      e   [        U5      nUR                  SS5      =(       d    [        5       S:H  nU(       a  SnUS:X  a  [	        S5        [        USUUUR                  R                  S9n[        U[        5      (       a  S	nU R                  R                  UR                  5      nU(       a$  [	        S
5        [        UR                  5       5      $ UR                  SU5      n[        UR                  UU5      5      $ )u   
Materialize this LazyFrame into a DataFrame.

By default, all query optimizations are enabled. Individual optimizations may
be disabled by setting the corresponding parameter to `False`.

Parameters
----------
type_coercion
    Do type coercion optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
predicate_pushdown
    Do predicate pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
projection_pushdown
    Do projection pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
simplify_expression
    Run simplify expressions optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
slice_pushdown
    Slice pushdown optimization.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subplan_elim
    Will try to cache branching subplans that occur on self-joins or unions.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
comm_subexpr_elim
    Common subexpressions will be cached and reused.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
cluster_with_columns
    Combine sequential independent calls to with_columns

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
collapse_joins
    Collapse a join and filters into a faster join

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
no_optimization
    Turn off (certain) optimizations.

    .. deprecated:: 1.30.0
        Use the `optimizations` parameters.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query
    is run using the polars in-memory engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars in-memory
    engine. If set to `"gpu"`, the GPU engine is used. Fine-grained
    control over the GPU engine, for example which device to use
    on a system with multiple devices, is possible by providing a
    :class:`~.GPUEngine` object with configuration options.

    .. note::
       GPU mode is considered **unstable**. Not all queries will run
       successfully on the GPU, however, they should fall back transparently
       to the default engine if execution is not supported.

       Running with `POLARS_VERBOSE=1` will provide information if a query
       falls back (and why).

    .. note::
       The GPU engine does not support streaming or running in the
       background. If either is enabled, GPU execution is switched off.
background
    Run the query in the background and get a handle to the query.
    This handle can be used to fetch the result or cancel the query.

    .. warning::
        Background mode is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
optimizations
    The optimization passes done during query optimization.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
DataFrame

See Also
--------
explain : Print the query plan that is evaluated with collect.
profile : Collect the LazyFrame and time each node in the computation graph.
polars.collect_all : Collect multiple LazyFrames at the same time.
polars.Config.set_streaming_chunk_size : Set the size of streaming batches.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [1, 2, 3, 4, 5, 6],
...         "c": [6, 5, 4, 3, 2, 1],
...     }
... )
>>> lf.group_by("a").agg(pl.all().sum()).collect()  # doctest: +SKIP
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ a   ┆ 4   ┆ 10  │
│ b   ┆ 11  ┆ 10  │
│ c   ┆ 6   ┆ 1   │
└─────┴─────┴─────┘

Collect in streaming mode

>>> lf.group_by("a").agg(pl.all().sum()).collect(
...     engine="streaming"
... )  # doctest: +SKIP
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ a   ┆ 4   ┆ 10  │
│ b   ┆ 11  ┆ 10  │
│ c   ┆ 6   ┆ 1   │
└─────┴─────┴─────┘

Collect in GPU mode

>>> lf.group_by("a").agg(pl.all().sum()).collect(engine="gpu")  # doctest: +SKIP
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ b   ┆ 11  ┆ 10  │
│ a   ┆ 4   ┆ 10  │
│ c   ┆ 6   ┆ 1   │
└─────┴─────┴─────┘

With control over the device used

>>> lf.group_by("a").agg(pl.all().sum()).collect(
...     engine=pl.GPUEngine(device=1)
... )  # doctest: +SKIP
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ b   ┆ 11  ┆ 10  │
│ a   ┆ 4   ┆ 10  │
│ c   ┆ 6   ┆ 1   │
└─────┴─────┴─────┘
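
Run the query in the background and obtain a handle to it (an illustrative
sketch; as described under `background` above, the returned handle can be
used to fetch the result or cancel the query):

>>> handle = lf.group_by("a").agg(pl.all().sum()).collect(background=True)  # doctest: +SKIP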
)r   r  z.collect() got an unexpected keyword argument 'r  r   Fr   r  r  r   z'background mode is considered unstable.r  )r   r   r(  re   r*   r   r  r  r   r\   r   r  r^   collect_concurrentlyr7   r  )r   r  r  r  r  r  r  r  r  r  r  r   r   r  r1  r  r  r   r2  r   s                       r   r  rA    s   B A   MQCqQ		**   ' KK/W3F3HK3W 	  F[ "#KL'!' ,,22
 fi((Fii**=+D+DE"#LM!#":":"<== ;;2H=s{{68455r   )r   r  c                   g r   rY  r   geventr   r  s       r   collect_asyncLazyFrame.collect_asynci	  s     -0r   )rG  r   r  c                   g r   rY  rF  s       r   rH  rI  r	  s      #r   c                   [        U5      nUS:X  a  [        S5        U R                  R                  UR                  5      nU(       a
  [        5       O	[        5       nUR                  X%R                  5        U$ )uN
  
Collect DataFrame asynchronously in thread pool.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Collects into a DataFrame (like :func:`collect`), but instead of returning
the DataFrame directly, the collection is scheduled on a thread pool while
this method returns almost instantly.

This can be useful if you use `gevent` or `asyncio` and want to release
control to other greenlets/tasks while LazyFrames are being collected.

Parameters
----------
gevent
    Return wrapper to `gevent.event.AsyncResult` instead of Awaitable
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query
    is run using the polars in-memory engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars in-memory
    engine.

    .. note::
       The GPU engine does not support async or running in the
       background. If either is enabled, GPU execution is switched off.
optimizations
    The optimization passes done during query optimization.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
If `gevent=False` (default) then returns an awaitable.

If `gevent=True` then returns wrapper that has a
`.get(block=True, timeout=None)` method.

See Also
--------
polars.collect_all : Collect multiple LazyFrames at the same time.
polars.collect_all_async : Collect multiple LazyFrames at the same time lazily.

Notes
-----
If an error occurs, it is set via `set_exception` on the
`asyncio.Future`/`gevent.event.AsyncResult` and re-raised when the result
is awaited or retrieved.

Examples
--------
>>> import asyncio
>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "b", "c"],
...         "b": [1, 2, 3, 4, 5, 6],
...         "c": [6, 5, 4, 3, 2, 1],
...     }
... )
>>> async def main():
...     return await (
...         lf.group_by("a", maintain_order=True)
...         .agg(pl.all().sum())
...         .collect_async()
...     )
>>> asyncio.run(main())
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ a   ┆ 4   ┆ 10  │
│ b   ┆ 11  ┆ 10  │
│ c   ┆ 6   ┆ 1   │
└─────┴─────┴─────┘
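
With `gevent=True` a wrapper object is returned instead of an awaitable;
its result is retrieved through the `.get()` method mentioned above:

>>> result = (
...     lf.group_by("a", maintain_order=True)
...     .agg(pl.all().sum())
...     .collect_async(gevent=True)
... )  # doctest: +SKIP
>>> result.get(block=True, timeout=None)  # doctest: +SKIP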
r   r  )	r   r*   r   r  r  r   r   collect_with_callback	_callback)r   rG  r   r  r   results         r   rH  rI  {	  sk    t  '[ "#KLii**=+D+DE )/"$4G4I 	 	!!&*:*:;r   c                F    [        U R                  R                  5       SS9$ )a  
Resolve the schema of this LazyFrame.

Examples
--------
Determine the schema.

>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.collect_schema()
Schema({'foo': Int64, 'bar': Float64, 'ham': String})

Access various properties of the schema.

>>> schema = lf.collect_schema()
>>> schema["bar"]
Float64
>>> schema.names()
['foo', 'bar', 'ham']
>>> schema.dtypes()
[Int64, Float64, String]
>>> schema.len()
3
F)check_dtypes)ra   r   r   r   s    r   r   LazyFrame.collect_schema	  s    < dii..0uEEr   zstd   )compressioncompression_level
statisticsrow_group_sizedata_page_sizer  storage_optionscredential_providerretriessync_on_closemkdirr   field_overwritesr   metadatar  c                   g r   rY  r   r   rT  rU  rV  rW  rX  r  rY  rZ  r[  r\  r]  r   r^  r   r_  r  s                     r   sink_parquetLazyFrame.sink_parquet
  s    4 r   )rT  rU  rV  rW  rX  r  rY  rZ  r[  r\  r]  r^  r   r_  r  c                   g r   rY  ra  s                     r   rb  rc  
  s    4 r   )rT  rU  rV  rW  rX  r  rY  rZ  r[  r\  r_  r]  r   r^  r   r  c                  [        U5      nUb  Sn[        U5        [        U[        5      (       a  U(       a  SSSSS.nO,[        U[        5      (       a
  U(       d  0 nOUS:X  a  SSSSS.nSSKJn  U" XUS	5      nA	U(       a  [        UR                  5       5      nOSn[        U5      nU=(       d    S
UUS.n[        U[        5      (       a$  U(       a  [        UR                  5       5      nOSnO[        U5      (       a  [        U5      n/ nUb  SSKnSSKJnJnJn  [        UU5      (       a
  U" U5      /nO[        UUR"                  R$                  5      (       a  U" [        U5      5      nOW[        UUR"                  R&                  5      (       a  U Vs/ s H  nU" U5      PM     nnOS[)        U5       3n[+        U5      eU R,                  R/                  UUUUUUUUU
UUUS9nU(       d@  UR1                  UR2                  5      n[4        R7                  U5      nUR9                  US9  g[4        R7                  U5      $ s  snf )a  
Evaluate the query in streaming mode and write to a Parquet file.

This allows streaming results that are larger than RAM to be written to disk.

Parameters
----------
path
    File path to which the file should be written.
compression : {'lz4', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'zstd'}
    Choose "zstd" for good compression performance.
    Choose "lz4" for fast compression/decompression.
    Choose "snappy" for more backwards compatibility guarantees
    when you deal with older parquet readers.
compression_level
    The level of compression to use. Higher compression means smaller files on
    disk.

    - "gzip" : min-level: 0, max-level: 9.
    - "brotli" : min-level: 0, max-level: 11.
    - "zstd" : min-level: 1, max-level: 22.
statistics
    Write statistics to the parquet headers. This is the default behavior.

    Possible values:

    - `True`: enable default set of statistics (default). Some
      statistics may be disabled.
    - `False`: disable all statistics
    - "full": calculate and write all available statistics. Cannot be
      combined with `use_pyarrow`.
    - `{ "statistic-key": True / False, ... }`. Cannot be combined with
      `use_pyarrow`. Available keys:

      - "min": column minimum value (default: `True`)
      - "max": column maximum value (default: `True`)
      - "distinct_count": number of unique column values (default: `False`)
      - "null_count": number of null values in column (default: `True`)
row_group_size
    Size of the row groups in number of rows.
    If None (default), the chunks of the `DataFrame` are
    used. Writing in smaller chunks may reduce memory pressure and improve
    writing speeds.
data_page_size
    Size limit of individual data pages.
    If not set defaults to 1024 * 1024 bytes
maintain_order
    Maintain the order in which data is processed.
    Setting this to `False` will be slightly faster.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
storage_options
    Options that indicate how to connect to a cloud provider.

    The cloud providers currently supported are AWS, GCP, and Azure.
    See supported keys here:

    * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
    * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
    * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
    * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

    If `storage_options` is not provided, Polars will try to infer the
    information from environment variables.
credential_provider
    Provide a function that can be called to provide cloud storage
    credentials. The function is expected to return a dictionary of
    credential keys along with an optional credential expiry time.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
retries
    Number of retries if accessing a cloud instance fails.
sync_on_close: { None, 'data', 'all' }
    Sync to disk before closing a file.

    * `None` does not sync.
    * `data` syncs the file contents.
    * `all` syncs the file contents and metadata.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
metadata
    A dictionary or callback to add key-values to the file-level Parquet
    metadata.

    .. warning::
        This functionality is considered **experimental**. It may be removed or
        changed at any point without it being considered a breaking change.
mkdir: bool
    Recursively create all the directories in the path.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
lazy: bool
    Wait to start execution until `collect` is called.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
field_overwrites
    Property overwrites for individual Parquet fields.

    This allows more control over the writing process to the granularity of a
    Parquet field.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query is run
    using the polars streaming engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars streaming
    engine.
optimizations
    The optimization passes done during query optimization.

    This has no effect if `lazy` is set to `True`.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
DataFrame

Examples
--------
>>> lf = pl.scan_csv("/path/to/my_larger_than_ram_file.csv")  # doctest: +SKIP
>>> lf.sink_parquet("out.parquet")  # doctest: +SKIP
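
Sinking to cloud storage and deferring execution are also supported; the
bucket path and storage option key below are illustrative only:

>>> lf.sink_parquet(
...     "s3://bucket/out.parquet",
...     storage_options={"aws_region": "us-east-1"},
... )  # doctest: +SKIP
>>> lf.sink_parquet("out.parquet", lazy=True).collect()  # doctest: +SKIP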
Nz/`metadata` parameter is considered experimentalTF)r|  r  distinct_country  fullr   !_init_credential_provider_builderrb  noner\  r  r]  )r   +_parquet_field_overwrites_dict_to_dict_list!_parquet_field_overwrites_to_dictz$field_overwrites got the wrong type )targetrT  rU  rV  rW  rX  cloud_optionsrZ  r[  sink_optionsr_  r^  r   )r   r*   r   r  ,polars.io.cloud.credential_provider._builderri  r   r   r   r  r   r$   collections"polars.io.parquet.field_overwritesr   rl  rm  abcr   rg   typer   r   rb  r  r  r   r   r  )r   r   rT  rU  rV  rW  rX  r  rY  rZ  r[  r\  r_  r]  r   r^  r   r  r   ri  credential_provider_builderrn  rp  field_overwrites_dictsrr  r   rl  rm  r  ldf_pyr   s                                  r   rb  rc  :
  sh   N  'CC"3'j$''J"'"	J 
D))*J6!"&"	J	
 'H'
#  "?#8#8#:;O #O &*4f,
 h%% 01  h5h?H 8:'  *,BCC56FG*& ,koo.E.EFF)T)**& ,koo.F.FGGBR*BRQ5a8BR ' *& =TBR=S<TUn$''#/!))) ;%3 ( 
 ..}/H/HIF''/CKKvK&$$V,,7*s   I
)rT  compat_levelr  rY  rZ  r[  r\  r]  r   r   r  c                   g r   rY  r   r   rT  ry  r  rY  rZ  r[  r\  r]  r   r   r  s                r   sink_ipcLazyFrame.sink_ipcG  s    $ r   )
rT  ry  r  rY  rZ  r[  r\  r]  r   r  c                   g r   rY  r{  s                r   r|  r}  [  s    $ r   uncompressedc          
     8   [        U5      nSSKJn  U" XaUS5      nAU(       a  [        UR	                  5       5      nOSn[        U5      nU=(       d    SUU	S.nUc  SnO<[        U[        5      (       a  UR                  nOS[        U5      < 3n[        U5      eUc  S	nU R                  R                  UUUUUUUS
9nU
(       d@  UR                  UR                  5      n[        R!                  U5      nUR#                  US9  g[        R!                  U5      $ )aZ  
Evaluate the query in streaming mode and write to an IPC file.

This allows streaming results that are larger than RAM to be written to disk.

Parameters
----------
path
    File path to which the file should be written.
compression : {'uncompressed', 'lz4', 'zstd'}
    Choose "zstd" for good compression performance.
    Choose "lz4" for fast compression/decompression.
compat_level
    Use a specific compatibility level
    when exporting Polars' internal data structures.
maintain_order
    Maintain the order in which data is processed.
    Setting this to `False` will be slightly faster.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
storage_options
    Options that indicate how to connect to a cloud provider.

    The cloud providers currently supported are AWS, GCP, and Azure.
    See supported keys here:

    * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
    * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
    * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
    * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

    If `storage_options` is not provided, Polars will try to infer the
    information from environment variables.
credential_provider
    Provide a function that can be called to provide cloud storage
    credentials. The function is expected to return a dictionary of
    credential keys along with an optional credential expiry time.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
retries
    Number of retries if accessing a cloud instance fails.
sync_on_close: { None, 'data', 'all' }
    Sync to disk when before closing a file.

    * `None` does not sync.
    * `data` syncs the file contents.
    * `all` syncs the file contents and metadata.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
mkdir: bool
    Recursively create all the directories in the path.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
lazy: bool
    Wait to start execution until `collect` is called.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query is run
    using the polars streaming engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars streaming
    engine.

    .. note::
       The GPU engine is currently not supported.
optimizations
    The optimization passes done during query optimization.

    This has no effect if `lazy` is set to `True`.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
DataFrame

Examples
--------
>>> lf = pl.scan_csv("/path/to/my_larger_than_ram_file.csv")  # doctest: +SKIP
>>> lf.sink_ipc("out.arrow")  # doctest: +SKIP
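
The compression codec can be selected explicitly (one of the values listed
under `compression` above):

>>> lf.sink_ipc("out.arrow", compression="zstd")  # doctest: +SKIP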
r   rh  r|  Nrj  rk  Tz!`compat_level` has invalid type: r  )rn  rT  ry  ro  rZ  r[  rp  r   )r   rq  ri  r   r   r   r   r[   _versionr5   r   r   r|  r  r  r   r   r  )r   r   rT  ry  r  rY  rZ  r[  r\  r]  r   r   r  ri  rv  rn  rp  compat_level_pyr   rx  r   s                        r   r|  r}  o  s2   f  '	
 'H
'
#  "?#8#8#:;O #O &*4f,
 "Ok22*33O56I,6W5Z[CC. (K###() ;% $ 
 ..}/H/HIF''/CKKvK&$$V,,r   ,rN  r  i   )include_bominclude_header	separatorline_terminator
quote_char
batch_sizedatetime_formatdate_formattime_formatfloat_scientificfloat_precisiondecimal_comma
null_valuequote_styler  rY  rZ  r[  r\  r]  r   r   r  c                   g r   rY  r   r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  rY  rZ  r[  r\  r]  r   r   r  s                            r   sink_csvLazyFrame.sink_csv  s    < r   )r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  rY  rZ  r[  r\  r]  r   r  c                   g r   rY  r  s                            r   r  r  7  s    < r   c               &   SSK Jn  U" SUSS9  U" SUSS9  U(       d  Sn[        U5      nSSKJn  U" UUUS	5      nAU(       a  [        UR                  5       5      nOSn[        U5      nU=(       d    S
UUS.nU R                  R                  UUU[        U5      U[        U5      UUU	U
UUUUUUUUUS9nU(       d@  UR                  UR                  5      n[        R                  U5      nUR                  US9  g[        R                  U5      $ )ar  
Evaluate the query in streaming mode and write to a CSV file.

This allows streaming results that are larger than RAM to be written to disk.

Parameters
----------
path
    File path to which the file should be written.
include_bom
    Whether to include UTF-8 BOM in the CSV output.
include_header
    Whether to include header in the CSV output.
separator
    Separate CSV fields with this symbol.
line_terminator
    String used to end each row.
quote_char
    Byte to use as quoting character.
batch_size
    Number of rows that will be processed per thread.
datetime_format
    A format string, with the specifiers defined by the
    `chrono <https://docs.rs/chrono/latest/chrono/format/strftime/index.html>`_
    Rust crate. If no format specified, the default fractional-second
    precision is inferred from the maximum timeunit found in the frame's
    Datetime cols (if any).
date_format
    A format string, with the specifiers defined by the
    `chrono <https://docs.rs/chrono/latest/chrono/format/strftime/index.html>`_
    Rust crate.
time_format
    A format string, with the specifiers defined by the
    `chrono <https://docs.rs/chrono/latest/chrono/format/strftime/index.html>`_
    Rust crate.
float_scientific
    Whether to use scientific form always (true), never (false), or
    automatically (None) for `Float32` and `Float64` datatypes.
float_precision
    Number of decimal places to write, applied to both `Float32` and
    `Float64` datatypes.
decimal_comma
    Use a comma as the decimal separator instead of a point. Floats will be
    encapsulated in quotes if necessary; set the field separator to override.
null_value
    A string representing null values (defaulting to the empty string).
quote_style : {'necessary', 'always', 'non_numeric', 'never'}
    Determines the quoting strategy used.

    - necessary (default): This puts quotes around fields only when necessary.
      They are necessary when fields contain a quote,
      delimiter or record terminator.
      Quotes are also necessary when writing an empty record
      (which is indistinguishable from a record with one empty field).
      This is the default.
    - always: This puts quotes around every field. Always.
    - never: This never puts quotes around fields, even if that results in
      invalid CSV data (e.g.: by not quoting strings containing the
      separator).
    - non_numeric: This puts quotes around all fields that are non-numeric.
      Namely, when writing a field that does not parse as a valid float
      or integer, then quotes will be used even if they aren`t strictly
      necessary.
maintain_order
    Maintain the order in which data is processed.
    Setting this to `False` will be slightly faster.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
storage_options
    Options that indicate how to connect to a cloud provider.

    The cloud providers currently supported are AWS, GCP, and Azure.
    See supported keys here:

    * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
    * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
    * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
    * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

    If `storage_options` is not provided, Polars will try to infer the
    information from environment variables.
credential_provider
    Provide a function that can be called to provide cloud storage
    credentials. The function is expected to return a dictionary of
    credential keys along with an optional credential expiry time.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
retries
    Number of retries if accessing a cloud instance fails.
sync_on_close: { None, 'data', 'all' }
    Sync to disk before closing a file.

    * `None` does not sync.
    * `data` syncs the file contents.
    * `all` syncs the file contents and metadata.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
mkdir: bool
    Recursively create all the directories in the path.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
lazy: bool
    Wait to start execution until `collect` is called.

    .. warning::
        This functionality is considered **unstable**. It may be changed at any
        point without it being considered a breaking change.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query is run
    using the polars streaming engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars streaming
    engine.
optimizations
    The optimization passes done during query optimization.

    This has no effect if `lazy` is set to `True`.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
DataFrame

Examples
--------
>>> lf = pl.scan_csv("/path/to/my_larger_than_ram_file.csv")  # doctest: +SKIP
>>> lf.sink_csv("out.csv")  # doctest: +SKIP
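
The CSV dialect can be adjusted through the parameters described above,
for example writing semicolon-separated output with an explicit null marker:

>>> lf.sink_csv("out.csv", separator=";", null_value="NA")  # doctest: +SKIP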
r   )_check_arg_is_1byter  F)can_be_emptyr  Nrh  r  rj  rk  )rn  r  r  r  r  r  r  r  r  r  r  r  r  r  r  ro  rZ  r[  rp  r   )polars.io.csv._utilsr  r   rq  ri  r   r   r   r   r  ordr  r  r   r   r  ) r   r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  rY  rZ  r[  r\  r]  r   r   r  r  ri  rv  rn  rp  rx  r   s                                    r   r  r  W  s8   X 	=KGL*5IJ'	
 'H
'
#  "?#8#8#:;O #O &*4f,
 ###))n+:!+##-+'!#) ;%' $ 
, ..}/H/HIF''/CKKvK&$$V,,r   )	r  rY  rZ  r[  r\  r]  r   r   r  c       	            g r   rY  r   r   r  rY  rZ  r[  r\  r]  r   r   r  s              r   sink_ndjsonLazyFrame.sink_ndjson>  s      r   )r  rY  rZ  r[  r\  r]  r   r  c       	            g r   rY  r  s              r   r  r  P  s      r   c       	           [        U	5      n	SSKJn  U" XAUS5      nAU(       a  [        UR	                  5       5      nOSn[        U5      nU=(       d    SUUS.nU R                  R                  UUUUUS9nU(       d@  UR                  U
R                  5      n[        R                  U5      nUR                  U	S9  g[        R                  U5      $ )	a   
Evaluate the query in streaming mode and write to an NDJSON file.

This allows streaming results that are larger than RAM to be written to disk.

Parameters
----------
path
    File path to which the file should be written.
maintain_order
    Maintain the order in which data is processed.
    Setting this to `False` will be slightly faster.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
storage_options
    Options that indicate how to connect to a cloud provider.

    The cloud providers currently supported are AWS, GCP, and Azure.
    See supported keys here:

    * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
    * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
    * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
    * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

    If `storage_options` is not provided, Polars will try to infer the
    information from environment variables.
credential_provider
    Provide a function that can be called to provide cloud storage
    credentials. The function is expected to return a dictionary of
    credential keys along with an optional credential expiry time.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
retries
    Number of retries if accessing a cloud instance fails.
sync_on_close: { None, 'data', 'all' }
    Sync to disk before closing a file.

    * `None` does not sync.
    * `data` syncs the file contents.
    * `all` syncs the file contents and metadata.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
mkdir: bool
    Recursively create all the directories in the path.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
lazy: bool
    Wait to start execution until `collect` is called.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.
engine
    Select the engine used to process the query, optional.
    At the moment, if set to `"auto"` (default), the query is run
    using the polars streaming engine. Polars will also
    attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
    environment variable. If it cannot run the query using the
    selected engine, the query is run using the polars streaming
    engine.
optimizations
    The optimization passes done during query optimization.

    This has no effect if `lazy` is set to `True`.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

Returns
-------
DataFrame

Examples
--------
>>> lf = pl.scan_csv("/path/to/my_larger_than_ram_file.csv")  # doctest: +SKIP
>>> lf.sink_ndjson("out.ndjson")  # doctest: +SKIP
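
Intermediate directories can be created automatically via the `mkdir`
parameter described above:

>>> lf.sink_ndjson("output/nested/out.ndjson", mkdir=True)  # doctest: +SKIP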
r   rh  r  Nrj  rk  )rn  ro  rZ  r[  rp  r   )r   rq  ri  r   r   r   r   	sink_jsonr  r  r   r   r  )r   r   r  rY  rZ  r[  r\  r]  r   r   r  ri  rv  rn  rp  rx  r   s                    r   r  r  b  s    P  '	
 'H'
#  "?#8#8#:;O #O &*4f,
 $$) ;% % 
 ..}/H/HIF''/CKKvK&$$V,,r   zg`LazyFrame.fetch` is deprecated; use `LazyFrame.collect` instead, in conjunction with a call to `head`.c                D    U R                  U5      R                  " S0 UD6$ )a  
Collect a small number of rows for debugging purposes.

.. deprecated:: 1.0
    Use :meth:`collect` instead, in conjunction with a call to :meth:`head`.

Notes
-----
This is similar to a :func:`collect` operation, but it overwrites the number of
rows read by *every* scan operation. Be aware that `fetch` does not guarantee
the final number of rows in the DataFrame. Filters, join operations and fewer
rows being available in the scanned data will all influence the final number
of rows (joins are especially susceptible to this, and may return no data
at all if `n_rows` is too small as the join keys may not be present).

Warnings
--------
This is strictly a utility function that can help to debug queries using a
smaller number of rows, and should *not* be used in production code.
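
The recommended replacement is to limit the row count explicitly and then
collect, for example:

>>> lf.head(5).collect()  # doctest: +SKIP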
rY  )headr  )r   n_rowsri  s      r   fetchLazyFrame.fetch  s!    : yy ((2622r   c                    U $ )a  
Return lazy representation, i.e. itself.

Useful for writing code that expects either a :class:`DataFrame` or
:class:`LazyFrame`. On LazyFrame this is a no-op, and returns the same object.

Returns
-------
LazyFrame

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [None, 2, 3, 4],
...         "b": [0.5, None, 2.5, 13],
...         "c": [True, True, False, None],
...     }
... )
>>> lf.lazy()
<LazyFrame at ...>
rY  r   s    r   r   LazyFrame.lazy  s	    . r   c                T    U R                  U R                  R                  5       5      $ )z
Cache the result once the execution of the physical plan hits this node.

Using this is not recommended, as the optimizer can likely do a better job.
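
Examples
--------
A cached node is computed once and re-used wherever it occurs in the same
query plan (a minimal sketch; the cached frame feeds both sides of the
concatenation):

>>> lf = pl.LazyFrame({"a": [1, 2, 3]}).cache()
>>> pl.concat(
...     [lf.filter(pl.col("a") > 1), lf.filter(pl.col("a") <= 1)]
... ).collect()  # doctest: +SKIP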
)r   r   cacher   s    r   r  LazyFrame.cache)  s      		 122r   r   c          	        [        U[        5      (       d@  [        U5      nU R                  U R                  R                  UR                  U5      5      $ 0 nUR                  5        H  u  pE[        U5      (       dA  [        U[        5      (       d,  [        U[        5      (       a"  [        S U 5       5      (       a  [        U5      n[        U5      nUR                  [        U[        5      (       a  XE0O[         R#                  [%        X5      U5      5        M     U R                  U R                  R'                  X25      5      $ )uc
  
Cast LazyFrame column(s) to the specified dtype(s).

Parameters
----------
dtypes
    Mapping of column names (or selector) to dtypes, or a single dtype
    to which all columns will be cast.
strict
    Throw an error if a cast could not be done (for instance, due to an
    overflow).

Examples
--------
>>> from datetime import date
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": [date(2020, 1, 2), date(2021, 3, 4), date(2022, 5, 6)],
...     }
... )

Cast specific frame columns to the specified dtypes:

>>> lf.cast({"foo": pl.Float32, "bar": pl.UInt8}).collect()
shape: (3, 3)
┌─────┬─────┬────────────┐
│ foo ┆ bar ┆ ham        │
│ --- ┆ --- ┆ ---        │
│ f32 ┆ u8  ┆ date       │
╞═════╪═════╪════════════╡
│ 1.0 ┆ 6   ┆ 2020-01-02 │
│ 2.0 ┆ 7   ┆ 2021-03-04 │
│ 3.0 ┆ 8   ┆ 2022-05-06 │
└─────┴─────┴────────────┘

Cast all frame columns matching one dtype (or dtype group) to another dtype:

>>> lf.cast({pl.Date: pl.Datetime}).collect()
shape: (3, 3)
┌─────┬─────┬─────────────────────┐
│ foo ┆ bar ┆ ham                 │
│ --- ┆ --- ┆ ---                 │
│ i64 ┆ f64 ┆ datetime[μs]        │
╞═════╪═════╪═════════════════════╡
│ 1   ┆ 6.0 ┆ 2020-01-02 00:00:00 │
│ 2   ┆ 7.0 ┆ 2021-03-04 00:00:00 │
│ 3   ┆ 8.0 ┆ 2022-05-06 00:00:00 │
└─────┴─────┴─────────────────────┘

Use selectors to define the columns being cast:

>>> import polars.selectors as cs
>>> lf.cast({cs.numeric(): pl.UInt32, cs.temporal(): pl.String}).collect()
shape: (3, 3)
┌─────┬─────┬────────────┐
│ foo ┆ bar ┆ ham        │
│ --- ┆ --- ┆ ---        │
│ u32 ┆ u32 ┆ str        │
╞═════╪═════╪════════════╡
│ 1   ┆ 6   ┆ 2020-01-02 │
│ 2   ┆ 7   ┆ 2021-03-04 │
│ 3   ┆ 8   ┆ 2022-05-06 │
└─────┴─────┴────────────┘

Cast all frame columns to the specified dtype:

>>> lf.cast(pl.String).collect().to_dict(as_series=False)
{'foo': ['1', '2', '3'],
 'bar': ['6.0', '7.0', '8.0'],
 'ham': ['2020-01-02', '2021-03-04', '2022-05-06']}
c              3  8   #    U  H  n[        U5      v   M     g 7fr   )rQ   r  xs     r   r  !LazyFrame.cast.<locals>.<genexpr>  s     1Pa/!2D2Da   )r   r   rR   r   r   cast_all_pydatatype_exprr   rQ   rT   r   allrb   rS   updater   r  fromkeysrc   r  )r   r   r   cast_mapr  r  s         r   r  LazyFrame.cast1  s    j &'**-f5F##DII$6$6v7N7NPV$WXXHA""jM&B&B1j))c1Pa1P.P.PQK$U+EOOa%% 
]]?4#;UC ' 		x @AAr   c                    [         R                  " U R                  5       S9R                  U5      R	                  5       $ )u}  
Create an empty copy of the current LazyFrame, with zero to 'n' rows.

Returns a copy with an identical schema but no data.

Parameters
----------
n
    Number of (empty) rows to return in the cleared frame.

See Also
--------
clone : Cheap deepcopy/clone.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [None, 2, 3, 4],
...         "b": [0.5, None, 2.5, 13],
...         "c": [True, True, False, None],
...     }
... )
>>> lf.clear().collect()
shape: (0, 3)
┌─────┬─────┬──────┐
│ a   ┆ b   ┆ c    │
│ --- ┆ --- ┆ ---  │
│ i64 ┆ f64 ┆ bool │
╞═════╪═════╪══════╡
└─────┴─────┴──────┘

>>> lf.clear(2).collect()
shape: (2, 3)
┌──────┬──────┬──────┐
│ a    ┆ b    ┆ c    │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ f64  ┆ bool │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
│ null ┆ null ┆ null │
└──────┴──────┴──────┘
)r   )r  ro   r   clearr   r   r  s     r   r  LazyFrame.clear  s1    X ||4#6#6#89??BGGIIr   c                T    U R                  U R                  R                  5       5      $ )a  
Create a copy of this LazyFrame.

This is a cheap operation that does not copy data.

See Also
--------
clear : Create an empty copy of the current LazyFrame, with identical
    schema but no data.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [None, 2, 3, 4],
...         "b": [0.5, None, 2.5, 13],
...         "c": [True, True, False, None],
...     }
... )
>>> lf.clone()
<LazyFrame at ...>
)r   r   r0  r   s    r   r0  LazyFrame.clone  s     . 		 122r   )invertc               B   / n/ nU GH~  nUSL a  U(       d  USL a	  U(       d  M  USL a  U(       d  USL a  U(       d  U R                  5       s  $ [        U5      (       a  [        U5      n[        USS9(       a*  UR	                  [
        R                  " U[        S95        M  [        U5      =n(       a  [        S U 5       5      (       dN  U(       d  [        U[
        R                  5      (       dr  [        U[        5      (       a  X`R                  5       ;   dJ  [        U[
        R                  5      (       a  SUR                   S3O
[        U5      nSU 3n	[!        U	5      eUR#                  S	 [%        U5       5       5        GM     UR#                  S
 UR'                  5        5       5        U(       d  U(       d  Sn	[!        U	5      eU(       a'  [)        U5      S:  a  [*        R,                  " U6 OUS   OSn
U(       a-  [*        R.                  " [1        [2        U5      5      nU
c  UOX-  n
U
c  U R5                  U R6                  5      $ U(       a  U R6                  R8                  OU R6                  R:                  nU R5                  U" U
R<                  5      5      $ )z"Common code for filter/remove ops.FT)include_series)r  c              3  `   #    U  H$  n[        U[        R                  5      (       + v   M&     g 7fr   )r   r  rq   r  s     r   r  $LazyFrame._filter.<locals>.<genexpr>  s!     >AqJq"''222As   ,.u   Series(…, dtype=)z invalid predicate for `filter`: c              3  8   #    U  H  n[        U5      v   M     g 7fr   )r8   r  s     r   r  r    s      &*KQIaLL*Kr  c              3  p   #    U  H,  u  p[         R                  " U5      R                  U5      v   M.     g 7fr   )r  r  eq)r  r  values      r   r  r    s*      
5HkdAEE$KNN5!!5Hs   46z5at least one predicate or constraint must be providedrn  r   N)r  r,   tupler0   r  r  r  r;   r1   anyr   rq   r   r   r  reprr   r  r&   r   r  r  all_horizontalr  r   r   r   r   removefilter_pyexpr)r   
predicatesconstraintsr  all_predicatesboolean_masksr  is_seqerrr   combined_predicate	mask_exprfilter_methods                r   _filterLazyFrame._filter  s6    )+AU
v19VT	f!u*Vzz|#Q!H  $7$$RYYq%@A&q>))>A>>>"1bgg..#As++5H5H5J0J "!RYY// )	3a 
 9>n$%% &*H*K& ; D 	 
5@5F5F5H
 	
 -ICC.   ~&*   .1#A&  	 fT=9:I &- 3  %##DII..,2		((		8H8H.@.H.H IJJr   c                    U(       dU  U(       a  [        U5      S:X  a  US   SL a  U R                  5       $ [        U5      S:X  a  US   SL a  U R                  5       $ U R                  UUSS9$ )u  
Filter rows in the LazyFrame based on a predicate expression.

The original order of the remaining rows is preserved.

Rows where the filter predicate does not evaluate to True are discarded
(this includes rows where the predicate evaluates as `null`).

Parameters
----------
predicates
    Expression that evaluates to a boolean Series.
constraints
    Column filters; use `name = value` to filter columns using the supplied
    value. Each constraint behaves the same as `pl.col(name).eq(value)`,
    and is implicitly joined with the other filter conditions using `&`.

Notes
-----
If you are transitioning from Pandas, and performing filter operations based on
the comparison of two or more columns, please note that in Polars any comparison
involving `null` values will result in a `null` result, *not* boolean True or
False. As a result, these rows will not be retained. Ensure that null values
are handled appropriately to avoid unexpected behaviour (see examples below).

See Also
--------
remove

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3, None, 4, None, 0],
...         "bar": [6, 7, 8, None, None, 9, 0],
...         "ham": ["a", "b", "c", None, "d", "e", "f"],
...     }
... )

Filter on one condition:

>>> lf.filter(pl.col("foo") > 1).collect()
shape: (3, 3)
┌─────┬──────┬─────┐
│ foo ┆ bar  ┆ ham │
│ --- ┆ ---  ┆ --- │
│ i64 ┆ i64  ┆ str │
╞═════╪══════╪═════╡
│ 2   ┆ 7    ┆ b   │
│ 3   ┆ 8    ┆ c   │
│ 4   ┆ null ┆ d   │
└─────┴──────┴─────┘

Filter on multiple conditions:

>>> lf.filter((pl.col("foo") < 3) & (pl.col("ham") == "a")).collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
└─────┴─────┴─────┘

Provide multiple filters using `*args` syntax:

>>> lf.filter(
...     pl.col("foo") == 1,
...     pl.col("ham") == "a",
... ).collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
└─────┴─────┴─────┘

Provide multiple filters using `**kwargs` syntax:

>>> lf.filter(foo=1, ham="a").collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
└─────┴─────┴─────┘

Filter on an OR condition:

>>> lf.filter(
...     (pl.col("foo") == 1) | (pl.col("ham") == "c"),
... ).collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
│ 3   ┆ 8   ┆ c   │
└─────┴─────┴─────┘

Filter by comparing two columns against each other

>>> lf.filter(
...     pl.col("foo") == pl.col("bar"),
... ).collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 0   ┆ 0   ┆ f   │
└─────┴─────┴─────┘

>>> lf.filter(
...     pl.col("foo") != pl.col("bar"),
... ).collect()
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
│ 2   ┆ 7   ┆ b   │
│ 3   ┆ 8   ┆ c   │
└─────┴─────┴─────┘

Notice how the row with `None` values is filtered out; using `ne_missing`
ensures that null values compare equal, and we get similar behaviour to Pandas:

>>> lf.filter(
...     pl.col("foo").ne_missing(pl.col("bar")),
... ).collect()
shape: (5, 3)
┌──────┬──────┬─────┐
│ foo  ┆ bar  ┆ ham │
│ ---  ┆ ---  ┆ --- │
│ i64  ┆ i64  ┆ str │
╞══════╪══════╪═════╡
│ 1    ┆ 6    ┆ a   │
│ 2    ┆ 7    ┆ b   │
│ 3    ┆ 8    ┆ c   │
│ 4    ┆ null ┆ d   │
│ null ┆ 9    ┆ e   │
└──────┴──────┴─────┘
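
Expression predicates and keyword constraints may also be mixed in a single
call; the constraints are joined to the other conditions with `&` (expected
output for the frame defined above):

>>> lf.filter(pl.col("foo") > 1, ham="c").collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3   ┆ 8   ┆ c   │
└─────┴─────┴─────┘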
rn  r   TFr  r  r  )r  r0  r  r  r   r  r  s      r   r  LazyFrame.filter6  so    J #j/Q"6:a=D;Pzz|#:!#
1(>zz|#||!#  
 	
r   c                    U(       dU  U(       a  [        U5      S:X  a  US   SL a  U R                  5       $ [        U5      S:X  a  US   SL a  U R                  5       $ U R                  UUSS9$ )u  
Remove rows, dropping those that match the given predicate expression(s).

The original order of the remaining rows is preserved.

Rows where the filter predicate does not evaluate to True are retained
(this includes rows where the predicate evaluates as `null`).

Parameters
----------
predicates
    Expression that evaluates to a boolean Series.
constraints
    Column filters; use `name = value` to filter columns using the supplied
    value. Each constraint behaves the same as `pl.col(name).eq(value)`,
    and is implicitly joined with the other filter conditions using `&`.

Notes
-----
If you are transitioning from Pandas, and performing filter operations based on
the comparison of two or more columns, please note that in Polars any comparison
involving `null` values will result in a `null` result, *not* boolean True or
False. As a result, these rows will not be removed. Ensure that null values
are handled appropriately to avoid unexpected behaviour (see examples below).

See Also
--------
filter

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [2, 3, None, 4, 0],
...         "bar": [5, 6, None, None, 0],
...         "ham": ["a", "b", None, "c", "d"],
...     }
... )

Remove rows matching a condition:

>>> lf.remove(
...     pl.col("bar") >= 5,
... ).collect()
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
│ 4    ┆ null ┆ c    │
│ 0    ┆ 0    ┆ d    │
└──────┴──────┴──────┘

Discard rows based on multiple conditions, combined with and/or operators:

>>> lf.remove(
...     (pl.col("foo") >= 0) & (pl.col("bar") >= 0),
... ).collect()
shape: (2, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
│ 4    ┆ null ┆ c    │
└──────┴──────┴──────┘

>>> lf.remove(
...     (pl.col("foo") >= 0) | (pl.col("bar") >= 0),
... ).collect()
shape: (1, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
└──────┴──────┴──────┘

Provide multiple constraints using `*args` syntax:

>>> lf.remove(
...     pl.col("ham").is_not_null(),
...     pl.col("bar") >= 0,
... ).collect()
shape: (2, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
│ 4    ┆ null ┆ c    │
└──────┴──────┴──────┘

Provide constraints(s) using `**kwargs` syntax:

>>> lf.remove(foo=0, bar=0).collect()
shape: (4, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ 2    ┆ 5    ┆ a    │
│ 3    ┆ 6    ┆ b    │
│ null ┆ null ┆ null │
│ 4    ┆ null ┆ c    │
└──────┴──────┴──────┘

Remove rows by comparing two columns against each other; in this case, we
remove rows where the two columns are not equal (using `ne_missing` to
ensure that null values compare equal):

>>> lf.remove(
...     pl.col("foo").ne_missing(pl.col("bar")),
... ).collect()
shape: (2, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
│ 0    ┆ 0    ┆ d    │
└──────┴──────┴──────┘
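
Expression predicates and keyword constraints may also be mixed in a single
call; rows are removed only where all of the combined conditions are true
(expected output for the frame defined above):

>>> lf.remove(pl.col("foo") >= 3, ham="c").collect()
shape: (4, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ i64  ┆ i64  ┆ str  │
╞══════╪══════╪══════╡
│ 2    ┆ 5    ┆ a    │
│ 3    ┆ 6    ┆ b    │
│ null ┆ null ┆ null │
│ 0    ┆ 0    ┆ d    │
└──────┴──────┴──────┘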
rn  r   TFr  )r  r  r0  r  r  s      r   r  LazyFrame.remove  so    Z #j/Q"6:a=D;Pzz|#:!#
1(>zz|#||!#  
 	
r   c                    [        [        [        R                  R	                  SS5      5      5      n[        U0 UDSU0D6nU R                  U R                  R                  U5      5      $ )u  
Select columns from this LazyFrame.

Parameters
----------
*exprs
    Column(s) to select, specified as positional arguments.
    Accepts expression input. Strings are parsed as column names,
    other non-expression inputs are parsed as literals.
**named_exprs
    Additional columns to select, specified as keyword arguments.
    The columns will be renamed to the keyword used.

Examples
--------
Pass the name of a column to select that column.

>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6, 7, 8],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.select("foo").collect()
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
└─────┘

Multiple columns can be selected by passing a list of column names.

>>> lf.select(["foo", "bar"]).collect()
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 6   │
│ 2   ┆ 7   │
│ 3   ┆ 8   │
└─────┴─────┘

Multiple columns can also be selected using positional arguments instead of a
list. Expressions are also accepted.

>>> lf.select(pl.col("foo"), pl.col("bar") + 1).collect()
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
│ 3   ┆ 9   │
└─────┴─────┘

Use keyword arguments to easily name your expression inputs.

>>> lf.select(
...     threshold=pl.when(pl.col("foo") > 2).then(10).otherwise(0)
... ).collect()
shape: (3, 1)
┌───────────┐
│ threshold │
│ ---       │
│ i32       │
╞═══════════╡
│ 0         │
│ 0         │
│ 10        │
└───────────┘
        """
        structify = bool(int(os.environ.get("POLARS_AUTO_STRUCTIFY", "0")))
        pyexprs = parse_into_list_of_expressions(
            *exprs, **named_exprs, __structify=structify
        )
        return self._from_pyldf(self._ldf.select(pyexprs))

    def select_seq(self, *exprs: IntoExpr | Iterable[IntoExpr], **named_exprs: IntoExpr) -> LazyFrame:
        """
Select columns from this LazyFrame.

This will run all expressions sequentially instead of in parallel.
Use this when the work per expression is cheap.

Parameters
----------
*exprs
    Column(s) to select, specified as positional arguments.
    Accepts expression input. Strings are parsed as column names,
    other non-expression inputs are parsed as literals.
**named_exprs
    Additional columns to select, specified as keyword arguments.
    The columns will be renamed to the keyword used.

See Also
--------
select
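
Examples
--------
`select_seq` accepts the same inputs as `select`; only the evaluation
strategy differs. A small sketch (expected output shown for illustration):

>>> lf = pl.LazyFrame({"foo": [1, 2, 3], "bar": [6, 7, 8]})
>>> lf.select_seq("foo", pl.col("bar") + 1).collect()
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
│ 3   ┆ 9   │
└─────┴─────┘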
r  r   r  )	r  r  r  r  r(  r&   r   r   
select_seqr  s        r   r  LazyFrame.select_seq  sb    . RZZ^^,CQGHI	0
!
/8
 		 4 4W =>>r   )r  c               >   UR                  5        HY  n[        U[        [        R                  [        R
                  45      (       a  M9  S[        U5       SU< SU< S3n[        U5      e   [        U0 UD6nU R                  R                  Xa5      n[        U5      $ )u
  
Start a group by operation.

Parameters
----------
*by
    Column(s) to group by. Accepts expression input. Strings are parsed as
    column names.
maintain_order
    Ensure that the order of the groups is consistent with the input data.
    This is slower than a default group by.
    Setting this to `True` blocks the possibility
    to run on the streaming engine.
**named_by
    Additional columns to group by, specified as keyword arguments.
    The columns will be renamed to the keyword used.

Examples
--------
Group by one column and call `agg` to compute the grouped sum of another
column.

>>> lf = pl.LazyFrame(
...     {
...         "a": ["a", "b", "a", "b", "c"],
...         "b": [1, 2, 1, 3, 3],
...         "c": [5, 4, 3, 2, 1],
...     }
... )
>>> lf.group_by("a").agg(pl.col("b").sum()).collect()  # doctest: +IGNORE_RESULT
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a   ┆ 2   │
│ b   ┆ 5   │
│ c   ┆ 3   │
└─────┴─────┘

Set `maintain_order=True` to ensure the order of the groups is consistent with
the input.

>>> lf.group_by("a", maintain_order=True).agg(pl.col("c")).collect()
shape: (3, 2)
┌─────┬───────────┐
│ a   ┆ c         │
│ --- ┆ ---       │
│ str ┆ list[i64] │
╞═════╪═══════════╡
│ a   ┆ [5, 3]    │
│ b   ┆ [4, 2]    │
│ c   ┆ [1]       │
└─────┴───────────┘

Group by multiple columns by passing a list of column names.

>>> lf.group_by(["a", "b"]).agg(pl.max("c")).collect()  # doctest: +SKIP
shape: (4, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ a   ┆ 1   ┆ 5   │
│ b   ┆ 2   ┆ 4   │
│ b   ┆ 3   ┆ 2   │
│ c   ┆ 3   ┆ 1   │
└─────┴─────┴─────┘

Or use positional arguments to group by multiple columns in the same way.
Expressions are also accepted.

>>> lf.group_by("a", pl.col("b") // 2).agg(
...     pl.col("c").mean()
... ).collect()  # doctest: +SKIP
shape: (3, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═════╪═════╡
│ a   ┆ 0   ┆ 4.0 │
│ b   ┆ 1   ┆ 3.0 │
│ c   ┆ 1   ┆ 1.0 │
└─────┴─────┴─────┘
z=Expected Polars expression or object convertible to one, got z&.

Hint: if you tried
    group_by(by=z;)
then you probably want to use this instead:
    group_by(r  )valuesr   r   r  rq   r  ru  r   r&   r   group_byr]   )r   r  r  named_byr  r   r  lgbs           r   r  LazyFrame.group_by  s    | __&Eec277BII%>??STXY^T_S` a'',i 0$$)9A	/   n$ ' /?h?ii  73r   r  z0.20.14right)offsetclosedr  c                   [        U5      nUc  [        [        U5      5      nUb  [        U5      O/ n[        U5      n[        U5      nU R                  R                  XbX4U5      n[        U5      $ )u  
Create rolling groups based on a temporal or integer column.

Different from a `group_by_dynamic` the windows are now determined by the
individual values and are not of constant intervals. For constant intervals
use :func:`LazyFrame.group_by_dynamic`.

If you have a time series `<t_0, t_1, ..., t_n>`, then by default the
windows created will be

    * (t_0 - period, t_0]
    * (t_1 - period, t_1]
    * ...
    * (t_n - period, t_n]

whereas if you pass a non-default `offset`, then the windows will be

    * (t_0 + offset, t_0 + offset + period]
    * (t_1 + offset, t_1 + offset + period]
    * ...
    * (t_n + offset, t_n + offset + period]

The `period` and `offset` arguments are created either from a timedelta, or
by using the following string language:

- 1ns   (1 nanosecond)
- 1us   (1 microsecond)
- 1ms   (1 millisecond)
- 1s    (1 second)
- 1m    (1 minute)
- 1h    (1 hour)
- 1d    (1 calendar day)
- 1w    (1 calendar week)
- 1mo   (1 calendar month)
- 1q    (1 calendar quarter)
- 1y    (1 calendar year)
- 1i    (1 index count)

Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

By "calendar day", we mean the corresponding time on the next day (which may
not be 24 hours, due to daylight savings). Similarly for "calendar week",
"calendar month", "calendar quarter", and "calendar year".

.. versionchanged:: 0.20.14
    The `by` parameter was renamed `group_by`.

Parameters
----------
index_column
    Column used to group based on the time window.
    Often of type Date/Datetime.
    This column must be sorted in ascending order (or, if `group_by` is
    specified, then it must be sorted in ascending order within each group).

    In case of a rolling group by on indices, dtype needs to be one of
    {UInt32, UInt64, Int32, Int64}. Note that the first three get temporarily
    cast to Int64, so if performance matters use an Int64 column.
period
    Length of the window - must be non-negative.
offset
    Offset of the window. Default is `-period`.
closed : {'right', 'left', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive).
group_by
    Also group by this column/these columns

Returns
-------
LazyGroupBy
    Object you can call `.agg` on to aggregate by groups, the result
    of which will be sorted by `index_column` (but note that if `group_by`
    columns are passed, it will only be sorted within each group).

See Also
--------
group_by_dynamic

Examples
--------
>>> dates = [
...     "2020-01-01 13:45:48",
...     "2020-01-01 16:42:13",
...     "2020-01-01 16:45:09",
...     "2020-01-02 18:12:48",
...     "2020-01-03 19:45:32",
...     "2020-01-08 23:16:43",
... ]
>>> df = pl.LazyFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_columns(
...     pl.col("dt").str.strptime(pl.Datetime).set_sorted()
... )
>>> out = (
...     df.rolling(index_column="dt", period="2d")
...     .agg(
...         pl.sum("a").alias("sum_a"),
...         pl.min("a").alias("min_a"),
...         pl.max("a").alias("max_a"),
...     )
...     .collect()
... )
>>> out
shape: (6, 4)
┌─────────────────────┬───────┬───────┬───────┐
│ dt                  ┆ sum_a ┆ min_a ┆ max_a │
│ ---                 ┆ ---   ┆ ---   ┆ ---   │
│ datetime[μs]        ┆ i64   ┆ i64   ┆ i64   │
╞═════════════════════╪═══════╪═══════╪═══════╡
│ 2020-01-01 13:45:48 ┆ 3     ┆ 3     ┆ 3     │
│ 2020-01-01 16:42:13 ┆ 10    ┆ 3     ┆ 7     │
│ 2020-01-01 16:45:09 ┆ 15    ┆ 3     ┆ 7     │
│ 2020-01-02 18:12:48 ┆ 24    ┆ 3     ┆ 9     │
│ 2020-01-03 19:45:32 ┆ 11    ┆ 2     ┆ 9     │
│ 2020-01-08 23:16:43 ┆ 1     ┆ 1     ┆ 1     │
└─────────────────────┴───────┴───────┴───────┘
)r%   r   r   r&   r   rollingr]   )	r   index_columnperiodr  r  r  index_column_py
pyexprs_byr  s	            r   r  LazyFrame.rollingg  sv    | 0=>+,DV,LMF 9A8L*84RT 	 *&1)&1iiT3r   r&  window)r  r  include_boundariesr  labelr  start_byc                   [        U5      n
Uc  SnUc  Un[        U5      n[        U5      n[        U5      nUb  [        U5      O/ nU R                  R	                  U
UUUUUUUU	5	      n[        U5      $ )u5  
Group based on a time value (or index value of type Int32, Int64).

Time windows are calculated and rows are assigned to windows. Different from a
normal group by is that a row can be member of multiple groups.
By default, the windows look like:

- [start, start + period)
- [start + every, start + every + period)
- [start + 2*every, start + 2*every + period)
- ...

where `start` is determined by `start_by`, `offset`, `every`, and the earliest
datapoint. See the `start_by` argument description for details.

.. warning::
    The index column must be sorted in ascending order. If `group_by` is passed, then
    the index column must be sorted in ascending order within each group.

.. versionchanged:: 0.20.14
    The `by` parameter was renamed `group_by`.

Parameters
----------
index_column
    Column used to group based on the time window.
    Often of type Date/Datetime.
    This column must be sorted in ascending order (or, if `group_by` is specified,
    then it must be sorted in ascending order within each group).

    In case of a dynamic group by on indices, dtype needs to be one of
    {Int32, Int64}. Note that Int32 gets temporarily cast to Int64, so if
    performance matters use an Int64 column.
every
    interval of the window
period
    length of the window, if None it will equal 'every'
offset
    offset of the window, does not take effect if `start_by` is 'datapoint'.
    Defaults to zero.
include_boundaries
    Add the lower and upper bound of the window to the "_lower_boundary" and
    "_upper_boundary" columns. This will impact performance because it's harder to
    parallelize
closed : {'left', 'right', 'both', 'none'}
    Define which sides of the temporal interval are closed (inclusive).
label : {'left', 'right', 'datapoint'}
    Define which label to use for the window:

    - 'left': lower boundary of the window
    - 'right': upper boundary of the window
    - 'datapoint': the first value of the index column in the given window.
      If you don't need the label to be at one of the boundaries, choose this
      option for maximum performance
group_by
    Also group by this column/these columns
start_by : {'window', 'datapoint', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'}
    The strategy to determine the start of the first window by.

    * 'window': Start by taking the earliest timestamp, truncating it with
      `every`, and then adding `offset`.
      Note that weekly windows start on Monday.
    * 'datapoint': Start from the first encountered data point.
    * a day of the week (only takes effect if `every` contains `'w'`):

      * 'monday': Start the window on the Monday before the first data point.
      * 'tuesday': Start the window on the Tuesday before the first data point.
      * ...
      * 'sunday': Start the window on the Sunday before the first data point.

      The resulting window is then shifted back until the earliest datapoint
      is in or in front of it.

Returns
-------
LazyGroupBy
    Object you can call `.agg` on to aggregate by groups, the result
    of which will be sorted by `index_column` (but note that if `group_by` columns are
    passed, it will only be sorted within each group).

See Also
--------
rolling

Notes
-----
1) If you're coming from pandas, then

   .. code-block:: python

       # polars
       df.group_by_dynamic("ts", every="1d").agg(pl.col("value").sum())

   is equivalent to

   .. code-block:: python

       # pandas
       df.set_index("ts").resample("D")["value"].sum().reset_index()

   though note that, unlike pandas, polars doesn't add extra rows for empty
   windows. If you need `index_column` to be evenly spaced, then please combine
   with :func:`DataFrame.upsample`.

2) The `every`, `period` and `offset` arguments are created with
   the following string language:

   - 1ns   (1 nanosecond)
   - 1us   (1 microsecond)
   - 1ms   (1 millisecond)
   - 1s    (1 second)
   - 1m    (1 minute)
   - 1h    (1 hour)
   - 1d    (1 calendar day)
   - 1w    (1 calendar week)
   - 1mo   (1 calendar month)
   - 1q    (1 calendar quarter)
   - 1y    (1 calendar year)
   - 1i    (1 index count)

   Or combine them (except in `every`):
   "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

   By "calendar day", we mean the corresponding time on the next day (which may
   not be 24 hours, due to daylight savings). Similarly for "calendar week",
   "calendar month", "calendar quarter", and "calendar year".

   In case of a group_by_dynamic on an integer column, the windows are defined by:

   - "1i"      # length 1
   - "10i"     # length 10

Examples
--------
>>> from datetime import datetime
>>> lf = pl.LazyFrame(
...     {
...         "time": pl.datetime_range(
...             start=datetime(2021, 12, 16),
...             end=datetime(2021, 12, 16, 3),
...             interval="30m",
...             eager=True,
...         ),
...         "n": range(7),
...     }
... )
>>> lf.collect()
shape: (7, 2)
┌─────────────────────┬─────┐
│ time                ┆ n   │
│ ---                 ┆ --- │
│ datetime[μs]        ┆ i64 │
╞═════════════════════╪═════╡
│ 2021-12-16 00:00:00 ┆ 0   │
│ 2021-12-16 00:30:00 ┆ 1   │
│ 2021-12-16 01:00:00 ┆ 2   │
│ 2021-12-16 01:30:00 ┆ 3   │
│ 2021-12-16 02:00:00 ┆ 4   │
│ 2021-12-16 02:30:00 ┆ 5   │
│ 2021-12-16 03:00:00 ┆ 6   │
└─────────────────────┴─────┘

Group by windows of 1 hour.

>>> lf.group_by_dynamic("time", every="1h", closed="right").agg(
...     pl.col("n")
... ).collect()
shape: (4, 2)
┌─────────────────────┬───────────┐
│ time                ┆ n         │
│ ---                 ┆ ---       │
│ datetime[μs]        ┆ list[i64] │
╞═════════════════════╪═══════════╡
│ 2021-12-15 23:00:00 ┆ [0]       │
│ 2021-12-16 00:00:00 ┆ [1, 2]    │
│ 2021-12-16 01:00:00 ┆ [3, 4]    │
│ 2021-12-16 02:00:00 ┆ [5, 6]    │
└─────────────────────┴───────────┘

The window boundaries can also be added to the aggregation result

>>> lf.group_by_dynamic(
...     "time", every="1h", include_boundaries=True, closed="right"
... ).agg(pl.col("n").mean()).collect()
shape: (4, 4)
┌─────────────────────┬─────────────────────┬─────────────────────┬─────┐
│ _lower_boundary     ┆ _upper_boundary     ┆ time                ┆ n   │
│ ---                 ┆ ---                 ┆ ---                 ┆ --- │
│ datetime[μs]        ┆ datetime[μs]        ┆ datetime[μs]        ┆ f64 │
╞═════════════════════╪═════════════════════╪═════════════════════╪═════╡
│ 2021-12-15 23:00:00 ┆ 2021-12-16 00:00:00 ┆ 2021-12-15 23:00:00 ┆ 0.0 │
│ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 1.5 │
│ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 3.5 │
│ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 5.5 │
└─────────────────────┴─────────────────────┴─────────────────────┴─────┘

When closed="left", the window excludes the right end of interval:
[lower_bound, upper_bound)

>>> lf.group_by_dynamic("time", every="1h", closed="left").agg(
...     pl.col("n")
... ).collect()
shape: (4, 2)
┌─────────────────────┬───────────┐
│ time                ┆ n         │
│ ---                 ┆ ---       │
│ datetime[μs]        ┆ list[i64] │
╞═════════════════════╪═══════════╡
│ 2021-12-16 00:00:00 ┆ [0, 1]    │
│ 2021-12-16 01:00:00 ┆ [2, 3]    │
│ 2021-12-16 02:00:00 ┆ [4, 5]    │
│ 2021-12-16 03:00:00 ┆ [6]       │
└─────────────────────┴───────────┘

When closed="both" the time values at the window boundaries belong to 2 groups.

>>> lf.group_by_dynamic("time", every="1h", closed="both").agg(
...     pl.col("n")
... ).collect()
shape: (4, 2)
┌─────────────────────┬───────────┐
│ time                ┆ n         │
│ ---                 ┆ ---       │
│ datetime[μs]        ┆ list[i64] │
╞═════════════════════╪═══════════╡
│ 2021-12-16 00:00:00 ┆ [0, 1, 2] │
│ 2021-12-16 01:00:00 ┆ [2, 3, 4] │
│ 2021-12-16 02:00:00 ┆ [4, 5, 6] │
│ 2021-12-16 03:00:00 ┆ [6]       │
└─────────────────────┴───────────┘

Dynamic group bys can also be combined with grouping on normal keys

>>> lf = lf.with_columns(groups=pl.Series(["a", "a", "a", "b", "b", "a", "a"]))
>>> lf.collect()
shape: (7, 3)
┌─────────────────────┬─────┬────────┐
│ time                ┆ n   ┆ groups │
│ ---                 ┆ --- ┆ ---    │
│ datetime[μs]        ┆ i64 ┆ str    │
╞═════════════════════╪═════╪════════╡
│ 2021-12-16 00:00:00 ┆ 0   ┆ a      │
│ 2021-12-16 00:30:00 ┆ 1   ┆ a      │
│ 2021-12-16 01:00:00 ┆ 2   ┆ a      │
│ 2021-12-16 01:30:00 ┆ 3   ┆ b      │
│ 2021-12-16 02:00:00 ┆ 4   ┆ b      │
│ 2021-12-16 02:30:00 ┆ 5   ┆ a      │
│ 2021-12-16 03:00:00 ┆ 6   ┆ a      │
└─────────────────────┴─────┴────────┘
>>> lf.group_by_dynamic(
...     "time",
...     every="1h",
...     closed="both",
...     group_by="groups",
...     include_boundaries=True,
... ).agg(pl.col("n")).collect()
shape: (6, 5)
┌────────┬─────────────────────┬─────────────────────┬─────────────────────┬───────────┐
│ groups ┆ _lower_boundary     ┆ _upper_boundary     ┆ time                ┆ n         │
│ ---    ┆ ---                 ┆ ---                 ┆ ---                 ┆ ---       │
│ str    ┆ datetime[μs]        ┆ datetime[μs]        ┆ datetime[μs]        ┆ list[i64] │
╞════════╪═════════════════════╪═════════════════════╪═════════════════════╪═══════════╡
│ a      ┆ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ [0, 1, 2] │
│ a      ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ [2]       │
│ a      ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ [5, 6]    │
│ a      ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 04:00:00 ┆ 2021-12-16 03:00:00 ┆ [6]       │
│ b      ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ [3, 4]    │
│ b      ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ [4]       │
└────────┴─────────────────────┴─────────────────────┴─────────────────────┴───────────┘

Dynamic group by on an index column

>>> lf = pl.LazyFrame(
...     {
...         "idx": pl.int_range(0, 6, eager=True),
...         "A": ["A", "A", "B", "B", "B", "C"],
...     }
... )
>>> lf.group_by_dynamic(
...     "idx",
...     every="2i",
...     period="3i",
...     include_boundaries=True,
...     closed="right",
... ).agg(pl.col("A").alias("A_agg_list")).collect()
shape: (4, 4)
┌─────────────────┬─────────────────┬─────┬─────────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ idx ┆ A_agg_list      │
│ ---             ┆ ---             ┆ --- ┆ ---             │
│ i64             ┆ i64             ┆ i64 ┆ list[str]       │
╞═════════════════╪═════════════════╪═════╪═════════════════╡
│ -2              ┆ 1               ┆ -2  ┆ ["A", "A"]      │
│ 0               ┆ 3               ┆ 0   ┆ ["A", "B", "B"] │
│ 2               ┆ 5               ┆ 2   ┆ ["B", "B", "C"] │
│ 4               ┆ 7               ┆ 4   ┆ ["C"]           │
└─────────────────┴─────────────────┴─────┴─────────────────┘
0ns)r%   r   r&   r   group_by_dynamicr]   )r   r  everyr  r  r  r  r  r  r  r  r   r  s                r   r  LazyFrame.group_by_dynamic  s    n	 0=>F>F)&1)&1(/ 9A8L*84RT 	 ii((

 3r   backward_right)left_onright_ononby_leftby_rightr  strategysuffix	toleranceallow_parallelforce_parallelcoalesceallow_exact_matchescheck_sortednessc               F   [        X5        [        U[        [        R                  45      (       a  UnUnUb  Uc  Sn[        U5      eUb  [        U[        5      (       a  U/OUnUnO?Uc  Ub5  [        U[        5      (       a  U/OUn[        U[        5      (       a  U/OUnOSnSnSnSn[        U
[        5      (       a  U
nO#[        U
[        5      (       a  [        U
5      nOU
n[        U[        R                  5      (       d  [        R                  " U5      n[        U[        R                  5      (       d  [        R                  " U5      nU R                  U R                  R                  UR                  UR                  UR                  UUUUU	UUUUUUS95      $ )u0  
Perform an asof join.

This is similar to a left-join except that we match on nearest key rather than
equal keys.

Both DataFrames must be sorted by the `on` key (within each `by` group, if
specified).

For each row in the left DataFrame:

  - A "backward" search selects the last row in the right DataFrame whose
    'on' key is less than or equal to the left's key.

  - A "forward" search selects the first row in the right DataFrame whose
    'on' key is greater than or equal to the left's key.

    A "nearest" search selects the last row in the right DataFrame whose value
    is nearest to the left's key. String keys are not currently supported for a
    nearest search.

The default is "backward".

Parameters
----------
other
    Lazy DataFrame to join with.
left_on
    Join column of the left DataFrame.
right_on
    Join column of the right DataFrame.
on
    Join column of both DataFrames. If set, `left_on` and `right_on` should be
    None.
by
    Join on these columns before doing asof join.
by_left
    Join on these columns before doing asof join.
by_right
    Join on these columns before doing asof join.
strategy : {'backward', 'forward', 'nearest'}
    Join strategy.
suffix
    Suffix to append to columns with a duplicate name.
tolerance
    Numeric tolerance. By setting this the join will only be done if the near
    keys are within this distance. If an asof join is done on columns of dtype
    "Date", "Datetime", "Duration" or "Time", use either a datetime.timedelta
    object or the following string language:

        - 1ns   (1 nanosecond)
        - 1us   (1 microsecond)
        - 1ms   (1 millisecond)
        - 1s    (1 second)
        - 1m    (1 minute)
        - 1h    (1 hour)
        - 1d    (1 calendar day)
        - 1w    (1 calendar week)
        - 1mo   (1 calendar month)
        - 1q    (1 calendar quarter)
        - 1y    (1 calendar year)

        Or combine them:
        "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".

allow_parallel
    Allow the physical plan to optionally evaluate the computation of both
    DataFrames up to the join in parallel.
force_parallel
    Force the physical plan to evaluate the computation of both DataFrames up to
    the join in parallel.
coalesce
    Coalescing behavior (merging of `on` / `left_on` / `right_on` columns):

    - True: -> Always coalesce join columns.
    - False: -> Never coalesce join columns.

    Note that joining on any other expressions than `col`
    will turn off coalescing.
allow_exact_matches
    Whether exact matches are valid join predicates.

    - If True, allow matching with the same ``on`` value
        (i.e. less-than-or-equal-to / greater-than-or-equal-to)
    - If False, don't match the same ``on`` value
        (i.e., strictly less-than / strictly greater-than).
check_sortedness
    Check the sortedness of the asof keys. If the keys are not sorted Polars
    will error. Currently, sortedness cannot be checked if 'by' groups are
    provided.


Examples
--------
>>> from datetime import date
>>> gdp = pl.LazyFrame(
...     {
...         "date": pl.date_range(
...             date(2016, 1, 1),
...             date(2020, 1, 1),
...             "1y",
...             eager=True,
...         ),
...         "gdp": [4164, 4411, 4566, 4696, 4827],
...     }
... )
>>> gdp.collect()
shape: (5, 2)
┌────────────┬──────┐
│ date       ┆ gdp  │
│ ---        ┆ ---  │
│ date       ┆ i64  │
╞════════════╪══════╡
│ 2016-01-01 ┆ 4164 │
│ 2017-01-01 ┆ 4411 │
│ 2018-01-01 ┆ 4566 │
│ 2019-01-01 ┆ 4696 │
│ 2020-01-01 ┆ 4827 │
└────────────┴──────┘

>>> population = pl.LazyFrame(
...     {
...         "date": [date(2016, 3, 1), date(2018, 8, 1), date(2019, 1, 1)],
...         "population": [82.19, 82.66, 83.12],
...     }
... ).sort("date")
>>> population.collect()
shape: (3, 2)
┌────────────┬────────────┐
│ date       ┆ population │
│ ---        ┆ ---        │
│ date       ┆ f64        │
╞════════════╪════════════╡
│ 2016-03-01 ┆ 82.19      │
│ 2018-08-01 ┆ 82.66      │
│ 2019-01-01 ┆ 83.12      │
└────────────┴────────────┘

Note how the dates don't quite match. If we join them using `join_asof` and
`strategy='backward'`, then each date from `population` which doesn't have an
exact match is matched with the closest earlier date from `gdp`:

>>> population.join_asof(gdp, on="date", strategy="backward").collect()
shape: (3, 3)
┌────────────┬────────────┬──────┐
│ date       ┆ population ┆ gdp  │
│ ---        ┆ ---        ┆ ---  │
│ date       ┆ f64        ┆ i64  │
╞════════════╪════════════╪══════╡
│ 2016-03-01 ┆ 82.19      ┆ 4164 │
│ 2018-08-01 ┆ 82.66      ┆ 4566 │
│ 2019-01-01 ┆ 83.12      ┆ 4696 │
└────────────┴────────────┴──────┘

Note how:

- date `2016-03-01` from `population` is matched with `2016-01-01` from `gdp`;
- date `2018-08-01` from `population` is matched with `2018-01-01` from `gdp`.

You can verify this by passing `coalesce=False`:

>>> population.join_asof(
...     gdp, on="date", strategy="backward", coalesce=False
... ).collect()
shape: (3, 4)
┌────────────┬────────────┬────────────┬──────┐
│ date       ┆ population ┆ date_right ┆ gdp  │
│ ---        ┆ ---        ┆ ---        ┆ ---  │
│ date       ┆ f64        ┆ date       ┆ i64  │
╞════════════╪════════════╪════════════╪══════╡
│ 2016-03-01 ┆ 82.19      ┆ 2016-01-01 ┆ 4164 │
│ 2018-08-01 ┆ 82.66      ┆ 2018-01-01 ┆ 4566 │
│ 2019-01-01 ┆ 83.12      ┆ 2019-01-01 ┆ 4696 │
└────────────┴────────────┴────────────┴──────┘

If we instead use `strategy='forward'`, then each date from `population` which
doesn't have an exact match is matched with the closest later date from `gdp`:

>>> population.join_asof(gdp, on="date", strategy="forward").collect()
shape: (3, 3)
┌────────────┬────────────┬──────┐
│ date       ┆ population ┆ gdp  │
│ ---        ┆ ---        ┆ ---  │
│ date       ┆ f64        ┆ i64  │
╞════════════╪════════════╪══════╡
│ 2016-03-01 ┆ 82.19      ┆ 4411 │
│ 2018-08-01 ┆ 82.66      ┆ 4696 │
│ 2019-01-01 ┆ 83.12      ┆ 4696 │
└────────────┴────────────┴──────┘

Note how:

- date `2016-03-01` from `population` is matched with `2017-01-01` from `gdp`;
- date `2018-08-01` from `population` is matched with `2019-01-01` from `gdp`.

Finally, `strategy='nearest'` gives us a mix of the two results above, as each
date from `population` which doesn't have an exact match is matched with the
closest date from `gdp`, regardless of whether it's earlier or later:

>>> population.join_asof(gdp, on="date", strategy="nearest").collect()
shape: (3, 3)
┌────────────┬────────────┬──────┐
│ date       ┆ population ┆ gdp  │
│ ---        ┆ ---        ┆ ---  │
│ date       ┆ f64        ┆ i64  │
╞════════════╪════════════╪══════╡
│ 2016-03-01 ┆ 82.19      ┆ 4164 │
│ 2018-08-01 ┆ 82.66      ┆ 4696 │
│ 2019-01-01 ┆ 83.12      ┆ 4696 │
└────────────┴────────────┴──────┘

Note how:

- date `2016-03-01` from `population` is matched with `2016-01-01` from `gdp`;
- date `2018-08-01` from `population` is matched with `2019-01-01` from `gdp`.

They `by` argument allows joining on another column first, before the asof join.
In this example we join by `country` first, then asof join by date, as above.

>>> gdp_dates = pl.date_range(  # fmt: skip
...     date(2016, 1, 1), date(2020, 1, 1), "1y", eager=True
... )
>>> gdp2 = pl.LazyFrame(
...     {
...         "country": ["Germany"] * 5 + ["Netherlands"] * 5,
...         "date": pl.concat([gdp_dates, gdp_dates]),
...         "gdp": [4164, 4411, 4566, 4696, 4827, 784, 833, 914, 910, 909],
...     }
... ).sort("country", "date")
>>>
>>> gdp2.collect()
shape: (10, 3)
┌─────────────┬────────────┬──────┐
│ country     ┆ date       ┆ gdp  │
│ ---         ┆ ---        ┆ ---  │
│ str         ┆ date       ┆ i64  │
╞═════════════╪════════════╪══════╡
│ Germany     ┆ 2016-01-01 ┆ 4164 │
│ Germany     ┆ 2017-01-01 ┆ 4411 │
│ Germany     ┆ 2018-01-01 ┆ 4566 │
│ Germany     ┆ 2019-01-01 ┆ 4696 │
│ Germany     ┆ 2020-01-01 ┆ 4827 │
│ Netherlands ┆ 2016-01-01 ┆ 784  │
│ Netherlands ┆ 2017-01-01 ┆ 833  │
│ Netherlands ┆ 2018-01-01 ┆ 914  │
│ Netherlands ┆ 2019-01-01 ┆ 910  │
│ Netherlands ┆ 2020-01-01 ┆ 909  │
└─────────────┴────────────┴──────┘
>>> pop2 = pl.LazyFrame(
...     {
...         "country": ["Germany"] * 3 + ["Netherlands"] * 3,
...         "date": [
...             date(2016, 3, 1),
...             date(2018, 8, 1),
...             date(2019, 1, 1),
...             date(2016, 3, 1),
...             date(2018, 8, 1),
...             date(2019, 1, 1),
...         ],
...         "population": [82.19, 82.66, 83.12, 17.11, 17.32, 17.40],
...     }
... ).sort("country", "date")
>>>
>>> pop2.collect()
shape: (6, 3)
┌─────────────┬────────────┬────────────┐
│ country     ┆ date       ┆ population │
│ ---         ┆ ---        ┆ ---        │
│ str         ┆ date       ┆ f64        │
╞═════════════╪════════════╪════════════╡
│ Germany     ┆ 2016-03-01 ┆ 82.19      │
│ Germany     ┆ 2018-08-01 ┆ 82.66      │
│ Germany     ┆ 2019-01-01 ┆ 83.12      │
│ Netherlands ┆ 2016-03-01 ┆ 17.11      │
│ Netherlands ┆ 2018-08-01 ┆ 17.32      │
│ Netherlands ┆ 2019-01-01 ┆ 17.4       │
└─────────────┴────────────┴────────────┘
>>> pop2.join_asof(gdp2, by="country", on="date", strategy="nearest").collect()
shape: (6, 4)
┌─────────────┬────────────┬────────────┬──────┐
│ country     ┆ date       ┆ population ┆ gdp  │
│ ---         ┆ ---        ┆ ---        ┆ ---  │
│ str         ┆ date       ┆ f64        ┆ i64  │
╞═════════════╪════════════╪════════════╪══════╡
│ Germany     ┆ 2016-03-01 ┆ 82.19      ┆ 4164 │
│ Germany     ┆ 2018-08-01 ┆ 82.66      ┆ 4696 │
│ Germany     ┆ 2019-01-01 ┆ 83.12      ┆ 4696 │
│ Netherlands ┆ 2016-03-01 ┆ 17.11      ┆ 784  │
│ Netherlands ┆ 2018-08-01 ┆ 17.32      ┆ 910  │
│ Netherlands ┆ 2019-01-01 ┆ 17.4       ┆ 910  │
└─────────────┴────────────┴────────────┴──────┘
Nz4you should pass the column to join on as an argument)r  allow_eqr  )r6   r   r   r  rq   r   r
   r   r  r  r   r   	join_asofr  )r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   by_left_	by_right_tolerance_strtolerance_nums                        r   r  LazyFrame.join_asofD  s{   x	 	$&b3.))GH?h.HCS/!>)"c22tH I!x';$.w$<$<y'H&03&?&?
XI HI$(,0i%%%M	9--4Y?M%M'277++eeGnG(BGG,,uuXHII

  !,!1   
 	
r   
join_nullsnulls_equalz1.24zm:m)	r  r  r  validater#  r  r  r  r  c       	           [        X5        U
c  Sn
USLnUSLnUSLnU=(       d    UnU(       a  U(       a  Sn[        U5      eX:w  a  Sn[        U5      eUS:X  a  Sn[        SSS	9  OrUS
:X  a  Sn	Sn[        SSS	9  O]US:X  aW  U(       d  U(       a  Sn[        U5      eU R                  U R                  R                  UR                  / / UUUUUUU
SS95      $ U(       a  [        U5      nUnUnO+U(       a  [        U5      n[        U5      nOSn[        U5      eU R                  U R                  R                  UR                  UUUUUUUUU
U	5      5      $ )ug  
Add a join operation to the Logical Plan.

.. versionchanged:: 1.24
    The `join_nulls` parameter was renamed `nulls_equal`.

Parameters
----------
other
    Lazy DataFrame to join with.
on
    Name(s) of the join columns in both DataFrames. If set, `left_on` and
    `right_on` should be None. This should not be specified if `how='cross'`.
how : {'inner','left', 'right', 'full', 'semi', 'anti', 'cross'}
    Join strategy.

    .. list-table ::
       :header-rows: 0

       * - **inner**
         - *(Default)* Returns rows that have matching values in both tables.
       * - **left**
         - Returns all rows from the left table, and the matched rows from
           the right table.
       * - **full**
         - Returns all rows when there is a match in either left or right.
       * - **cross**
         - Returns the Cartesian product of rows from both tables
       * - **semi**
         - Returns rows from the left table that have a match in the right
           table.
       * - **anti**
         - Returns rows from the left table that have no match in the right
           table.

left_on
    Join column of the left DataFrame.
right_on
    Join column of the right DataFrame.
suffix
    Suffix to append to columns with a duplicate name.
validate: {'m:m', 'm:1', '1:m', '1:1'}
    Checks if join is of specified type.

    .. list-table ::
       :header-rows: 0

       * - **m:m**
         - *(Default)* Many-to-many. Does not result in checks.
       * - **1:1**
         - One-to-one. Checks if join keys are unique in both left and
           right datasets.
       * - **1:m**
         - One-to-many. Checks if join keys are unique in left dataset.
       * - **m:1**
         - Many-to-one. Check if join keys are unique in right dataset.

    .. note::
        This is currently not supported by the streaming engine.
nulls_equal
    Join on null values. By default null values will never produce matches.
coalesce
    Coalescing behavior (merging of join columns).

    .. list-table ::
       :header-rows: 0

       * - **None**
         - *(Default)* Coalesce unless `how='full'` is specified.
       * - **True**
         - Always coalesce join columns.
       * - **False**
         - Never coalesce join columns.

    .. note::
        Joining on any other expressions than `col`
        will turn off coalescing.
maintain_order : {'none', 'left', 'right', 'left_right', 'right_left'}
    Which DataFrame row order to preserve, if any.
    Do not rely on any observed ordering without explicitly setting this
    parameter, as your code may break in a future release.
    Not specifying any ordering can improve performance.
    Supported for inner, left, right and full joins

    .. list-table ::
       :header-rows: 0

       * - **none**
         - *(Default)* No specific ordering is desired. The ordering might
           differ across Polars versions or even between different runs.
       * - **left**
         - Preserves the order of the left DataFrame.
       * - **right**
         - Preserves the order of the right DataFrame.
       * - **left_right**
         - First preserves the order of the left DataFrame, then the right.
       * - **right_left**
         - First preserves the order of the right DataFrame, then the left.

allow_parallel
    Allow the physical plan to optionally evaluate the computation of both
    DataFrames up to the join in parallel.
force_parallel
    Force the physical plan to evaluate the computation of both DataFrames up to
    the join in parallel.

See Also
--------
join_asof

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> other_lf = pl.LazyFrame(
...     {
...         "apple": ["x", "y", "z"],
...         "ham": ["a", "b", "d"],
...     }
... )
>>> lf.join(other_lf, on="ham").collect()
shape: (2, 4)
┌─────┬─────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ ---   │
│ i64 ┆ f64 ┆ str ┆ str   │
╞═════╪═════╪═════╪═══════╡
│ 1   ┆ 6.0 ┆ a   ┆ x     │
│ 2   ┆ 7.0 ┆ b   ┆ y     │
└─────┴─────┴─────┴───────┘
>>> lf.join(other_lf, on="ham", how="full").collect()
shape: (4, 5)
┌──────┬──────┬──────┬───────┬───────────┐
│ foo  ┆ bar  ┆ ham  ┆ apple ┆ ham_right │
│ ---  ┆ ---  ┆ ---  ┆ ---   ┆ ---       │
│ i64  ┆ f64  ┆ str  ┆ str   ┆ str       │
╞══════╪══════╪══════╪═══════╪═══════════╡
│ 1    ┆ 6.0  ┆ a    ┆ x     ┆ a         │
│ 2    ┆ 7.0  ┆ b    ┆ y     ┆ b         │
│ null ┆ null ┆ null ┆ z     ┆ d         │
│ 3    ┆ 8.0  ┆ c    ┆ null  ┆ null      │
└──────┴──────┴──────┴───────┴───────────┘
>>> lf.join(other_lf, on="ham", how="left", coalesce=True).collect()
shape: (3, 4)
┌─────┬─────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ ---   │
│ i64 ┆ f64 ┆ str ┆ str   │
╞═════╪═════╪═════╪═══════╡
│ 1   ┆ 6.0 ┆ a   ┆ x     │
│ 2   ┆ 7.0 ┆ b   ┆ y     │
│ 3   ┆ 8.0 ┆ c   ┆ null  │
└─────┴─────┴─────┴───────┘
>>> lf.join(other_lf, on="ham", how="semi").collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6.0 ┆ a   │
│ 2   ┆ 7.0 ┆ b   │
└─────┴─────┴─────┘
>>> lf.join(other_lf, on="ham", how="anti").collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 3   ┆ 8.0 ┆ c   │
└─────┴─────┴─────┘

>>> lf.join(other_lf, how="cross").collect()
shape: (9, 5)
┌─────┬─────┬─────┬───────┬───────────┐
│ foo ┆ bar ┆ ham ┆ apple ┆ ham_right │
│ --- ┆ --- ┆ --- ┆ ---   ┆ ---       │
│ i64 ┆ f64 ┆ str ┆ str   ┆ str       │
╞═════╪═════╪═════╪═══════╪═══════════╡
│ 1   ┆ 6.0 ┆ a   ┆ x     ┆ a         │
│ 1   ┆ 6.0 ┆ a   ┆ y     ┆ b         │
│ 1   ┆ 6.0 ┆ a   ┆ z     ┆ d         │
│ 2   ┆ 7.0 ┆ b   ┆ x     ┆ a         │
│ 2   ┆ 7.0 ┆ b   ┆ y     ┆ b         │
│ 2   ┆ 7.0 ┆ b   ┆ z     ┆ d         │
│ 3   ┆ 8.0 ┆ c   ┆ x     ┆ a         │
│ 3   ┆ 8.0 ┆ c   ┆ y     ┆ b         │
│ 3   ┆ 8.0 ┆ c   ┆ z     ┆ d         │
└─────┴─────┴─────┴───────┴───────────┘
Nrj  z;cannot use 'on' in conjunction with 'left_on' or 'right_on'z+'left_on' requires corresponding 'right_on'outerrg  :use of `how='outer'` should be replaced with `how='full'`.0.20.29r  outer_coalesceTzRuse of `how='outer_coalesce'` should be replaced with `how='full', coalesce=True`.crossz$cross join should not pass join keys)r  z-must specify `on` OR `left_on` and `right_on`)r6   r   r#   r   r   joinr&   )r   r  r  howr  r  r  r$  r#  r  r  r  r  uses_onuses_left_onuses_right_on
uses_lr_onr   r  pyexprs_leftpyexprs_rights                        r   r+  LazyFrame.join  s   j 	$&!#ND.d* ,!2]
zOCS/!*?CS/!'>C%L! $$HC%d! G^*< o%##		JJ"""!     4R8G"L#M9'BL:8DMACS/!IINN


 	
r   )r  c                   [        X5        [        U6 nU R                  U R                  R	                  UR                  UU5      5      $ )u  
Perform a join based on one or multiple (in)equality predicates.

This performs an inner join, so only rows where all predicates are true
are included in the result, and a row from either DataFrame may be included
multiple times in the result.

.. note::
    The row order of the input DataFrames is not preserved.

.. warning::
    This functionality is experimental. It may be
    changed at any point without it being considered a breaking change.

Parameters
----------
other
    DataFrame to join with.
*predicates
    (In)Equality condition to join the two tables on.
    When a column name occurs in both tables, the proper suffix must
    be applied in the predicate.
suffix
    Suffix to append to columns with a duplicate name.

Examples
--------
Join two lazyframes together based on two predicates which get AND-ed together.

>>> east = pl.LazyFrame(
...     {
...         "id": [100, 101, 102],
...         "dur": [120, 140, 160],
...         "rev": [12, 14, 16],
...         "cores": [2, 8, 4],
...     }
... )
>>> west = pl.LazyFrame(
...     {
...         "t_id": [404, 498, 676, 742],
...         "time": [90, 130, 150, 170],
...         "cost": [9, 13, 15, 16],
...         "cores": [4, 2, 1, 4],
...     }
... )
>>> east.join_where(
...     west,
...     pl.col("dur") < pl.col("time"),
...     pl.col("rev") < pl.col("cost"),
... ).collect()
shape: (5, 8)
┌─────┬─────┬─────┬───────┬──────┬──────┬──────┬─────────────┐
│ id  ┆ dur ┆ rev ┆ cores ┆ t_id ┆ time ┆ cost ┆ cores_right │
│ --- ┆ --- ┆ --- ┆ ---   ┆ ---  ┆ ---  ┆ ---  ┆ ---         │
│ i64 ┆ i64 ┆ i64 ┆ i64   ┆ i64  ┆ i64  ┆ i64  ┆ i64         │
╞═════╪═════╪═════╪═══════╪══════╪══════╪══════╪═════════════╡
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 498  ┆ 130  ┆ 13   ┆ 2           │
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 676  ┆ 150  ┆ 15   ┆ 1           │
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 742  ┆ 170  ┆ 16   ┆ 4           │
│ 101 ┆ 140 ┆ 14  ┆ 8     ┆ 676  ┆ 150  ┆ 15   ┆ 1           │
│ 101 ┆ 140 ┆ 14  ┆ 8     ┆ 742  ┆ 170  ┆ 16   ┆ 4           │
└─────┴─────┴─────┴───────┴──────┴──────┴──────┴─────────────┘

To OR them together, use a single expression and the `|` operator.

>>> east.join_where(
...     west,
...     (pl.col("dur") < pl.col("time")) | (pl.col("rev") < pl.col("cost")),
... ).collect()
shape: (6, 8)
┌─────┬─────┬─────┬───────┬──────┬──────┬──────┬─────────────┐
│ id  ┆ dur ┆ rev ┆ cores ┆ t_id ┆ time ┆ cost ┆ cores_right │
│ --- ┆ --- ┆ --- ┆ ---   ┆ ---  ┆ ---  ┆ ---  ┆ ---         │
│ i64 ┆ i64 ┆ i64 ┆ i64   ┆ i64  ┆ i64  ┆ i64  ┆ i64         │
╞═════╪═════╪═════╪═══════╪══════╪══════╪══════╪═════════════╡
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 498  ┆ 130  ┆ 13   ┆ 2           │
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 676  ┆ 150  ┆ 15   ┆ 1           │
│ 100 ┆ 120 ┆ 12  ┆ 2     ┆ 742  ┆ 170  ┆ 16   ┆ 4           │
│ 101 ┆ 140 ┆ 14  ┆ 8     ┆ 676  ┆ 150  ┆ 15   ┆ 1           │
│ 101 ┆ 140 ┆ 14  ┆ 8     ┆ 742  ┆ 170  ┆ 16   ┆ 4           │
│ 102 ┆ 160 ┆ 16  ┆ 4     ┆ 742  ┆ 170  ┆ 16   ┆ 4           │
└─────┴─────┴─────┴───────┴──────┴──────┴──────┴─────────────┘
)r6   r&   r   r   
join_where)r   r  r  r  r  s        r   r5  LazyFrame.join_where  sI    t 	$&0*=II  


 	
r   c                    [        [        [        R                  R	                  SS5      5      5      n[        U0 UDSU0D6nU R                  U R                  R                  U5      5      $ )uq  
Add columns to this LazyFrame.

Added columns will replace existing columns with the same name.

Parameters
----------
*exprs
    Column(s) to add, specified as positional arguments.
    Accepts expression input. Strings are parsed as column names, other
    non-expression inputs are parsed as literals.
**named_exprs
    Additional columns to add, specified as keyword arguments.
    The columns will be renamed to the keyword used.

Returns
-------
LazyFrame
    A new LazyFrame with the columns added.

Notes
-----
Creating a new LazyFrame using this method does not create a new copy of
existing data.

Examples
--------
Pass an expression to add it as a new column.

>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [0.5, 4, 10, 13],
...         "c": [True, True, False, True],
...     }
... )
>>> lf.with_columns((pl.col("a") ** 2).alias("a^2")).collect()
shape: (4, 4)
┌─────┬──────┬───────┬─────┐
│ a   ┆ b    ┆ c     ┆ a^2 │
│ --- ┆ ---  ┆ ---   ┆ --- │
│ i64 ┆ f64  ┆ bool  ┆ i64 │
╞═════╪══════╪═══════╪═════╡
│ 1   ┆ 0.5  ┆ true  ┆ 1   │
│ 2   ┆ 4.0  ┆ true  ┆ 4   │
│ 3   ┆ 10.0 ┆ false ┆ 9   │
│ 4   ┆ 13.0 ┆ true  ┆ 16  │
└─────┴──────┴───────┴─────┘

Added columns will replace existing columns with the same name.

>>> lf.with_columns(pl.col("a").cast(pl.Float64)).collect()
shape: (4, 3)
┌─────┬──────┬───────┐
│ a   ┆ b    ┆ c     │
│ --- ┆ ---  ┆ ---   │
│ f64 ┆ f64  ┆ bool  │
╞═════╪══════╪═══════╡
│ 1.0 ┆ 0.5  ┆ true  │
│ 2.0 ┆ 4.0  ┆ true  │
│ 3.0 ┆ 10.0 ┆ false │
│ 4.0 ┆ 13.0 ┆ true  │
└─────┴──────┴───────┘

Multiple columns can be added using positional arguments.

>>> lf.with_columns(
...     (pl.col("a") ** 2).alias("a^2"),
...     (pl.col("b") / 2).alias("b/2"),
...     (pl.col("c").not_()).alias("not c"),
... ).collect()
shape: (4, 6)
┌─────┬──────┬───────┬─────┬──────┬───────┐
│ a   ┆ b    ┆ c     ┆ a^2 ┆ b/2  ┆ not c │
│ --- ┆ ---  ┆ ---   ┆ --- ┆ ---  ┆ ---   │
│ i64 ┆ f64  ┆ bool  ┆ i64 ┆ f64  ┆ bool  │
╞═════╪══════╪═══════╪═════╪══════╪═══════╡
│ 1   ┆ 0.5  ┆ true  ┆ 1   ┆ 0.25 ┆ false │
│ 2   ┆ 4.0  ┆ true  ┆ 4   ┆ 2.0  ┆ false │
│ 3   ┆ 10.0 ┆ false ┆ 9   ┆ 5.0  ┆ true  │
│ 4   ┆ 13.0 ┆ true  ┆ 16  ┆ 6.5  ┆ false │
└─────┴──────┴───────┴─────┴──────┴───────┘

Multiple columns can also be added by passing a list of expressions.

>>> lf.with_columns(
...     [
...         (pl.col("a") ** 2).alias("a^2"),
...         (pl.col("b") / 2).alias("b/2"),
...         (pl.col("c").not_()).alias("not c"),
...     ]
... ).collect()
shape: (4, 6)
┌─────┬──────┬───────┬─────┬──────┬───────┐
│ a   ┆ b    ┆ c     ┆ a^2 ┆ b/2  ┆ not c │
│ --- ┆ ---  ┆ ---   ┆ --- ┆ ---  ┆ ---   │
│ i64 ┆ f64  ┆ bool  ┆ i64 ┆ f64  ┆ bool  │
╞═════╪══════╪═══════╪═════╪══════╪═══════╡
│ 1   ┆ 0.5  ┆ true  ┆ 1   ┆ 0.25 ┆ false │
│ 2   ┆ 4.0  ┆ true  ┆ 4   ┆ 2.0  ┆ false │
│ 3   ┆ 10.0 ┆ false ┆ 9   ┆ 5.0  ┆ true  │
│ 4   ┆ 13.0 ┆ true  ┆ 16  ┆ 6.5  ┆ false │
└─────┴──────┴───────┴─────┴──────┴───────┘

Use keyword arguments to easily name your expression inputs.

>>> lf.with_columns(
...     ab=pl.col("a") * pl.col("b"),
...     not_c=pl.col("c").not_(),
... ).collect()
shape: (4, 5)
┌─────┬──────┬───────┬──────┬───────┐
│ a   ┆ b    ┆ c     ┆ ab   ┆ not_c │
│ --- ┆ ---  ┆ ---   ┆ ---  ┆ ---   │
│ i64 ┆ f64  ┆ bool  ┆ f64  ┆ bool  │
╞═════╪══════╪═══════╪══════╪═══════╡
│ 1   ┆ 0.5  ┆ true  ┆ 0.5  ┆ false │
│ 2   ┆ 4.0  ┆ true  ┆ 8.0  ┆ false │
│ 3   ┆ 10.0 ┆ false ┆ 30.0 ┆ true  │
│ 4   ┆ 13.0 ┆ true  ┆ 52.0 ┆ false │
└─────┴──────┴───────┴──────┴───────┘
r  r   r  )	r  r  r  r  r(  r&   r   r   r  r  s        r   r  LazyFrame.with_columns>  sc    ~ RZZ^^,CQGHI	0
!
/8
 		 6 6w ?@@r   c                    [        [        [        R                  R	                  SS5      5      5      n[        U0 UDSU0D6nU R                  U R                  R                  U5      5      $ )a  
Add columns to this LazyFrame.

Added columns will replace existing columns with the same name.

This will run all expressions sequentially instead of in parallel.
Use this when the work per expression is cheap.

Parameters
----------
*exprs
    Column(s) to add, specified as positional arguments.
    Accepts expression input. Strings are parsed as column names, other
    non-expression inputs are parsed as literals.
**named_exprs
    Additional columns to add, specified as keyword arguments.
    The columns will be renamed to the keyword used.

Returns
-------
LazyFrame
    A new LazyFrame with the columns added.

See Also
--------
with_columns
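
Examples
--------
The inputs match `with_columns`; only the evaluation strategy differs.
A small sketch (expected output shown for illustration):

>>> lf = pl.LazyFrame({"a": [1, 2], "b": [3, 4]})
>>> lf.with_columns_seq(
...     (pl.col("a") * 2).alias("a2"),
...     pl.col("b") + 1,
... ).collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ a2  │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1   ┆ 4   ┆ 2   │
│ 2   ┆ 5   ┆ 4   │
└─────┴─────┴─────┘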
r  r   r  )	r  r  r  r  r(  r&   r   r   with_columns_seqr  s        r   r:  LazyFrame.with_columns_seq  sc    @ RZZ^^,CQGHI	0
!
/8
 		 : :7 CDDr   zW`LazyFrame.with_context` is deprecated; use `pl.concat(..., how='horizontal')` instead.c                    [        U[        5      (       d  U/nU R                  U R                  R	                  U Vs/ s H  o"R                  PM     sn5      5      $ s  snf )uv  
Add an external context to the computation graph.

.. deprecated:: 1.0.0
    Use :func:`concat` instead, with `how='horizontal'`

This allows expressions to also access columns from DataFrames
that are not part of this one.

Parameters
----------
other
    Lazy DataFrame to join with.

Examples
--------
>>> lf = pl.LazyFrame({"a": [1, 2, 3], "b": ["a", "c", None]})
>>> lf_other = pl.LazyFrame({"c": ["foo", "ham"]})
>>> lf.with_context(lf_other).select(  # doctest: +SKIP
...     pl.col("b") + pl.col("c").first()
... ).collect()
shape: (3, 1)
┌──────┐
│ b    │
│ ---  │
│ str  │
╞══════╡
│ afoo │
│ cfoo │
│ null │
└──────┘

Fill nulls with the median from another DataFrame:

>>> train_lf = pl.LazyFrame(
...     {"feature_0": [-1.0, 0, 1], "feature_1": [-1.0, 0, 1]}
... )
>>> test_lf = pl.LazyFrame(
...     {"feature_0": [-1.0, None, 1], "feature_1": [-1.0, 0, 1]}
... )
>>> test_lf.with_context(  # doctest: +SKIP
...     train_lf.select(pl.all().name.suffix("_train"))
... ).select(
...     pl.col("feature_0").fill_null(pl.col("feature_0_train").median())
... ).collect()
shape: (3, 1)
┌───────────┐
│ feature_0 │
│ ---       │
│ f64       │
╞═══════════╡
│ -1.0      │
│ 0.0       │
│ 1.0       │
└───────────┘
)r   r   r   r   with_context)r   r  lfs      r   r=  LazyFrame.with_context  sM    z %&&GE		 6 6%7P%B%7P QRR7Ps   Ac                   / nU H8  n[        U[        5      (       a  [        U[        5      (       d  X4-  nM3  X4/-  nM:     [        X1S9nU R	                  U R
                  R                  UR                  S95      $ )u  
Remove columns from the DataFrame.

Parameters
----------
*columns
    Names of the columns that should be removed from the dataframe.
    Accepts column selector input.
strict
    Validate that all column names exist in the current schema,
    and throw an exception if any do not.

Examples
--------
Drop a single column by passing the name of that column.

>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6.0, 7.0, 8.0],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.drop("ham").collect()
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1   ┆ 6.0 │
│ 2   ┆ 7.0 │
│ 3   ┆ 8.0 │
└─────┴─────┘

Drop multiple columns by passing a selector.

>>> import polars.selectors as cs
>>> lf.drop(cs.numeric()).collect()
shape: (3, 1)
┌─────┐
│ ham │
│ --- │
│ str │
╞═════╡
│ a   │
│ b   │
│ c   │
└─────┘

Use positional arguments to drop multiple columns.

>>> lf.drop("foo", "ham").collect()
shape: (3, 1)
┌─────┐
│ bar │
│ --- │
│ f64 │
╞═════╡
│ 6.0 │
│ 7.0 │
│ 8.0 │
└─────┘
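
The `strict` parameter controls whether unknown column names raise an error.
As an illustrative sketch, passing `strict=False` silently ignores names that
are not present in the schema (here, the hypothetical "missing_column"):

>>> lf.drop("ham", "missing_column", strict=False).collect()
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1   ┆ 6.0 │
│ 2   ┆ 7.0 │
│ 3   ┆ 8.0 │
└─────┴─────┘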
r  )r   )r   r   r   r'   r   r   drop_pyselector)r   r   r   	selectorsr  	drop_colss         r   rA  LazyFrame.drop-  sp    J 13	A!X&&z!S/A/A	S 		  -YF			y7L7L MNNr   c               V   [        U5      (       a=  U R                  [        R                  " 5       R                  R                  U5      5      $ [        UR                  5       5      n[        UR                  5       5      nU R                  U R                  R                  X4U5      5      $ )u  
Rename column names.

Parameters
----------
mapping
    Key value pairs that map from old name to new name, or a function
    that takes the old name as input and returns the new name.
strict
    Validate that all column names exist in the current schema,
    and throw an exception if any do not. (Note that this parameter
    is a no-op when passing a function to `mapping`).

Notes
-----
If existing names are swapped (e.g. 'A' points to 'B' and 'B' points to 'A'),
polars will block projection and predicate pushdowns at this node.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6, 7, 8],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.rename({"foo": "apple"}).collect()
shape: (3, 3)
┌───────┬─────┬─────┐
│ apple ┆ bar ┆ ham │
│ ---   ┆ --- ┆ --- │
│ i64   ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 1     ┆ 6   ┆ a   │
│ 2     ┆ 7   ┆ b   │
│ 3     ┆ 8   ┆ c   │
└───────┴─────┴─────┘
>>> lf.rename(lambda column_name: "c" + column_name[1:]).collect()
shape: (3, 3)
┌─────┬─────┬─────┐
│ coo ┆ car ┆ cam │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
│ 2   ┆ 7   ┆ b   │
│ 3   ┆ 8   ┆ c   │
└─────┴─────┴─────┘
)r   r  r  r  r  mapr   keysr  r   r   rename)r   mappingr   existingnews        r   rI  LazyFrame.rename|  sw    j G;;quuw||//899GLLN+Hw~~'(C##DII$4$4XF$KLLr   c                T    U R                  U R                  R                  5       5      $ )u  
Reverse the DataFrame.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "key": ["a", "b", "c"],
...         "val": [1, 2, 3],
...     }
... )
>>> lf.reverse().collect()
shape: (3, 2)
┌─────┬─────┐
│ key ┆ val │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ c   ┆ 3   │
│ b   ┆ 2   │
│ a   ┆ 1   │
└─────┴─────┘
)r   r   r  r   s    r   r  LazyFrame.reverse  s"    0 		 1 1 344r   )
fill_valuec                   Ub  [        USS9nOSn[        U5      nU R                  U R                  R                  XC5      5      $ )u  
Shift values by the given number of indices.

Parameters
----------
n
    Number of indices to shift forward. If a negative value is passed, values
    are shifted in the opposite direction instead.
fill_value
    Fill the resulting null values with this value. Accepts scalar expression
    input. Non-expression inputs are parsed as literals.

Notes
-----
This method is similar to the `LAG` operation in SQL when the value for `n`
is positive. With a negative value for `n`, it is similar to `LEAD`.

Examples
--------
By default, values are shifted forward by one index.

>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [5, 6, 7, 8],
...     }
... )
>>> lf.shift().collect()
shape: (4, 2)
┌──────┬──────┐
│ a    ┆ b    │
│ ---  ┆ ---  │
│ i64  ┆ i64  │
╞══════╪══════╡
│ null ┆ null │
│ 1    ┆ 5    │
│ 2    ┆ 6    │
│ 3    ┆ 7    │
└──────┴──────┘

Pass a negative value to shift in the opposite direction instead.

>>> lf.shift(-2).collect()
shape: (4, 2)
┌──────┬──────┐
│ a    ┆ b    │
│ ---  ┆ ---  │
│ i64  ┆ i64  │
╞══════╪══════╡
│ 3    ┆ 7    │
│ 4    ┆ 8    │
│ null ┆ null │
│ null ┆ null │
└──────┴──────┘

Specify `fill_value` to fill the resulting null values.

>>> lf.shift(-2, fill_value=100).collect()
shape: (4, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 3   ┆ 7   │
│ 4   ┆ 8   │
│ 100 ┆ 100 │
│ 100 ┆ 100 │
└─────┴─────┘
NT)
str_as_lit)r%   r   r   shift)r   r  rP  fill_value_pyn_pys        r   rS  LazyFrame.shift  sD    R !1*NM M$Q'		 DEEr   c                    U(       a  US:  a  SU< S3n[        U5      eU R                  U R                  R                  X5      5      $ )u  
Get a slice of this DataFrame.

Parameters
----------
offset
    Start index. Negative indexing is supported.
length
    Length of the slice. If set to `None`, all rows starting at the offset
    will be selected.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["x", "y", "z"],
...         "b": [1, 3, 5],
...         "c": [2, 4, 6],
...     }
... )
>>> lf.slice(1, 2).collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ y   ┆ 3   ┆ 4   │
│ z   ┆ 5   ┆ 6   │
└─────┴─────┴─────┘
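
As noted above, the `offset` may be negative, counting from the end of the
frame; an illustrative example on the same data:

>>> lf.slice(-2).collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ y   ┆ 3   ┆ 4   │
│ z   ┆ 5   ┆ 6   │
└─────┴─────┴─────┘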
r   znegative slice lengths (z) are invalid for LazyFrame)r   r   r   r9  )r   r  lengthr   s       r   r9  LazyFrame.slice"  sE    @ fqj,VJ6QRCS/!		 ?@@r   c                $    U R                  U5      $ )uG  
Get the first `n` rows.

Alias for :func:`LazyFrame.head`.

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4, 5, 6],
...         "b": [7, 8, 9, 10, 11, 12],
...     }
... )
>>> lf.limit().collect()
shape: (5, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
│ 3   ┆ 9   │
│ 4   ┆ 10  │
│ 5   ┆ 11  │
└─────┴─────┘
>>> lf.limit(2).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
└─────┴─────┘
)r  r  s     r   limitLazyFrame.limitG  s    V yy|r   c                &    U R                  SU5      $ )u"  
Get the first `n` rows.

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4, 5, 6],
...         "b": [7, 8, 9, 10, 11, 12],
...     }
... )
>>> lf.head().collect()
shape: (5, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
│ 3   ┆ 9   │
│ 4   ┆ 10  │
│ 5   ┆ 11  │
└─────┴─────┘
>>> lf.head(2).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 7   │
│ 2   ┆ 8   │
└─────┴─────┘
r   r9  r  s     r   r  LazyFrame.headt  s    R zz!Qr   c                V    U R                  U R                  R                  U5      5      $ )u!  
Get the last `n` rows.

Parameters
----------
n
    Number of rows to return.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4, 5, 6],
...         "b": [7, 8, 9, 10, 11, 12],
...     }
... )
>>> lf.tail().collect()
shape: (5, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 2   ┆ 8   │
│ 3   ┆ 9   │
│ 4   ┆ 10  │
│ 5   ┆ 11  │
│ 6   ┆ 12  │
└─────┴─────┘
>>> lf.tail(2).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 5   ┆ 11  │
│ 6   ┆ 12  │
└─────┴─────┘
)r   r   tailr  s     r   ra  LazyFrame.tail  s#    R 		q 122r   c                $    U R                  S5      $ )u  
Get the last row of the DataFrame.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 5, 3],
...         "b": [2, 4, 6],
...     }
... )
>>> lf.last().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 3   ┆ 6   │
└─────┴─────┘
rn  )ra  r   s    r   lastLazyFrame.last  s    , yy|r   c                &    U R                  SS5      $ )u  
Get the first row of the DataFrame.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 3, 5],
...         "b": [2, 4, 6],
...     }
... )
>>> lf.first().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 2   │
└─────┴─────┘
r   rn  r^  r   s    r   firstLazyFrame.first  s    , zz!Qr   z\`LazyFrame.approx_n_unique` is deprecated; use `select(pl.all().approx_n_unique())` instead.c                f    U R                  [        R                  " 5       R                  5       5      $ )uM  
Approximate count of unique values.

.. deprecated:: 0.20.11
    Use `select(pl.all().approx_n_unique())` instead.

This is done using the HyperLogLog++ algorithm for cardinality estimation.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.approx_n_unique().collect()  # doctest: +SKIP
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ u32 ┆ u32 │
╞═════╪═════╡
│ 4   ┆ 2   │
└─────┴─────┘
)r  r  r  approx_n_uniquer   s    r   rj  LazyFrame.approx_n_unique  s"    > {{155722455r   c                     U R                  U R                  R                  X5      5      $ ! [         a    US:  a  SOSnSU SU 3n[	        U5      Sef = f)u  
Add a row index as the first column in the LazyFrame.

Parameters
----------
name
    Name of the index column.
offset
    Start the index at this offset. Cannot be negative.

Warnings
--------
Using this function can have a negative effect on query performance.
This may, for instance, block predicate pushdown optimization.

Notes
-----
The resulting column does not have any special properties. It is a regular
column of type `UInt32` (or `UInt64` in `polars-u64-idx`).

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 3, 5],
...         "b": [2, 4, 6],
...     }
... )
>>> lf.with_row_index().collect()
shape: (3, 3)
┌───────┬─────┬─────┐
│ index ┆ a   ┆ b   │
│ ---   ┆ --- ┆ --- │
│ u32   ┆ i64 ┆ i64 │
╞═══════╪═════╪═════╡
│ 0     ┆ 1   ┆ 2   │
│ 1     ┆ 3   ┆ 4   │
│ 2     ┆ 5   ┆ 6   │
└───────┴─────┴─────┘
>>> lf.with_row_index("id", offset=1000).collect()
shape: (3, 3)
┌──────┬─────┬─────┐
│ id   ┆ a   ┆ b   │
│ ---  ┆ --- ┆ --- │
│ u32  ┆ i64 ┆ i64 │
╞══════╪═════╪═════╡
│ 1000 ┆ 1   ┆ 2   │
│ 1001 ┆ 3   ┆ 4   │
│ 1002 ┆ 5   ┆ 6   │
└──────┴─────┴─────┘

An index column can also be created using the expressions :func:`int_range`
and :func:`len`.

>>> lf.select(
...     pl.int_range(pl.len(), dtype=pl.UInt32).alias("index"),
...     pl.all(),
... ).collect()
shape: (3, 3)
┌───────┬─────┬─────┐
│ index ┆ a   ┆ b   │
│ ---   ┆ --- ┆ --- │
│ u32   ┆ i64 ┆ i64 │
╞═══════╪═════╪═════╡
│ 0     ┆ 1   ┆ 2   │
│ 1     ┆ 3   ┆ 4   │
│ 2     ┆ 5   ┆ 6   │
└───────┴─────┴─────┘
r   negativez$greater than the maximum index valuez.`offset` input for `with_row_index` cannot be z, got N)r   r   with_row_indexOverflowErrorr   )r   r  r  issuer   s        r   rn  LazyFrame.with_row_index  sd    L	,##DII$<$<T$JKK 	,"(1*J2XEB5'PVxXCS/t+	,s	   ), )Az`LazyFrame.with_row_count` is deprecated; use `LazyFrame.with_row_index` instead. Note that the default column name has changed from 'row_nr' to 'index'.c                $    U R                  X5      $ )u  
Add a column at index 0 that counts the rows.

.. deprecated:: 0.20.4
    Use the :meth:`with_row_index` method instead.
    Note that the default column name has changed from 'row_nr' to 'index'.

Parameters
----------
name
    Name of the column to add.
offset
    Start the row count at this offset.

Warnings
--------
This can have a negative effect on query performance.
This may, for instance, block predicate pushdown optimization.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 3, 5],
...         "b": [2, 4, 6],
...     }
... )
>>> lf.with_row_count().collect()  # doctest: +SKIP
shape: (3, 3)
┌────────┬─────┬─────┐
│ row_nr ┆ a   ┆ b   │
│ ---    ┆ --- ┆ --- │
│ u32    ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ 0      ┆ 1   ┆ 2   │
│ 1      ┆ 3   ┆ 4   │
│ 2      ┆ 5   ┆ 6   │
└────────┴─────┴─────┘
)rn  )r   r  r  s      r   with_row_countLazyFrame.with_row_counth  s    X ""400r   c                j    U R                  [        R                  " S5      R                  X5      5      $ )u7  
Take every nth row in the LazyFrame and return as a new LazyFrame.

Parameters
----------
n
    Gather every *n*-th row.
offset
    Starting index.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [5, 6, 7, 8],
...     }
... )
>>> lf.gather_every(2).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 5   │
│ 3   ┆ 7   │
└─────┴─────┘
>>> lf.gather_every(2, offset=1).collect()
shape: (2, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 2   ┆ 6   │
│ 4   ┆ 8   │
└─────┴─────┘
*)r  r  r  gather_every)r   r  r  s      r   rw  LazyFrame.gather_every  s'    P {{155:221=>>r   )matches_supertypec               p   SSK Jn  UGb  [        U[        R                  5      (       a  SnGO[        U[
        5      (       a	  [        /nGOrU(       aW  [        U[        [        45      (       a<  [        [        [        [        [        [        [        [         ["        [$        [&        U/nGO[        U[        5      (       a  [        /nO[        U[        5      (       a  [&        /nO[        U[(        5      (       a)  [*        /[,         Vs/ s H  n[+        U5      PM     sn-   nO[        U[.        5      (       a)  [0        /[,         Vs/ s H  n[1        U5      PM     sn-   nO^[        U[2        5      (       a  [4        /nOA[        U[6        5      (       a  [8        /nO$[        U[:        5      (       a  [<        [>        /nOSnU(       a5  U RA                  [B        RD                  " U5      RG                  XU5      5      $ U RI                  [B        RJ                  " 5       RG                  XU5      5      $ s  snf s  snf )u  
Fill null values using the specified value or strategy.

Parameters
----------
value
    Value used to fill null values.
strategy : {None, 'forward', 'backward', 'min', 'max', 'mean', 'zero', 'one'}
    Strategy used to fill null values.
limit
    Number of consecutive null values to fill when using the 'forward' or
    'backward' strategy.
matches_supertype
    Fill all matching supertypes of the fill `value` literal.

See Also
--------
fill_nan

Notes
-----
A null value is not the same as a NaN value.
To fill NaN values, use :func:`fill_nan`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, None, 4],
...         "b": [0.5, 4, None, 13],
...     }
... )
>>> lf.fill_null(99).collect()
shape: (4, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ f64  │
╞═════╪══════╡
│ 1   ┆ 0.5  │
│ 2   ┆ 4.0  │
│ 99  ┆ 99.0 │
│ 4   ┆ 13.0 │
└─────┴──────┘
>>> lf.fill_null(strategy="forward").collect()
shape: (4, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ f64  │
╞═════╪══════╡
│ 1   ┆ 0.5  │
│ 2   ┆ 4.0  │
│ 2   ┆ 4.0  │
│ 4   ┆ 13.0 │
└─────┴──────┘

>>> lf.fill_null(strategy="max").collect()
shape: (4, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ f64  │
╞═════╪══════╡
│ 1   ┆ 0.5  │
│ 2   ┆ 4.0  │
│ 4   ┆ 13.0 │
│ 4   ┆ 13.0 │
└─────┴──────┘

>>> lf.fill_null(strategy="zero").collect()
shape: (4, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ f64  │
╞═════╪══════╡
│ 1   ┆ 0.5  │
│ 2   ┆ 4.0  │
│ 0   ┆ 0.0  │
│ 4   ┆ 13.0 │
└─────┴──────┘
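
In the examples above the integer fill value is also applied to the float
column because `matches_supertype=True` (the default) fills every column
whose dtype is a matching supertype of the fill value. A sketch of the
opposite behaviour (illustrative; only the column matching the literal's
dtype, `Int64`, is filled):

>>> lf.fill_null(99, matches_supertype=False).collect()  # doctest: +SKIP
shape: (4, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ f64  │
╞═════╪══════╡
│ 1   ┆ 0.5  │
│ 2   ┆ 4.0  │
│ 99  ┆ null │
│ 4   ┆ 13.0 │
└─────┴──────┘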
r   )DecimalN)&polarsr{  r   r  rq   r  r;   r  r  rC   rD   rE   rF   rG   rL   rM   rN   rO   rA   rB   r   r>   r9   r
   r?   r   r=   r	   rK   r   rJ   r<   r  r  r  	fill_nullr  r  )r   r  r  r[  ry  r{  r   us           r   r}  LazyFrame.fill_null  s   v 	# %))E4((!"z%#u'F'F E3''E5))!E8,,"<P&Q<Pqx{<P&QQE9--"<P&Q<Pqx{<P&QQE4((E4((E3'' +. ((EE&M++EUC  {{1557,,UeDEE% 'R&Qs   H.H3c                    [        U[        R                  5      (       d  [        R                  " U5      nU R                  U R                  R                  UR                  5      5      $ )u  
Fill floating point NaN values.

Parameters
----------
value
    Value used to fill NaN values.

See Also
--------
fill_null

Notes
-----
A NaN value is not the same as a null value.
To fill null values, use :func:`fill_null`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1.5, 2, float("nan"), 4],
...         "b": [0.5, 4, float("nan"), 13],
...     }
... )
>>> lf.fill_nan(99).collect()
shape: (4, 2)
┌──────┬──────┐
│ a    ┆ b    │
│ ---  ┆ ---  │
│ f64  ┆ f64  │
╞══════╪══════╡
│ 1.5  ┆ 0.5  │
│ 2.0  ┆ 4.0  │
│ 99.0 ┆ 99.0 │
│ 4.0  ┆ 13.0 │
└──────┴──────┘
)	r   r  rq   r  r  r   r   fill_nanr  )r   r  s     r   r  LazyFrame.fill_nanL  sG    N %))EE%LE		 2 25== ABBr   c                V    U R                  U R                  R                  U5      5      $ )u*  
Aggregate the columns in the LazyFrame to their standard deviation value.

Parameters
----------
ddof
    “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
    where N represents the number of elements.
    By default ddof is 1.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.std().collect()
shape: (1, 2)
┌──────────┬─────┐
│ a        ┆ b   │
│ ---      ┆ --- │
│ f64      ┆ f64 │
╞══════════╪═════╡
│ 1.290994 ┆ 0.5 │
└──────────┴─────┘
>>> lf.std(ddof=0).collect()
shape: (1, 2)
┌──────────┬──────────┐
│ a        ┆ b        │
│ ---      ┆ ---      │
│ f64      ┆ f64      │
╞══════════╪══════════╡
│ 1.118034 ┆ 0.433013 │
└──────────┴──────────┘
)r   r   r{  r   ddofs     r   r{  LazyFrame.stdw  #    L 		d 344r   c                V    U R                  U R                  R                  U5      5      $ )u  
Aggregate the columns in the LazyFrame to their variance value.

Parameters
----------
ddof
    “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
    where N represents the number of elements.
    By default ddof is 1.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.var().collect()
shape: (1, 2)
┌──────────┬──────┐
│ a        ┆ b    │
│ ---      ┆ ---  │
│ f64      ┆ f64  │
╞══════════╪══════╡
│ 1.666667 ┆ 0.25 │
└──────────┴──────┘
>>> lf.var(ddof=0).collect()
shape: (1, 2)
┌──────┬────────┐
│ a    ┆ b      │
│ ---  ┆ ---    │
│ f64  ┆ f64    │
╞══════╪════════╡
│ 1.25 ┆ 0.1875 │
└──────┴────────┘
)r   r   varr  s     r   r  LazyFrame.var  r  r   c                T    U R                  U R                  R                  5       5      $ )u  
Aggregate the columns in the LazyFrame to their maximum value.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.max().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 4   ┆ 2   │
└─────┴─────┘
)r   r   r  r   s    r   r  LazyFrame.max      , 		00r   c                T    U R                  U R                  R                  5       5      $ )u  
Aggregate the columns in the LazyFrame to their minimum value.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.min().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 1   │
└─────┴─────┘
)r   r   r|  r   s    r   r|  LazyFrame.min  r  r   c                T    U R                  U R                  R                  5       5      $ )u  
Aggregate the columns in the LazyFrame to their sum value.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.sum().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 10  ┆ 5   │
└─────┴─────┘
)r   r   sumr   s    r   r  LazyFrame.sum  r  r   c                T    U R                  U R                  R                  5       5      $ )u  
Aggregate the columns in the LazyFrame to their mean value.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.mean().collect()
shape: (1, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ f64 ┆ f64  │
╞═════╪══════╡
│ 2.5 ┆ 1.25 │
└─────┴──────┘
)r   r   rz  r   s    r   rz  LazyFrame.mean  s     , 		 011r   c                T    U R                  U R                  R                  5       5      $ )u  
Aggregate the columns in the LazyFrame to their median value.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.median().collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞═════╪═════╡
│ 2.5 ┆ 1.0 │
└─────┴─────┘
)r   r   medianr   s    r   r  LazyFrame.median'  s"    , 		 0 0 233r   c                T    U R                  U R                  R                  5       5      $ )uB  
Aggregate the columns in the LazyFrame as the sum of their null value count.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, None, 3],
...         "bar": [6, 7, None],
...         "ham": ["a", "b", "c"],
...     }
... )
>>> lf.null_count().collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 1   ┆ 1   ┆ 0   │
└─────┴─────┴─────┘
)r   r   ry  r   s    r   ry  LazyFrame.null_count?  s"    . 		 4 4 677r   c                l    [        U5      nU R                  U R                  R                  X25      5      $ )ui  
Aggregate the columns in the LazyFrame to their quantile value.

Parameters
----------
quantile
    Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
    Interpolation method.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": [1, 2, 3, 4],
...         "b": [1, 2, 1, 1],
...     }
... )
>>> lf.quantile(0.7).collect()
shape: (1, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞═════╪═════╡
│ 3.0 ┆ 1.0 │
└─────┴─────┘
)r%   r   r   r  )r   r  ru  quantile_pys       r   r  LazyFrame.quantileX  s/    B ,H5		 2 2; NOOr   c                    [        U5      [        U5      -  nU R                  U R                  R                  UR                  S95      $ )u$  
Explode the DataFrame to long format by exploding the given columns.

Parameters
----------
columns
    Column names, expressions, or a selector defining them. The underlying
    columns being exploded must be of the `List` or `Array` data type.
*more_columns
    Additional names of columns to explode, specified as positional arguments.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "letters": ["a", "a", "b", "c"],
...         "numbers": [[1], [2, 3], [4, 5], [6, 7, 8]],
...     }
... )
>>> lf.explode("numbers").collect()
shape: (8, 2)
┌─────────┬─────────┐
│ letters ┆ numbers │
│ ---     ┆ ---     │
│ str     ┆ i64     │
╞═════════╪═════════╡
│ a       ┆ 1       │
│ a       ┆ 2       │
│ a       ┆ 3       │
│ b       ┆ 4       │
│ b       ┆ 5       │
│ c       ┆ 6       │
│ c       ┆ 7       │
│ c       ┆ 8       │
└─────────┴─────────┘
subset)r'   r   r   exploderB  r   r   more_columnsr  s       r   r  LazyFrame.explode|  sJ    R *'25M6
 
 		 1 19K9K 1 LMMr   r  )keepr  c                   SnUb  [        U5      R                  nU R                  U R                  R	                  X4U5      5      $ )u	  
Drop duplicate rows from this DataFrame.

Parameters
----------
subset
    Column name(s) or selector(s) to consider when identifying
    duplicate rows. If set to `None` (default), use all columns.
keep : {'first', 'last', 'any', 'none'}
    Which of the duplicate rows to keep.

    * 'any': Does not give any guarantee of which row is kept.
             This allows more optimizations.
    * 'none': Don't keep duplicate rows.
    * 'first': Keep first unique row.
    * 'last': Keep last unique row.
maintain_order
    Keep the same order as the original DataFrame. This is more expensive to
    compute.
    Setting this to `True` prevents the query from running on the
    streaming engine.

Returns
-------
LazyFrame
    LazyFrame with unique rows.

Warnings
--------
This method will fail if there is a column of type `List` in the DataFrame or
subset.

Notes
-----
If you're coming from pandas, this is similar to
`pandas.DataFrame.drop_duplicates`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3, 1],
...         "bar": ["a", "a", "a", "a"],
...         "ham": ["b", "b", "b", "b"],
...     }
... )
>>> lf.unique(maintain_order=True).collect()
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ a   ┆ b   │
│ 2   ┆ a   ┆ b   │
│ 3   ┆ a   ┆ b   │
└─────┴─────┴─────┘
>>> lf.unique(subset=["bar", "ham"], maintain_order=True).collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ a   ┆ b   │
└─────┴─────┴─────┘
>>> lf.unique(keep="last", maintain_order=True).collect()
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ str │
╞═════╪═════╪═════╡
│ 2   ┆ a   ┆ b   │
│ 3   ┆ a   ┆ b   │
│ 1   ┆ a   ┆ b   │
└─────┴─────┴─────┘
N)r'   rB  r   r   unique)r   r  r  r  selector_subsets        r   r  LazyFrame.unique  sC    j .26v>JJO		 0 0RV WXXr   c                    SnUb  [        U5      R                  nU R                  U R                  R	                  US95      $ )u	  
Drop all rows that contain one or more NaN values.

The original order of the remaining rows is preserved.

Parameters
----------
subset
    Column name(s) for which NaN values are considered; if set to `None`
    (default), use all columns (note that only floating-point columns
    can contain NaNs).

See Also
--------
drop_nulls

Notes
-----
A NaN value is not the same as a null value.
To drop null values, use :func:`drop_nulls`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [-20.5, float("nan"), 80.0],
...         "bar": [float("nan"), 110.0, 25.5],
...         "ham": ["xxx", "yyy", None],
...     }
... )

The default behavior of this method is to drop rows where any single
value in the row is NaN:

>>> lf.drop_nans().collect()
shape: (1, 3)
┌──────┬──────┬──────┐
│ foo  ┆ bar  ┆ ham  │
│ ---  ┆ ---  ┆ ---  │
│ f64  ┆ f64  ┆ str  │
╞══════╪══════╪══════╡
│ 80.0 ┆ 25.5 ┆ null │
└──────┴──────┴──────┘

This behaviour can be constrained to consider only a subset of columns, as
defined by name, or with a selector. For example, dropping rows only if
there is a NaN in the "bar" column:

>>> lf.drop_nans(subset=["bar"]).collect()
shape: (2, 3)
┌──────┬───────┬──────┐
│ foo  ┆ bar   ┆ ham  │
│ ---  ┆ ---   ┆ ---  │
│ f64  ┆ f64   ┆ str  │
╞══════╪═══════╪══════╡
│ NaN  ┆ 110.0 ┆ yyy  │
│ 80.0 ┆ 25.5  ┆ null │
└──────┴───────┴──────┘

Dropping a row only if *all* values are NaN requires a different formulation:

>>> lf = pl.LazyFrame(
...     {
...         "a": [float("nan"), float("nan"), float("nan"), float("nan")],
...         "b": [10.0, 2.5, float("nan"), 5.25],
...         "c": [65.75, float("nan"), float("nan"), 10.5],
...     }
... )
>>> lf.filter(~pl.all_horizontal(pl.all().is_nan())).collect()
shape: (3, 3)
┌─────┬──────┬───────┐
│ a   ┆ b    ┆ c     │
│ --- ┆ ---  ┆ ---   │
│ f64 ┆ f64  ┆ f64   │
╞═════╪══════╪═══════╡
│ NaN ┆ 10.0 ┆ 65.75 │
│ NaN ┆ 2.5  ┆ NaN   │
│ NaN ┆ 5.25 ┆ 10.5  │
└─────┴──────┴───────┘
Nr  )r'   rB  r   r   	drop_nansr   r  r  s      r   r  LazyFrame.drop_nans  sC    h .26v>JJO		 3 3? 3 KLLr   c                    SnUb  [        U5      R                  nU R                  U R                  R	                  US95      $ )u`  
Drop all rows that contain one or more null values.

The original order of the remaining rows is preserved.

See Also
--------
drop_nans

Notes
-----
A null value is not the same as a NaN value.
To drop NaN values, use :func:`drop_nans`.


Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, 2, 3],
...         "bar": [6, None, 8],
...         "ham": ["a", "b", None],
...     }
... )

The default behavior of this method is to drop rows where any single
value in the row is null:

>>> lf.drop_nulls().collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1   ┆ 6   ┆ a   │
└─────┴─────┴─────┘

This behaviour can be constrained to consider only a subset of columns, as
defined by name or with a selector. For example, dropping rows if there is
a null in any of the integer columns:

>>> import polars.selectors as cs
>>> lf.drop_nulls(subset=cs.integer()).collect()
shape: (2, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham  │
│ --- ┆ --- ┆ ---  │
│ i64 ┆ i64 ┆ str  │
╞═════╪═════╪══════╡
│ 1   ┆ 6   ┆ a    │
│ 3   ┆ 8   ┆ null │
└─────┴─────┴──────┘

Dropping a row only if *all* values are null requires a different formulation:

>>> lf = pl.LazyFrame(
...     {
...         "a": [None, None, None, None],
...         "b": [1, 2, None, 1],
...         "c": [1, None, None, 1],
...     }
... )
>>> lf.filter(~pl.all_horizontal(pl.all().is_null())).collect()
shape: (3, 3)
┌──────┬─────┬──────┐
│ a    ┆ b   ┆ c    │
│ ---  ┆ --- ┆ ---  │
│ null ┆ i64 ┆ i64  │
╞══════╪═════╪══════╡
│ null ┆ 1   ┆ 1    │
│ null ┆ 2   ┆ null │
│ null ┆ 1   ┆ 1    │
└──────┴─────┴──────┘
Nr  )r'   rB  r   r   
drop_nullsr  s      r   r  LazyFrame.drop_nulls]  sC    ^ .26v>JJO		 4 4O 4 LMMr   )indexvariable_name
value_name
streamablec               2   U(       d
  [        SSS9  Uc  [        R                  " 5       O
[        U5      nUc  [        R                  " 5       O
[        U5      nU R	                  U R
                  R                  UR                  UR                  UU5      5      $ )u  
Unpivot a DataFrame from wide to long format.

Optionally leaves identifiers set.

This function is useful to massage a DataFrame into a format where one or more
columns are identifier variables (index) while all other columns, considered
measured variables (on), are "unpivoted" to the row axis leaving just
two non-identifier columns, 'variable' and 'value'.

Parameters
----------
on
    Column(s) or selector(s) to use as values variables; if `on`
    is empty all columns that are not in `index` will be used.
index
    Column(s) or selector(s) to use as identifier variables.
variable_name
    Name to give to the `variable` column. Defaults to "variable"
value_name
    Name to give to the `value` column. Defaults to "value"
streamable
    Deprecated; this parameter has no effect.

Notes
-----
If you're coming from pandas, this is similar to `pandas.DataFrame.melt`,
but with `index` replacing `id_vars` and `on` replacing `value_vars`.
In other frameworks, you might know this operation as `pivot_longer`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "a": ["x", "y", "z"],
...         "b": [1, 3, 5],
...         "c": [2, 4, 6],
...     }
... )
>>> import polars.selectors as cs
>>> lf.unpivot(cs.numeric(), index="a").collect()
shape: (6, 3)
┌─────┬──────────┬───────┐
│ a   ┆ variable ┆ value │
│ --- ┆ ---      ┆ ---   │
│ str ┆ str      ┆ i64   │
╞═════╪══════════╪═══════╡
│ x   ┆ b        ┆ 1     │
│ y   ┆ b        ┆ 3     │
│ z   ┆ b        ┆ 5     │
│ x   ┆ c        ┆ 2     │
│ y   ┆ c        ┆ 4     │
│ z   ┆ c        ┆ 6     │
└─────┴──────────┴───────┘
z\the `streamable` parameter for `LazyFrame.unpivot` is deprecatedThis parameter has no effectz1.5.0r  )r#   csemptyr'   r   r   unpivotrB  )r   r  r  r  r  r  selector_onselector_indexs           r   r  LazyFrame.unpivot  s    @ %/ *BHHJ*B2*F 	  -BHHJ-Ee-L 	 II''**	
 	
r   )r  r  r  no_optimizationsr   validate_output_schemar  c               x    U(       a  SnSnSnU R                  U R                  R                  UUUUUUUS95      $ )u	  
Apply a custom function.

It is important that the function returns a Polars DataFrame.

Parameters
----------
function
    Lambda/ function to apply.
predicate_pushdown
    Allow predicate pushdown optimization to pass this node.
projection_pushdown
    Allow projection pushdown optimization to pass this node.
slice_pushdown
    Allow slice pushdown optimization to pass this node.
no_optimizations
    Turn off all optimizations past this point.
schema
    Output schema of the function, if set to `None` we assume that the schema
    will remain unchanged by the applied function.
validate_output_schema
    It is paramount that polars' schema is correct. This flag will ensure that
    the output schema of this function will be checked with the expected schema.
    Setting this to `False` will not do this check, but may lead to hard to
    debug bugs.
streamable
    Whether the given function is eligible to run with the streaming engine.
    That means that the function must produce the same result when it is
    executed in batches as when it is executed on the full dataset.

Warnings
--------
The `schema` of a `LazyFrame` must always be correct. It is up to the caller
of this function to ensure that this invariant is upheld.

It is important that the optimization flags are correct. If the custom function
for instance does an aggregation of a column, `predicate_pushdown` should not
be allowed, as this prunes rows and will influence your aggregation results.

Notes
-----
A UDF passed to `map_batches` must be pure, meaning that it cannot modify or
depend on state other than its arguments.

Examples
--------
>>> lf = (  # doctest: +SKIP
...     pl.LazyFrame(
...         {
...             "a": pl.int_range(-100_000, 0, eager=True),
...             "b": pl.int_range(0, 100_000, eager=True),
...         }
...     )
...     .map_batches(lambda x: 2 * x, streamable=True)
...     .collect(engine="streaming")
... )
shape: (100_000, 2)
┌─────────┬────────┐
│ a       ┆ b      │
│ ---     ┆ ---    │
│ i64     ┆ i64    │
╞═════════╪════════╡
│ -200000 ┆ 0      │
│ -199998 ┆ 2      │
│ -199996 ┆ 4      │
│ -199994 ┆ 6      │
│ …       ┆ …      │
│ -8      ┆ 199992 │
│ -6      ┆ 199994 │
│ -4      ┆ 199996 │
│ -2      ┆ 199998 │
└─────────┴────────┘
F)r  r   validate_output)r   r   r  )	r   rg  r  r  r  r  r   r  r  s	            r   r  LazyFrame.map_batches  sX    l !&"'"NII!!"#% 6 " 

 
	
r   c                h    U R                  [        R                  " S5      R                  5       5      $ )uD  
Interpolate intermediate values. The interpolation method is linear.

Nulls at the beginning and end of the series remain null.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "foo": [1, None, 9, 10],
...         "bar": [6, 7, 9, None],
...         "baz": [1, None, None, 9],
...     }
... )
>>> lf.interpolate().collect()
shape: (4, 3)
┌──────┬──────┬──────────┐
│ foo  ┆ bar  ┆ baz      │
│ ---  ┆ ---  ┆ ---      │
│ f64  ┆ f64  ┆ f64      │
╞══════╪══════╪══════════╡
│ 1.0  ┆ 6.0  ┆ 1.0      │
│ 5.0  ┆ 7.0  ┆ 3.666667 │
│ 9.0  ┆ 9.0  ┆ 6.333333 │
│ 10.0 ┆ null ┆ 9.0      │
└──────┴──────┴──────────┘
rv  )r  r  r  interpolater   s    r   r  LazyFrame.interpolateo  s$    8 {{155:11344r   c                    [        U5      [        U5      -  nU R                  U R                  R                  UR                  5      5      $ )uS  
Decompose struct columns into separate columns for each of their fields.

The new columns will be inserted into the DataFrame at the location of the
struct column.

Parameters
----------
columns
    Name of the struct column(s) that should be unnested.
*more_columns
    Additional columns to unnest, specified as positional arguments.

Examples
--------
>>> df = pl.LazyFrame(
...     {
...         "before": ["foo", "bar"],
...         "t_a": [1, 2],
...         "t_b": ["a", "b"],
...         "t_c": [True, None],
...         "t_d": [[1, 2], [3]],
...         "after": ["baz", "womp"],
...     }
... ).select("before", pl.struct(pl.col("^t_.$")).alias("t_struct"), "after")
>>> df.collect()
shape: (2, 3)
┌────────┬─────────────────────┬───────┐
│ before ┆ t_struct            ┆ after │
│ ---    ┆ ---                 ┆ ---   │
│ str    ┆ struct[4]           ┆ str   │
╞════════╪═════════════════════╪═══════╡
│ foo    ┆ {1,"a",true,[1, 2]} ┆ baz   │
│ bar    ┆ {2,"b",null,[3]}    ┆ womp  │
└────────┴─────────────────────┴───────┘
>>> df.unnest("t_struct").collect()
shape: (2, 6)
┌────────┬─────┬─────┬──────┬───────────┬───────┐
│ before ┆ t_a ┆ t_b ┆ t_c  ┆ t_d       ┆ after │
│ ---    ┆ --- ┆ --- ┆ ---  ┆ ---       ┆ ---   │
│ str    ┆ i64 ┆ str ┆ bool ┆ list[i64] ┆ str   │
╞════════╪═════╪═════╪══════╪═══════════╪═══════╡
│ foo    ┆ 1   ┆ a   ┆ true ┆ [1, 2]    ┆ baz   │
│ bar    ┆ 2   ┆ b   ┆ null ┆ [3]       ┆ womp  │
└────────┴─────┴─────┴──────┴───────────┴───────┘
)r'   r   r   unnestrB  r  s       r   r  LazyFrame.unnest  sG    f *'25M6
 
 		 0 01C1C DEEr   c                    [        X5        U R                  U R                  R                  UR                  U5      5      $ )uR  
Take two sorted DataFrames and merge them by the sorted key.

The output of this operation will also be sorted.
It is the caller's responsibility to ensure that the frames
are sorted in ascending order by that key; otherwise
the output will not make sense.

The schemas of both LazyFrames must be equal.

Parameters
----------
other
    Other DataFrame that must be merged
key
    Key that is sorted.

Examples
--------
>>> df0 = pl.LazyFrame(
...     {"name": ["steve", "elise", "bob"], "age": [42, 44, 18]}
... ).sort("age")
>>> df0.collect()
shape: (3, 2)
┌───────┬─────┐
│ name  ┆ age │
│ ---   ┆ --- │
│ str   ┆ i64 │
╞═══════╪═════╡
│ bob   ┆ 18  │
│ steve ┆ 42  │
│ elise ┆ 44  │
└───────┴─────┘
>>> df1 = pl.LazyFrame(
...     {"name": ["anna", "megan", "steve", "thomas"], "age": [21, 33, 42, 20]}
... ).sort("age")
>>> df1.collect()
shape: (4, 2)
┌────────┬─────┐
│ name   ┆ age │
│ ---    ┆ --- │
│ str    ┆ i64 │
╞════════╪═════╡
│ thomas ┆ 20  │
│ anna   ┆ 21  │
│ megan  ┆ 33  │
│ steve  ┆ 42  │
└────────┴─────┘
>>> df0.merge_sorted(df1, key="age").collect()
shape: (7, 2)
┌────────┬─────┐
│ name   ┆ age │
│ ---    ┆ --- │
│ str    ┆ i64 │
╞════════╪═════╡
│ bob    ┆ 18  │
│ thomas ┆ 20  │
│ anna   ┆ 21  │
│ megan  ┆ 33  │
│ steve  ┆ 42  │
│ steve  ┆ 42  │
│ elise  ┆ 44  │
└────────┴─────┘

Notes
-----
No guarantee is given over the output row order when the key is equal
between the both dataframes.

The key must be sorted in ascending order.
)r6   r   r   merge_sorted)r   r  r+  s      r   r  LazyFrame.merge_sorted  s3    P 	$&		 6 6uzz3 GHHr   r  c                   [        U[        5      (       d  Sn[        U5      eU R                  [        R
                  " U5      R                  US95      $ )a  
Flag a column as sorted.

This can speed up future operations.

Parameters
----------
column
    Column that is sorted
descending
    Whether the column is sorted in descending order.

Warnings
--------
This can lead to incorrect results if the data is NOT sorted!!
Use with care!
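
Examples
--------
A minimal illustrative sketch: flag a column that is already known to be
sorted so that later operations can take the fast path.

>>> lf = pl.LazyFrame({"a": [1, 2, 3]})
>>> lf.set_sorted("a").max().collect()  # doctest: +SKIP
shape: (1, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 3   │
└─────┘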

z6expected a 'str' for argument 'column' in 'set_sorted'r  )r   r   r   r  r  r  
set_sorted)r   columnr  r   s       r   r  LazyFrame.set_sorted  sG    4 &#&&JCC.   v!9!9Z!9!PQQr   )r  r  include_nullsr  c          
       ^^^ [        X5        US;   a  Sn[        SSS9  US;  a  SU< 3n[        U5      eSn	UcR  Uc.  Uc+  S	n	S
n
U R                  U
5      n UR                  U
5      nU
/=pEO$Uc  Sn[        U5      eUc  Sn[        U5      eOU=pE[	        U[
        5      (       a  U/n[	        U[
        5      (       a  U/nU R                  5       nU H  nX;  d  M
  SU< S3n[        U5      e   UR                  5       nU H  nX;  d  M
  SU< S3n[        U5      e   US:w  a2  [        U5      [        U5      :X  a  U	(       a  U R                  W
5      $ U $ [        U5      R                  U5      [        U5      -
  nT(       a:  SmUR                  [        R                  " S	5      R                  TS   5      5      nOSmSm/ U4S jU 5       QTQnU R                  UR                   " / UQUQTQ76 UUUTS	US9R                  UUU4S jU 5       5      R                  U5      nU	(       a  UR                  W
5      nU R#                  UR$                  5      $ )uJ  
Update the values in this `LazyFrame` with the values in `other`.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Parameters
----------
other
    LazyFrame that will be used to update the values
on
    Column names that will be joined on. If set to `None` (default),
    the implicit row index of each frame is used as a join key.
how : {'left', 'inner', 'full'}
    * 'left' will keep all rows from the left table; rows may be duplicated
      if multiple rows in the right frame match the left row's key.
    * 'inner' keeps only those rows where the key exists in both frames.
    * 'full' will update existing rows where the key matches while also
      adding any new rows contained in the given frame.
left_on
   Join column(s) of the left DataFrame.
right_on
   Join column(s) of the right DataFrame.
include_nulls
    Overwrite values in the left frame with null values from the right frame.
    If set to `False` (default), null values in the right frame are ignored.
maintain_order : {'none', 'left', 'right', 'left_right', 'right_left'}
    Which order of rows from the inputs to preserve. See :func:`~LazyFrame.join`
    for details. Unlike `join` this function preserves the left order by
    default.

Notes
-----
This is syntactic sugar for a left/inner join that preserves the order
of the left `DataFrame` by default, with an optional coalesce when
`include_nulls = False`.

Examples
--------
>>> lf = pl.LazyFrame(
...     {
...         "A": [1, 2, 3, 4],
...         "B": [400, 500, 600, 700],
...     }
... )
>>> lf.collect()
shape: (4, 2)
┌─────┬─────┐
│ A   ┆ B   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ 400 │
│ 2   ┆ 500 │
│ 3   ┆ 600 │
│ 4   ┆ 700 │
└─────┴─────┘
>>> new_lf = pl.LazyFrame(
...     {
...         "B": [-66, None, -99],
...         "C": [5, 3, 1],
...     }
... )

Update `df` values with the non-null values in `new_df`, by row index:

>>> lf.update(new_lf).collect()
shape: (4, 2)
┌─────┬─────┐
│ A   ┆ B   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ -66 │
│ 2   ┆ 500 │
│ 3   ┆ -99 │
│ 4   ┆ 700 │
└─────┴─────┘

Update `df` values with the non-null values in `new_df`, by row index,
but only keeping those rows that are common to both frames:

>>> lf.update(new_lf, how="inner").collect()
shape: (3, 2)
┌─────┬─────┐
│ A   ┆ B   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ -66 │
│ 2   ┆ 500 │
│ 3   ┆ -99 │
└─────┴─────┘

Update `df` values with the non-null values in `new_df`, using a full
outer join strategy that defines explicit join columns in each frame:

>>> lf.update(new_lf, left_on=["A"], right_on=["C"], how="full").collect()
shape: (5, 2)
┌─────┬─────┐
│ A   ┆ B   │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1   ┆ -99 │
│ 2   ┆ 500 │
│ 3   ┆ 600 │
│ 4   ┆ 700 │
│ 5   ┆ -66 │
└─────┴─────┘

Update `df` values including null values in `new_df`, using a full
outer join strategy that defines explicit join columns in each frame:

>>> lf.update(
...     new_lf, left_on="A", right_on="C", how="full", include_nulls=True
... ).collect()
shape: (5, 2)
┌─────┬──────┐
│ A   ┆ B    │
│ --- ┆ ---  │
│ i64 ┆ i64  │
╞═════╪══════╡
│ 1   ┆ -99  │
│ 2   ┆ 500  │
│ 3   ┆ null │
│ 4   ┆ 700  │
│ 5   ┆ -66  │
└─────┴──────┘
)r&  r)  rg  r'  r(  r  )r&  innerrg  z6`how` must be one of {'left', 'inner', 'full'}; found FT__POLARS_ROW_INDEXz#missing join columns for left framez$missing join columns for right framezleft join column z
 not foundzright join column )__POLARS_VALIDITYr   rY  __POLARS_RIGHTc              3  .   >#    U  H
  o T 3v   M     g 7fr   rY  )r  r  tmp_names     r   r  #LazyFrame.update.<locals>.<genexpr>  s     F+$F8*-+s   )r  r  r,  r  r  r  c              3    >#    U  H  nT(       a  [         R                  " [         R                  " T5      R                  5       5      R	                  [         R                  " U5      5      R                  [         R                  " U T 35      5      O/[         R                  " U T 3[         R                  " U5      /5      R                  U5      v   M     g 7fr   )r  whenr  is_nullthen	otherwiser  r  )r  r  r  r  validitys     r   r  r    s      
 (D % FF155?2245T!%%+&YquuvhZ%89:vhZ%8!%%+$FG%+ (s   CC)r6   r#   r   rn  r   r   r   r  rA  r  intersectionr  r  r  r  r+  r  r   r   )r   r  r  r,  r  r  r  r  r   row_index_usedrow_index_nameleft_schemar  right_schemaright_otherdrop_columnsrN  r  r  s         `          @@r   r  LazyFrame.update/  s   ^ 	$&--C%L!
 //LSGTCS/!:8#3!%!5**>:,,^<&4%55( ??C$S/)#@C$S/) $
 "$#Ggs##iGh$$ zH))+D&)$< o%  ++-D'*4(*= o%  &=S.#h-?yy00K ,'44[ACMQ -H&&quuT{'8'8!'EFEH#RF+FRRII@h@@x@!-   \ 
 (
 
 T,+ 	. [[0F,,r   c                T    U R                  U R                  R                  5       5      $ )u  
Return the number of non-null elements for each column.

Examples
--------
>>> lf = pl.LazyFrame(
...     {"a": [1, 2, 3, 4], "b": [1, 2, 1, None], "c": [None, None, None, None]}
... )
>>> lf.count().collect()
shape: (1, 3)
┌─────┬─────┬─────┐
│ a   ┆ b   ┆ c   │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 4   ┆ 3   ┆ 0   │
└─────┴─────┴─────┘
)r   r   rx  r   s    r   rx  LazyFrame.count   s     & 		 122r   z`LazyFrame.melt` is deprecated; use `LazyFrame.unpivot` instead, with `index` instead of `id_vars` and `on` instead of `value_vars`)r  c               (    U R                  UUUUUS9$ )a  
Unpivot a DataFrame from wide to long format.

Optionally leaves identifiers set.

This function is useful to massage a DataFrame into a format where one or more
columns are identifier variables (id_vars) while all other columns, considered
measured variables (value_vars), are "unpivoted" to the row axis leaving just
two non-identifier columns, 'variable' and 'value'.

.. deprecated:: 1.0.0
    Use the :meth:`.unpivot` method instead.

Parameters
----------
id_vars
    Column(s) or selector(s) to use as identifier variables.
value_vars
    Column(s) or selector(s) to use as values variables; if `value_vars`
    is empty all columns that are not in `id_vars` will be used.
variable_name
    Name to give to the `variable` column. Defaults to "variable"
value_name
    Name to give to the `value` column. Defaults to "value"
streamable
    Allow this node to run in the streaming engine.
    If this runs in streaming, the output of the unpivot operation
    will not have a stable ordering.
)r  r  r  r  r  )r  )r   id_vars
value_varsr  r  r  s         r   meltLazyFrame.melt5  s*    T ||'!!  
 	
r   c                ,    [         R                  " XUS9$ )uC  
Run a query remotely on Polars Cloud.

This allows you to run Polars remotely on
one or more workers via several strategies
for distributed compute.

Read more in the `Announcement post <https://pola.rs/posts/polars-cloud-what-we-are-building/>`_

Parameters
----------
context
    Compute context in which queries are executed.
    If none given, it will take the default context.
plan_type: {'plain', 'dot'}
    Whether to give a dot diagram or a plain-text
    version of the logical plan.

Examples
--------
Run a query on a cloud instance.

>>> lf = pl.LazyFrame([1, 2, 3]).sum()
>>> in_progress = lf.remote().collect()  # doctest: +SKIP
>>> # do some other work
>>> in_progress.await_result()  # doctest: +SKIP
shape: (1, 1)
┌──────────┐
│ column_0 │
│ ---      │
│ i64      │
╞══════════╡
│ 6        │
└──────────┘

Run a query distributed.

>>> lf = (
...     pl.scan_parquet("s3://my_bucket/").group_by("key").agg(pl.sum("values"))
... )
>>> in_progress = lf.remote().distributed().collect()  # doctest: +SKIP
>>> in_progress.await_result()  # doctest: +SKIP
shape: (1, 1)
┌──────────┐
│ column_0 │
│ ---      │
│ i64      │
╞══════════╡
│ 6        │
└──────────┘

)r>  context	plan_type)pcLazyFrameRemote)r   r  r  s      r   remoteLazyFrame.remoteg  s    t !!TiPPr   raiseforbid)missing_columnsmissing_struct_fieldsextra_columnsextra_struct_fieldsinteger_cast
float_castc                 ^ SSK Jm      SU4S jjn[        U[        5      (       a  [	        U5      n	OUn	[        U[        5      (       a+  UR                  5        V
Vs0 s H  u  pX" U5      _M     nn
nO[        UT5      (       a	  U" U5      nOUn[        R                  U R                  R                  U	UUUUUUS95      $ s  snn
f )u  
Match or evolve the schema of a LazyFrame into a specific schema.

By default, match_to_schema returns an error if the input schema does not
exactly match the target schema. It also allows columns to be freely reordered,
with additional coercion rules available through optional parameters.

.. warning::
    This functionality is considered **unstable**. It may be changed
    at any point without it being considered a breaking change.

Parameters
----------
schema
    Target schema to match or evolve to.
missing_columns
    Raise or insert missing columns from the input with respect to the `schema`.

    This can also be an expression per column with what to insert if it is
    missing.
missing_struct_fields
    Raise or insert missing struct fields from the input with respect to the
    `schema`.
extra_columns
    Raise or ignore extra columns from the input with respect to the `schema`.
extra_struct_fields
    Raise or ignore extra struct fields from the input with respect to the
    `schema`.
integer_cast
    Forbid or upcast integer columns from the input to the respective column
    in `schema`.
float_cast
    Forbid or upcast float columns from the input to the respective column
    in `schema`.

Examples
--------
Ensuring the schema matches

>>> lf = pl.LazyFrame({"a": [1, 2, 3], "b": ["A", "B", "C"]})
>>> lf.match_to_schema({"a": pl.Int64, "b": pl.String}).collect()
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ str │
╞═════╪═════╡
│ 1   ┆ A   │
│ 2   ┆ B   │
│ 3   ┆ C   │
└─────┴─────┘
>>> (lf.match_to_schema({"a": pl.Int64}).collect())  # doctest: +SKIP
polars.exceptions.SchemaError: extra columns in `match_to_schema`: "b"

Adding missing columns

>>> (
...     pl.LazyFrame({"a": [1, 2, 3]})
...     .match_to_schema(
...         {"a": pl.Int64, "b": pl.String},
...         missing_columns="insert",
...     )
...     .collect()
... )
shape: (3, 2)
┌─────┬──────┐
│ a   ┆ b    │
│ --- ┆ ---  │
│ i64 ┆ str  │
╞═════╪══════╡
│ 1   ┆ null │
│ 2   ┆ null │
│ 3   ┆ null │
└─────┴──────┘
>>> (
...     pl.LazyFrame({"a": [1, 2, 3]})
...     .match_to_schema(
...         {"a": pl.Int64, "b": pl.String},
...         missing_columns={"b": pl.col.a.cast(pl.String)},
...     )
...     .collect()
... )
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ str │
╞═════╪═════╡
│ 1   ┆ 1   │
│ 2   ┆ 2   │
│ 3   ┆ 3   │
└─────┴─────┘

Removing extra columns

>>> (
...     pl.LazyFrame({"a": [1, 2, 3], "b": ["A", "B", "C"]})
...     .match_to_schema(
...         {"a": pl.Int64},
...         extra_columns="ignore",
...     )
...     .collect()
... )
shape: (3, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
└─────┘

Upcasting integers and floats

>>> (
...     pl.LazyFrame(
...         {"a": [1, 2, 3], "b": [1.0, 2.0, 3.0]},
...         schema={"a": pl.Int32, "b": pl.Float32},
...     )
...     .match_to_schema(
...         {"a": pl.Int64, "b": pl.Float64},
...         integer_cast="upcast",
...         float_cast="upcast",
...     )
...     .collect()
... )
shape: (3, 2)
┌─────┬─────┐
│ a   ┆ b   │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1   ┆ 1.0 │
│ 2   ┆ 2.0 │
│ 3   ┆ 3.0 │
└─────┴─────┘
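
Struct columns are matched the same way through `missing_struct_fields` and
`extra_struct_fields`. A sketch of inserting a missing struct field
(illustrative only; with `"insert"` the missing field `y` is added, analogous
to `missing_columns="insert"` above):

>>> (
...     pl.LazyFrame({"s": [{"x": 1}, {"x": 2}]})
...     .match_to_schema(
...         {"s": pl.Struct({"x": pl.Int64, "y": pl.String})},
...         missing_struct_fields="insert",
...     )
...     .collect()
... )  # doctest: +SKIP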
        """
        from polars import Expr  # local import avoids a circular import at module load time

        def prepare_missing_columns(
            value: Literal["insert", "raise"] | Expr,
        ) -> Literal["insert", "raise"] | PyExpr:
            # Lower expressions to their underlying PyExpr before handing them
            # to the native engine; plain "insert"/"raise" strings pass through.
            if isinstance(value, Expr):
                return value._pyexpr
            return value

        schema_prep = Schema(schema) if isinstance(schema, Mapping) else schema

        if isinstance(missing_columns, Mapping):
            missing_columns_pyexpr = {
                name: prepare_missing_columns(value)
                for name, value in missing_columns.items()
            }
        elif isinstance(missing_columns, Expr):
            missing_columns_pyexpr = prepare_missing_columns(missing_columns)
        else:
            missing_columns_pyexpr = missing_columns

        return self._from_pyldf(
            self._ldf.match_to_schema(
                schema_prep,
                missing_columns=missing_columns_pyexpr,
                missing_struct_fields=missing_struct_fields,
                extra_columns=extra_columns,
                extra_struct_fields=extra_struct_fields,
                integer_cast=integer_cast,
                float_cast=float_cast,
            )
        )

    def _to_metadata(
        self,
        columns: None | str | list[str] = None,
        stats: None | str | list[str] = None,
    ) -> DataFrame:
        """
Get all runtime metadata for each column.

This is unstable and is meant for debugging purposes.
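
Examples
--------
Illustrative only; the exact metadata columns returned depend on the Polars
version:

>>> pl.LazyFrame({"a": [1, 2, 3]})._to_metadata()  # doctest: +SKIP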
        """
        lf = self
        if columns is not None:
            if isinstance(columns, str):
                columns = [columns]
            lf = lf.select(columns)
        return lf.collect()._to_metadata(stats=stats)