diff --git a/pandas-stubs/_testing/__init__.pyi b/pandas-stubs/_testing/__init__.pyi
index 986655175..6342a9248 100644
--- a/pandas-stubs/_testing/__init__.pyi
+++ b/pandas-stubs/_testing/__init__.pyi
@@ -40,7 +40,6 @@ def assert_almost_equal(
     atol: float = 1e-8,
     **kwargs: Any,
 ) -> None: ...
-def assert_dict_equal(left: dict, right: dict, compare_keys: bool = True) -> None: ...
 def assert_index_equal(
     left: Index,
     right: Index,
diff --git a/pandas-stubs/_typing.pyi b/pandas-stubs/_typing.pyi
index 794923014..2040c8b69 100644
--- a/pandas-stubs/_typing.pyi
+++ b/pandas-stubs/_typing.pyi
@@ -182,7 +182,7 @@ Level: TypeAlias = Hashable
 Shape: TypeAlias = tuple[int, ...]
 Suffixes: TypeAlias = tuple[str | None, str | None] | list[str | None]
 Ordered: TypeAlias = bool | None
-JSONSerializable: TypeAlias = PythonScalar | list[Any] | dict
+JSONSerializable: TypeAlias = PythonScalar | list[Any] | dict[str, Any]
 Frequency: TypeAlias = str | BaseOffset
 PeriodFrequency: TypeAlias = (
     str
diff --git a/pandas-stubs/core/computation/eval.pyi b/pandas-stubs/core/computation/eval.pyi
index b1bb890e4..2dd492510 100644
--- a/pandas-stubs/core/computation/eval.pyi
+++ b/pandas-stubs/core/computation/eval.pyi
@@ -1,4 +1,7 @@
-from collections.abc import Mapping
+from collections.abc import (
+    Mapping,
+    MutableSequence,
+)
 from typing import (
     Any,
     Literal,
@@ -21,7 +24,7 @@ def eval(
     engine: Literal["python", "numexpr"] | None = ...,
     local_dict: dict[str, Any] | None = None,
     global_dict: dict[str, Any] | None = None,
-    resolvers: list[Mapping] | None = ...,
+    resolvers: MutableSequence[Mapping[Any, Any]] | None = ...,
     level: int = 0,
     target: object | None = None,
     inplace: bool = False,
diff --git a/pandas-stubs/core/frame.pyi b/pandas-stubs/core/frame.pyi
index aaaa8461a..096067bac 100644
--- a/pandas-stubs/core/frame.pyi
+++ b/pandas-stubs/core/frame.pyi
@@ -13,6 +13,7 @@ from collections.abc import (
     Iterator,
     Mapping,
     MutableMapping,
+    MutableSequence,
     Sequence,
 )
 import datetime as dt
@@ -502,7 +503,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         self,
         orient: Literal["index"],
         *,
-        into: defaultdict,
+        into: defaultdict[Any, Any],
         index: Literal[True] = True,
     ) -> defaultdict[Hashable, dict[Hashable, Any]]: ...
     @overload
@@ -550,7 +551,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         self,
         orient: Literal["split", "tight"],
         *,
-        into: MutableMapping | type[MutableMapping],
+        into: MutableMapping[Any, Any] | type[MutableMapping],
         index: bool = ...,
     ) -> MutableMapping[str, list[Any]]: ...
     @overload
@@ -907,7 +908,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         engine: Literal["python", "numexpr"] | None = ...,
         local_dict: dict[_str, Any] | None = ...,
         global_dict: dict[_str, Any] | None = ...,
-        resolvers: list[Mapping] | None = ...,
+        resolvers: MutableSequence[Mapping[Any, Any]] | None = ...,
         level: int = ...,
         target: object | None = ...,
         inplace: Literal[True],
@@ -922,7 +923,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         engine: Literal["python", "numexpr"] | None = ...,
         local_dict: dict[_str, Any] | None = ...,
         global_dict: dict[_str, Any] | None = ...,
-        resolvers: list[Mapping] | None = ...,
+        resolvers: MutableSequence[Mapping[Any, Any]] | None = ...,
         level: int = ...,
         target: object | None = ...,
     ) -> Self: ...
@@ -1022,7 +1023,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
     @overload
     def fillna(
         self,
-        value: Scalar | NAType | dict | Series | DataFrame | None = ...,
+        value: Scalar | NAType | dict[Any, Any] | Series | DataFrame | None = ...,
         *,
         axis: Axis | None = ...,
         limit: int = ...,
@@ -1032,7 +1033,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
     @overload
     def fillna(
         self,
-        value: Scalar | NAType | dict | Series | DataFrame | None = ...,
+        value: Scalar | NAType | dict[Any, Any] | Series | DataFrame | None = ...,
         *,
         axis: Axis | None = ...,
         limit: int = ...,
@@ -1715,7 +1716,10 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         validate: MergeValidate | None = None,
     ) -> Self: ...
     def round(
-        self, decimals: int | dict | Series = ..., *args: Any, **kwargs: Any
+        self,
+        decimals: int | dict[Any, Any] | Series = ...,
+        *args: Any,
+        **kwargs: Any,
     ) -> Self: ...
     def corr(
         self,
@@ -2665,7 +2669,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         compression: CompressionOptions = ...,
         index: _bool | None = ...,
         indent: int | None = ...,
-        storage_options: dict | None = ...,
+        storage_options: dict[Any, Any] | None = ...,
         mode: Literal["a"],
     ) -> None: ...
     @overload
@@ -2683,7 +2687,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         compression: CompressionOptions = ...,
         index: _bool | None = ...,
         indent: int | None = ...,
-        storage_options: dict | None = ...,
+        storage_options: dict[Any, Any] | None = ...,
         mode: Literal["a"],
     ) -> _str: ...
     @overload
@@ -2701,7 +2705,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         compression: CompressionOptions = ...,
         index: _bool | None = ...,
         indent: int | None = ...,
-        storage_options: dict | None = ...,
+        storage_options: dict[Any, Any] | None = ...,
         mode: Literal["w"] = ...,
     ) -> _str: ...
     @overload
@@ -2719,7 +2723,7 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack):
         compression: CompressionOptions = ...,
         index: _bool | None = ...,
         indent: int | None = ...,
-        storage_options: dict | None = ...,
+        storage_options: dict[Any, Any] | None = ...,
         mode: Literal["w"] = ...,
     ) -> None: ...
     @overload
diff --git a/pandas-stubs/core/groupby/generic.pyi b/pandas-stubs/core/groupby/generic.pyi
index 8b14017c4..754d6b280 100644
--- a/pandas-stubs/core/groupby/generic.pyi
+++ b/pandas-stubs/core/groupby/generic.pyi
@@ -219,7 +219,7 @@ _TT = TypeVar("_TT", bound=Literal[True, False])
 class DFCallable1(Protocol[P]):  # ty: ignore[invalid-argument-type]
     def __call__(
         self, df: DataFrame, /, *args: P.args, **kwargs: P.kwargs
-    ) -> Scalar | list[Any] | dict: ...
+    ) -> Scalar | list[Any] | dict[Hashable, Any]: ...
 
 class DFCallable2(Protocol[P]):  # ty: ignore[invalid-argument-type]
     def __call__(
diff --git a/pandas-stubs/core/reshape/pivot.pyi b/pandas-stubs/core/reshape/pivot.pyi
index b88deca97..eb7d9d479 100644
--- a/pandas-stubs/core/reshape/pivot.pyi
+++ b/pandas-stubs/core/reshape/pivot.pyi
@@ -35,7 +35,7 @@ from pandas._typing import (
 _PivotAggCallable: TypeAlias = Callable[[Series], ScalarT]
 
 _PivotAggFunc: TypeAlias = (
-    _PivotAggCallable
+    _PivotAggCallable[ScalarT]
     | np.ufunc
     | Literal["mean", "sum", "count", "min", "max", "median", "std", "var"]
 )
@@ -67,11 +67,17 @@ _Values: TypeAlias = SequenceNotStr[Any] | _ExtendedAnyArrayLike
 @overload
 def pivot_table(
     data: DataFrame,
-    values: _PivotTableValuesTypes = None,
-    index: _PivotTableIndexTypes = None,
-    columns: _PivotTableColumnsTypes = None,
+    values: _PivotTableValuesTypes[
+        Hashable  # ty: ignore[invalid-type-arguments]
+    ] = None,
+    index: _PivotTableIndexTypes[Hashable] = None,  # ty: ignore[invalid-type-arguments]
+    columns: _PivotTableColumnsTypes[
+        Hashable  # ty: ignore[invalid-type-arguments]
+    ] = None,
     aggfunc: (
-        _PivotAggFunc | Sequence[_PivotAggFunc] | Mapping[Hashable, _PivotAggFunc]
+        _PivotAggFunc[Scalar]
+        | Sequence[_PivotAggFunc[Scalar]]
+        | Mapping[Any, _PivotAggFunc[Scalar]]
     ) = "mean",
     fill_value: Scalar | None = None,
     margins: bool = False,
@@ -85,12 +91,20 @@ def pivot_table(
 @overload
 def pivot_table(
     data: DataFrame,
-    values: _PivotTableValuesTypes = None,
+    values: _PivotTableValuesTypes[
+        Hashable  # ty: ignore[invalid-type-arguments]
+    ] = None,
     *,
     index: Grouper,
-    columns: _PivotTableColumnsTypes | np_ndarray | Index[Any] = None,
+    columns: (
+        _PivotTableColumnsTypes[Hashable]  # ty: ignore[invalid-type-arguments]
+        | np_ndarray
+        | Index[Any]
+    ) = None,
     aggfunc: (
-        _PivotAggFunc | Sequence[_PivotAggFunc] | Mapping[Hashable, _PivotAggFunc]
+        _PivotAggFunc[Scalar]
+        | Sequence[_PivotAggFunc[Scalar]]
+        | Mapping[Any, _PivotAggFunc[Scalar]]
     ) = "mean",
     fill_value: Scalar | None = None,
     margins: bool = False,
@@ -102,12 +116,20 @@ def pivot_table(
 @overload
 def pivot_table(
     data: DataFrame,
-    values: _PivotTableValuesTypes = None,
-    index: _PivotTableIndexTypes | np_ndarray | Index[Any] = None,
+    values: _PivotTableValuesTypes[
+        Hashable  # ty: ignore[invalid-type-arguments]
+    ] = None,
+    index: (
+        _PivotTableIndexTypes[Hashable]  # ty: ignore[invalid-type-arguments]
+        | np_ndarray
+        | Index[Any]
+    ) = None,
     *,
     columns: Grouper,
     aggfunc: (
-        _PivotAggFunc | Sequence[_PivotAggFunc] | Mapping[Hashable, _PivotAggFunc]
+        _PivotAggFunc[Scalar]
+        | Sequence[_PivotAggFunc[Scalar]]
+        | Mapping[Any, _PivotAggFunc[Scalar]]
     ) = "mean",
     fill_value: Scalar | None = None,
     margins: bool = False,
diff --git a/pandas-stubs/core/series.pyi b/pandas-stubs/core/series.pyi
index 54b224e5c..469d44ea0 100644
--- a/pandas-stubs/core/series.pyi
+++ b/pandas-stubs/core/series.pyi
@@ -776,7 +776,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     def to_dict(self, *, into: type[dict] = ...) -> dict[Any, S1]: ...
     @overload
     def to_dict(
-        self, *, into: type[MutableMapping] | MutableMapping
+        self, *, into: type[MutableMapping] | MutableMapping[Any, Any]
     ) -> MutableMapping[Hashable, S1]: ...
     def to_frame(self, name: object | None = ...) -> DataFrame: ...
     @overload
@@ -1104,7 +1104,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     def unstack(
         self,
         level: IndexLabel = -1,
-        fill_value: int | _str | dict | None = None,
+        fill_value: int | _str | dict[Any, Any] | None = None,
         sort: _bool = True,
     ) -> DataFrame: ...
     @overload
@@ -1170,7 +1170,13 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     def apply(
         self,
         func: Callable[
-            ..., Scalar | Sequence[Any] | AbstractSet[Any] | Mapping | NAType | None
+            ...,
+            Scalar
+            | Sequence[Any]
+            | AbstractSet[Any]
+            | Mapping[Any, Any]
+            | NAType
+            | None,
         ],
         convertDType: _bool = ...,
         args: tuple[Any, ...] = ...,
@@ -1258,7 +1264,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     @overload
     def fillna(
         self,
-        value: Scalar | NAType | dict | Series[S1] | DataFrame | None = ...,
+        value: Scalar | NAType | dict[Any, Any] | Series[S1] | DataFrame | None = ...,
         *,
         axis: AxisIndex = ...,
         limit: int | None = ...,
@@ -1267,7 +1273,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     @overload
     def fillna(
         self,
-        value: Scalar | NAType | dict | Series[S1] | DataFrame | None = ...,
+        value: Scalar | NAType | dict[Any, Any] | Series[S1] | DataFrame | None = ...,
         *,
         axis: AxisIndex = ...,
         limit: int | None = ...,
@@ -4637,7 +4643,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     def rename_axis(
         self,
         *,
-        index: Scalar | ListLike | Callable[..., Any] | dict | None = ...,
+        index: Scalar | ListLike | Callable[..., Any] | dict[Any, Any] | None = ...,
         copy: _bool = ...,
         inplace: Literal[True],
     ) -> None: ...
@@ -4646,7 +4652,7 @@ class Series(IndexOpsMixin[S1], ElementOpsMixin[S1], NDFrame):
     def rename_axis(
         self,
         *,
-        index: Scalar | ListLike | Callable[..., Any] | dict | None = ...,
+        index: Scalar | ListLike | Callable[..., Any] | dict[Any, Any] | None = ...,
         copy: _bool = ...,
         inplace: Literal[False] = False,
     ) -> Self: ...
diff --git a/pandas-stubs/core/tools/timedeltas.pyi b/pandas-stubs/core/tools/timedeltas.pyi
index 3057ce808..895e7cfc2 100644
--- a/pandas-stubs/core/tools/timedeltas.pyi
+++ b/pandas-stubs/core/tools/timedeltas.pyi
@@ -1,4 +1,3 @@
-from collections.abc import Sequence
 from datetime import timedelta
 from typing import overload
 
@@ -28,14 +27,7 @@ def to_timedelta(
 ) -> Series[Timedelta]: ...
 @overload
 def to_timedelta(
-    arg: (
-        SequenceNotStr
-        | Sequence[float | timedelta]
-        | tuple[str | float | timedelta, ...]
-        | range
-        | ArrayLike
-        | Index
-    ),
+    arg: SequenceNotStr[str | float | timedelta] | range | ArrayLike | Index,
     unit: TimeDeltaUnitChoices | None = ...,
     errors: RaiseCoerce = ...,
 ) -> TimedeltaIndex: ...
diff --git a/pandas-stubs/io/clipboards.pyi b/pandas-stubs/io/clipboards.pyi
index 25a1ed180..917b3761b 100644
--- a/pandas-stubs/io/clipboards.pyi
+++ b/pandas-stubs/io/clipboards.pyi
@@ -36,7 +36,7 @@ def read_clipboard(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
     usecols: UsecolsArgType = ...,
-    dtype: DtypeArg | defaultdict | None = ...,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = ...,
     engine: CSVEngine | None = ...,
     converters: dict[int | str, Callable[[str], Any]] = ...,
     true_values: list[str] = ...,
@@ -95,7 +95,7 @@ def read_clipboard(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
     usecols: UsecolsArgType = ...,
-    dtype: DtypeArg | defaultdict | None = ...,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = ...,
     engine: CSVEngine | None = ...,
     converters: dict[int | str, Callable[[str], Any]] = ...,
     true_values: list[str] = ...,
@@ -154,7 +154,7 @@ def read_clipboard(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
     usecols: UsecolsArgType = ...,
-    dtype: DtypeArg | defaultdict | None = ...,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = ...,
     engine: CSVEngine | None = ...,
     converters: dict[int | str, Callable[[str], Any]] = ...,
     true_values: list[str] = ...,
diff --git a/pandas-stubs/io/json/_normalize.pyi b/pandas-stubs/io/json/_normalize.pyi
index 254be890f..684954ae5 100644
--- a/pandas-stubs/io/json/_normalize.pyi
+++ b/pandas-stubs/io/json/_normalize.pyi
@@ -1,9 +1,11 @@
+from typing import Any
+
 from pandas import DataFrame
 
 from pandas._typing import IgnoreRaise
 
 def json_normalize(
-    data: dict | list[dict],
+    data: dict[str, Any] | list[dict[str, Any]],
     record_path: str | list[str] | None = None,
     meta: str | list[str | list[str]] | None = None,
     meta_prefix: str | None = None,
diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi
index 8d0d4d8c4..e414b4ca3 100644
--- a/pandas-stubs/io/parsers/readers.pyi
+++ b/pandas-stubs/io/parsers/readers.pyi
@@ -44,7 +44,7 @@ def read_csv(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = None,
     usecols: UsecolsArgType[HashableT] = None,
-    dtype: DtypeArg | defaultdict | None = None,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = None,
     engine: CSVEngine | None = None,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
@@ -107,7 +107,7 @@ def read_csv(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = None,
     usecols: UsecolsArgType[HashableT] = None,
-    dtype: DtypeArg | defaultdict | None = None,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = None,
     engine: CSVEngine | None = None,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
@@ -170,7 +170,7 @@ def read_csv(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
     usecols: UsecolsArgType[HashableT] = ...,
-    dtype: DtypeArg | defaultdict | None = ...,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = ...,
     engine: CSVEngine | None = ...,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
@@ -233,7 +233,7 @@ def read_table(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = None,
     usecols: UsecolsArgType[HashableT] = None,
-    dtype: DtypeArg | defaultdict | None = None,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = None,
     engine: CSVEngine | None = None,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
@@ -296,7 +296,7 @@ def read_table(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = None,
     usecols: UsecolsArgType[HashableT] = None,
-    dtype: DtypeArg | defaultdict | None = None,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = None,
     engine: CSVEngine | None = None,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
@@ -359,7 +359,7 @@ def read_table(
     names: ListLikeHashable | None = ...,
     index_col: int | str | Sequence[str | int] | Literal[False] | None = None,
     usecols: UsecolsArgType[HashableT] = None,
-    dtype: DtypeArg | defaultdict | None = None,
+    dtype: DtypeArg | defaultdict[Any, Any] | None = None,
     engine: CSVEngine | None = None,
     converters: (
         Mapping[int | str, Callable[[str], Any]]
diff --git a/pandas-stubs/plotting/_core.pyi b/pandas-stubs/plotting/_core.pyi
index 2e0adf728..527224442 100644
--- a/pandas-stubs/plotting/_core.pyi
+++ b/pandas-stubs/plotting/_core.pyi
@@ -171,8 +171,8 @@ class PlotAccessor:
         colorbar: bool = ...,
         position: float = ...,
         table: bool | Series | DataFrame = ...,
-        yerr: DataFrame | Series | ArrayLike | dict | str = ...,
-        xerr: DataFrame | Series | ArrayLike | dict | str = ...,
+        yerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
+        xerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
         stacked: bool = ...,
         secondary_y: bool | list[HashableT2] | tuple[HashableT2, ...] = ...,
         mark_right: bool = ...,
@@ -225,8 +225,8 @@ class PlotAccessor:
         colorbar: bool = ...,
         position: float = ...,
         table: bool | Series | DataFrame = ...,
-        yerr: DataFrame | Series | ArrayLike | dict | str = ...,
-        xerr: DataFrame | Series | ArrayLike | dict | str = ...,
+        yerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
+        xerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
         stacked: bool = ...,
         secondary_y: bool | list[HashableT3] | tuple[HashableT3, ...] = ...,
         mark_right: bool = ...,
@@ -268,8 +268,8 @@ class PlotAccessor:
         colorbar: bool = ...,
         position: float = ...,
         table: bool | Series | DataFrame = ...,
-        yerr: DataFrame | Series | ArrayLike | dict | str = ...,
-        xerr: DataFrame | Series | ArrayLike | dict | str = ...,
+        yerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
+        xerr: DataFrame | Series | ArrayLike | dict[Any, Any] | str = ...,
         stacked: bool = ...,
         secondary_y: bool | list[HashableT3] | tuple[HashableT3, ...] = ...,
         mark_right: bool = ...,
diff --git a/tests/test_frame.py b/tests/test_frame.py
index 99f8cfa7a..b420d8e54 100644
--- a/tests/test_frame.py
+++ b/tests/test_frame.py
@@ -3991,10 +3991,10 @@ def test_to_dict_into_defaultdict() -> None:
     """Test DataFrame.to_dict with `into` is an instance of defaultdict[Any, list]"""
     data = pd.DataFrame({("str", "rts"): [[1, 2, 4], [2, 3], [3]]})
 
-    target: defaultdict[Any, list] = defaultdict(list)
+    target: defaultdict[Hashable, list[Any]] = defaultdict(list)
 
     check(
-        assert_type(data.to_dict(into=target), defaultdict[Any, list]),
+        assert_type(data.to_dict(into=target), defaultdict[Hashable, list[Any]]),
         defaultdict,
         tuple,
     )
@@ -4011,7 +4011,9 @@ def test_to_dict_into_defaultdict() -> None:
         str,
     )
     check(
-        assert_type(data.to_dict("records", into=target), list[defaultdict[Any, list]]),
+        assert_type(
+            data.to_dict("records", into=target), list[defaultdict[Hashable, list[Any]]]
+        ),
         list,
         defaultdict,
    )
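
A quick illustration of how the narrowed annotations read from the caller's side. This snippet is a sketch, not part of the diff, and the variable names are made up: DataFrame.fillna and DataFrame.round now take dict[Any, Any], json_normalize takes dict[str, Any] | list[dict[str, Any]], and the sequence overload of to_timedelta accepts SequenceNotStr[str | float | timedelta].

# Sketch only; not part of the stub changes above. Names are hypothetical.
import pandas as pd

df = pd.DataFrame({"a": [1.0, None, 3.25], "b": [4.0, 5.0, None]})

filled = df.fillna({"a": 0.0, "b": -1.0})  # dict value per column
rounded = filled.round({"a": 0, "b": 1})  # dict of per-column decimals

# json_normalize with a list of dicts keyed by str.
records = pd.json_normalize([{"id": 1, "info": {"x": 2}}, {"id": 2, "info": {"x": 3}}])

# A plain list of floats with a unit still resolves to the TimedeltaIndex overload.
tdi = pd.to_timedelta([1.5, 2.5, 3.0], unit="s")

print(filled, rounded, records, tdi, sep="\n")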
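Similarly, a minimal sketch (hypothetical data, not part of the diff) of the parametrized pivot_table overloads: string aggregator names, scalar-returning callables, and mappings of either are all expected to satisfy the new _PivotAggFunc[Scalar] union.

# Sketch only; exercises pivot_table's aggfunc parameter.
import pandas as pd

sales = pd.DataFrame(
    {"region": ["N", "N", "S"], "item": ["a", "b", "a"], "qty": [1, 2, 3]}
)

by_region = pd.pivot_table(sales, values="qty", index="region", aggfunc="sum")
spread = pd.pivot_table(
    sales, values="qty", index="item", aggfunc=lambda s: s.max() - s.min()
)
by_mapping = pd.pivot_table(sales, values="qty", index="region", aggfunc={"qty": "mean"})

print(by_region, spread, by_mapping, sep="\n")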