Added utility scripts. Renamed DType to Dtype. Made the

iLocIndexerFrame synthetic class a subclass of the concrete
non-specialized iLocIndexer, and similar for loc indexers and
for both of these as specialized for Series.
This commit is contained in:
Graham Wheeler 2020-08-06 09:19:23 -07:00
Родитель 293b1eabcd
Коммит 056c79e166
18 изменённых файлов: 868 добавлений и 346 удалений

Просмотреть файл

@ -11,7 +11,7 @@ def reset_option(pat: str) -> None: ...
@overload
def describe_option(pat: str, _print_desc: Literal[True] = ...) -> None: ...
@overload
def describe_option(pat: str, _print_desc: Literal[False]) -> str: ...
def describe_option(pat: str, _print_desc: Literal[False]) -> str: ...
options: Any

Просмотреть файл

@ -1,7 +1,7 @@
from typing import Any, Union, Tuple, Type, Optional, Sequence
import numpy as np
from _typing import DType
from _typing import Dtype
def delta_to_nanoseconds(delta: _Timedelta) -> int: ...
def ints_to_pytimedelta(arr: Sequence[int], box: bool = ...) -> np.ndarray: ...
@ -11,7 +11,7 @@ class _Timedelta():
def __hash__(self) -> int: ...
def __richcmp__(self: Timedelta, other, op: int) -> Any: ...
def to_timedelta64(self) -> np.timedelta64: ...
def view(self, dtype: DType): ...
def view(self, dtype: Dtype): ...
@property
def components(self) -> Tuple: ... # Really a namedtuple
@property

Просмотреть файл

@ -35,15 +35,12 @@ Axes = Collection
Renamer = Union[Mapping[Label, Any], Callable[[Label], Label]]
T = TypeVar('T')
# Above were generated by stubgen; pylance ones are below
#_str = str # needed because Series/DataFrame have properties called "str"...
#_bool = bool # ditto
num = Union[int, float]
SeriesAxisType = Literal["index", 0] # Restricted subset of _AxisType for series
AxisType = Literal["columns", "index", 0, 1]
DType = TypeVar("DType", bool, int, float, object)
DTypeNp = TypeVar("DTypeNp", bound=np.dtype)
Dtype = TypeVar("Dtype", bool, int, float, object)
DtypeNp = TypeVar("DtypeNp", bound=np.dtype)
KeysArgType = Any
ListLike = TypeVar("_ListLike", Sequence, np.ndarray, Series)
StrLike = Union[str, np.str_]
@ -55,5 +52,5 @@ np_ndarray_int64 = NewType("np_ndarray_int64", np.ndarray)
np_ndarray_bool = NewType("np_ndarray_bool", np.ndarray)
np_ndarray_str = NewType("np_ndarray_str", np.ndarray)
# Scratch types for generics
TT = TypeVar("TT", str, int)
UU = TypeVar("UU", str, int)
T1 = TypeVar("T1", str, int)
T2 = TypeVar("T2", str, int)

Просмотреть файл

@ -1,6 +1,6 @@
import numpy as np
#from pandas._config import get_option as get_option
from pandas._typing import ArrayLike as ArrayLike, DType as DType, Ordered as Ordered, Scalar as Scalar
from pandas._typing import ArrayLike as ArrayLike, Dtype as DType, Ordered as Ordered, Scalar as Scalar
#from pandas.core import ops as ops
from pandas.core.accessor import PandasDelegate as PandasDelegate
#from pandas.core.accessor import delegate_names as delegate_names
@ -40,7 +40,7 @@ class Categorical(ExtensionArray, PandasObject):
@property
def dtype(self) -> CategoricalDtype: ...
def copy(self) -> Categorical: ...
def astype(self, dtype: DType, copy: bool=...) -> ArrayLike: ...
def astype(self, dtype: Dtype, copy: bool=...) -> ArrayLike: ...
def size(self) -> int: ...
def itemsize(self) -> int: ...
def tolist(self) -> List[Scalar]: ...

Просмотреть файл

@ -1,6 +1,6 @@
# merged types from pylance
from pandas._typing import DType as DType, Scalar
from pandas._typing import Dtype as DType, Scalar
from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype
from pandas.core.dtypes.cast import astype_nansafe as astype_nansafe
from pandas.core.dtypes.common import is_bool_dtype as is_bool_dtype, is_object_dtype as is_object_dtype, is_scalar as is_scalar, is_string_dtype as is_string_dtype, pandas_dtype as pandas_dtype
@ -9,7 +9,7 @@ from pandas.core.dtypes.missing import isna as isna, na_value_for_dtype as na_va
from typing import Any, Optional
class SparseDtype(ExtensionDtype):
def __init__(self, dtype: DType=..., fill_value: Optional[Scalar]=...) -> None: ...
def __init__(self, dtype: Dtype=..., fill_value: Optional[Scalar]=...) -> None: ...
def __hash__(self) -> Any: ...
def __eq__(self, other: Any) -> bool: ...
@property

Просмотреть файл

@ -1,7 +1,7 @@
import numpy as np
#from pandas._libs import lib as lib
#from pandas._libs.tslibs import IncompatibleFrequency as IncompatibleFrequency, OutOfBoundsDatetime as OutOfBoundsDatetime
from pandas._typing import ArrayLike as ArrayLike, DType as DType
from pandas._typing import ArrayLike as ArrayLike, Dtype as DType
#from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar as construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na as construct_1d_ndarray_preserving_na, construct_1d_object_array_from_listlike as construct_1d_object_array_from_listlike, infer_dtype_from_scalar as infer_dtype_from_scalar, maybe_cast_to_datetime as maybe_cast_to_datetime, maybe_cast_to_integer_array as maybe_cast_to_integer_array, maybe_castable as maybe_castable, maybe_convert_platform as maybe_convert_platform, maybe_upcast as maybe_upcast
#from pandas.core.dtypes.common import is_categorical_dtype as is_categorical_dtype, is_datetime64_ns_dtype as is_datetime64_ns_dtype, is_extension_array_dtype as is_extension_array_dtype, is_float_dtype as is_float_dtype, is_integer_dtype as is_integer_dtype, is_iterator as is_iterator, is_list_like as is_list_like, is_object_dtype as is_object_dtype, is_timedelta64_ns_dtype as is_timedelta64_ns_dtype, pandas_dtype as pandas_dtype
#from pandas.core.dtypes.dtypes import CategoricalDtype as CategoricalDtype, ExtensionDtype as ExtensionDtype, registry as registry
@ -17,4 +17,4 @@ def array(data: Sequence[object], dtype: Optional[Union[str, np.dtype, Extension
def extract_array(obj: Any, extract_numpy: bool = ...): ...
def sanitize_array(data: Any, index: Any, dtype: Any=..., copy: bool=..., raise_cast_failure: bool=...) -> Any: ...
def is_empty_data(data: Any) -> bool: ...
def create_series_with_explicit_dtype(data: Any=..., index: Optional[Union[ArrayLike, Index]]=..., dtype: Optional[DType]=..., name: Optional[str]=..., copy: bool=..., fastpath: bool=..., dtype_if_empty: DType=...) -> Series: ...
def create_series_with_explicit_dtype(data: Any=..., index: Optional[Union[ArrayLike, Index]]=..., dtype: Optional[Dtype]=..., name: Optional[str]=..., copy: bool=..., fastpath: bool=..., dtype_if_empty: Dtype=...) -> Series: ...

Просмотреть файл

@ -3,7 +3,7 @@ import numpy as np
#from pandas._libs.tslibs import NaT as NaT, OutOfBoundsDatetime as OutOfBoundsDatetime, Period as Period, iNaT as iNaT
#from pandas._libs.tslibs import OutOfBoundsDatetime as OutOfBoundsDatetime, Period as Period
#from pandas._libs.tslibs.timezones import tz_compare as tz_compare
from pandas._typing import DType as DType
from pandas._typing import Dtype as DType
#from pandas.core.dtypes.common import ensure_int16 as ensure_int16, ensure_int32 as ensure_int32, ensure_int64 as ensure_int64, ensure_int8 as ensure_int8, ensure_object as ensure_object, ensure_str as ensure_str, is_bool as is_bool, is_bool_dtype as is_bool_dtype, is_complex as is_complex, is_complex_dtype as is_complex_dtype, is_datetime64_dtype as is_datetime64_dtype, is_datetime64_ns_dtype as is_datetime64_ns_dtype, is_datetime64tz_dtype as is_datetime64tz_dtype, is_datetime_or_timedelta_dtype as is_datetime_or_timedelta_dtype, is_dtype_equal as is_dtype_equal, is_extension_array_dtype as is_extension_array_dtype, is_float as is_float, is_float_dtype as is_float_dtype, is_integer as is_integer, is_integer_dtype as is_integer_dtype, is_numeric_dtype as is_numeric_dtype, is_object_dtype as is_object_dtype, is_scalar as is_scalar, is_string_dtype as is_string_dtype, is_timedelta64_dtype as is_timedelta64_dtype, is_timedelta64_ns_dtype as is_timedelta64_ns_dtype, is_unsigned_integer_dtype as is_unsigned_integer_dtype, pandas_dtype as pandas_dtype
#from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtype, ExtensionDtype as ExtensionDtype, IntervalDtype as IntervalDtype, PeriodDtype as PeriodDtype
#from pandas.core.dtypes.generic import ABCDataFrame as ABCDataFrame, ABCDatetimeArray as ABCDatetimeArray, ABCDatetimeIndex as ABCDatetimeIndex, ABCPeriodArray as ABCPeriodArray, ABCPeriodIndex as ABCPeriodIndex, ABCSeries as ABCSeries
@ -29,7 +29,7 @@ def coerce_to_dtypes(result: Any, dtypes: Any): ...
def astype_nansafe(arr: Any, dtype: Any, copy: bool=..., skipna: bool=...) -> Any: ...
def maybe_convert_objects(values: np.ndarray, convert_numeric: bool=...) -> Any: ...
def soft_convert_objects(values: np.ndarray, datetime: bool=..., numeric: bool=..., timedelta: bool=..., coerce: bool=..., copy: bool=...) -> Any: ...
def convert_dtypes(input_array: Any, convert_string: bool=..., convert_integer: bool=..., convert_boolean: bool=...) -> DType: ...
def convert_dtypes(input_array: Any, convert_string: bool=..., convert_integer: bool=..., convert_boolean: bool=...) -> Dtype: ...
def maybe_castable(arr: Any) -> bool: ...
def maybe_infer_to_datetimelike(value: Any, convert_dates: bool=...) -> Any: ...
def maybe_cast_to_datetime(value: Any, dtype: Any, errors: str=...) -> Any: ...

Просмотреть файл

@ -1,11 +1,12 @@
import datetime
import numpy as np
from core.indexing import _iLocIndexer, _LocIndexer
from matplotlib.axes import Axes as PlotAxes
import sys
#from pandas._config import get_option as get_option
#from pandas._libs import lib as lib
from pandas._typing import Axes as Axes, Axis as Axis, FilePathOrBuffer as FilePathOrBuffer, Level as Level, Renamer as Renamer
from pandas._typing import num, SeriesAxisType, AxisType, DType, DTypeNp, StrLike, Scalar, IndexType, MaskType, np_ndarray_bool, np_ndarray_int64, np_ndarray_str
from pandas._typing import num, SeriesAxisType, AxisType, Dtype, DtypeNp, StrLike, Scalar, IndexType, MaskType, np_ndarray_bool, np_ndarray_int64, np_ndarray_str
#from pandas.compat import PY37 as PY37
#from pandas.compat._optional import import_optional_dependency as import_optional_dependency
#from pandas.core import algorithms as algorithms, nanops as nanops, ops as ops
@ -79,18 +80,16 @@ else:
import numpy as _np
import datetime as _dt
_str = str
_bool = bool
class _iLocIndexerFrame:
class _iLocIndexerFrame(_iLocIndexer):
@overload
def __getitem__(self, idx: Tuple[int, int]) -> DType: ...
def __getitem__(self, idx: Tuple[int, int]) -> Dtype: ...
@overload
def __getitem__(self, idx: Union[IndexType, slice, Tuple[IndexType, IndexType]]) -> DataFrame: ...
@overload
def __getitem__(self, idx: Union[int, Tuple[IndexType, int, Tuple[int, IndexType]]]) -> Series[DType]: ...
def __getitem__(self, idx: Union[int, Tuple[IndexType, int, Tuple[int, IndexType]]]) -> Series[Dtype]: ...
def __setitem__(
self,
idx: Union[
@ -101,15 +100,14 @@ class _iLocIndexerFrame:
Tuple[IndexType, IndexType],
Tuple[int, IndexType],
],
value: Union[float, Series[DType], DataFrame],
value: Union[float, Series[Dtype], DataFrame],
) -> None: ...
class _LocIndexerFrame:
class _LocIndexerFrame(_LocIndexer):
@overload
def __getitem__(self, idx: Union[int, slice, MaskType],) -> DataFrame: ...
@overload
def __getitem__(self, idx: StrLike,) -> Series[DType]: ...
def __getitem__(self, idx: StrLike,) -> Series[Dtype]: ...
@overload
def __getitem__(self, idx: Tuple[StrLike, StrLike],) -> float: ...
@overload
@ -117,13 +115,13 @@ class _LocIndexerFrame:
def __setitem__(
self,
idx: Union[MaskType, StrLike, Tuple[Union[MaskType, List[str]], Union[MaskType, List[str]]],],
value: Union[float, _np.ndarray, Series[DType], DataFrame],
value: Union[float, _np.ndarray, Series[Dtype], DataFrame],
) -> None: ...
class DataFrame(NDFrame):
_ListLike = Union[
np.ndarray, List[DType], Dict[_str, _np.ndarray], Sequence, Index, Series[DType],
np.ndarray, List[Dtype], Dict[_str, _np.ndarray], Sequence, Index, Series[Dtype],
]
def __init__(
@ -135,7 +133,7 @@ class DataFrame(NDFrame):
copy: bool = ...,
): ...
def __init__(self, data: Any=..., index: Optional[Axes]=..., columns: Optional[Axes]=..., dtype: Optional[DType]=..., copy: bool=...) -> None: ...
def __init__(self, data: Any=..., index: Optional[Axes]=..., columns: Optional[Axes]=..., dtype: Optional[Dtype]=..., copy: bool=...) -> None: ...
@property
def axes(self) -> List[Index]: ...
@property
@ -151,7 +149,7 @@ class DataFrame(NDFrame):
@overload
def dot(self, other: DataFrame) -> DataFrame: ...
@overload
def dot(self, other: Series[DType]) -> Series[DType]: ...
def dot(self, other: Series[Dtype]) -> Series[Dtype]: ...
def __matmul__(self, other: Any): ...
def __rmatmul__(self, other: Any): ...
@classmethod
@ -159,7 +157,7 @@ class DataFrame(NDFrame):
@overload
def to_numpy(self) -> _np.ndarray: ...
@overload
def to_numpy(self, dtype: Optional[Type[DTypeNp]]) -> _np.ndarray: ...
def to_numpy(self, dtype: Optional[Type[DtypeNp]]) -> _np.ndarray: ...
@overload
def to_dict(self) -> Dict[_str, Any]: ...
@overload
@ -255,12 +253,12 @@ class DataFrame(NDFrame):
encoding: Optional[_str] = ...,
) -> _str: ...
def info(self, verbose: Any=..., buf: Any=..., max_cols: Any=..., memory_usage: Any=..., null_counts: Any=...) -> None: ...
def memory_usage(self, index: _bool = ..., deep: _bool = ...) -> Series[DType]: ...
def memory_usage(self, index: _bool = ..., deep: _bool = ...) -> Series[Dtype]: ...
def transpose(self, *args: Any, copy: bool=...) -> DataFrame: ...
@property
def T(self) -> DataFrame: ...
@overload
def __getitem__(self, idx: _str) -> Series[DType]: ...
def __getitem__(self, idx: _str) -> Series[Dtype]: ...
@overload
def __getitem__(self, rows: slice) -> DataFrame: ...
@overload
@ -278,7 +276,7 @@ class DataFrame(NDFrame):
def lookup(self, row_labels: Sequence, col_labels: Sequence) -> np.ndarray: ...
def align(
self,
other: Union[DataFrame, Series[DType]],
other: Union[DataFrame, Series[Dtype]],
join: Literal["inner", "outer", "left", "right"] = ...,
axis: Optional[AxisType] = ...,
level: Optional[Level] = ...,
@ -454,7 +452,7 @@ class DataFrame(NDFrame):
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
keep: Union[Literal["first", "last"], _bool] = ...,
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def sort_values(
self,
@ -517,7 +515,7 @@ class DataFrame(NDFrame):
def combine_first(self, other: DataFrame) -> DataFrame: ...
def update(
self,
other: Union[DataFrame, Series[DType]],
other: Union[DataFrame, Series[Dtype]],
join: Literal["left"] = ...,
overwrite: _bool = ...,
filter_func: Optional[Callable] = ...,
@ -549,11 +547,11 @@ class DataFrame(NDFrame):
margins_name: _str = ...,
observed: _bool = ...,
) -> DataFrame: ...
def stack(self, level: Level = ..., dropna: _bool = ...) -> Union[DataFrame, Series[DType]]: ...
def stack(self, level: Level = ..., dropna: _bool = ...) -> Union[DataFrame, Series[Dtype]]: ...
def explode(self, column: Union[str, Tuple]) -> DataFrame: ...
def unstack(
self, level: Level = ..., fill_value: Optional[Union[int, _str, Dict]] = ...,
) -> Union[DataFrame, Series[DType]]: ...
) -> Union[DataFrame, Series[Dtype]]: ...
def melt(
self,
id_vars: Optional[Any] = ...,
@ -564,18 +562,18 @@ class DataFrame(NDFrame):
) -> DataFrame: ...
def diff(self, periods: int = ..., axis: AxisType = ...) -> DataFrame: ...
@overload
def agg(self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def agg(self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
@overload
def agg(self, func: Union[List[Callable], Dict[_str, Callable]], axis: AxisType = ..., **kwargs) -> DataFrame: ...
@overload
def aggregate(self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def aggregate(self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
@overload
def aggregate(
self, func: Union[List[Callable], Dict[_str, Callable]], axis: AxisType = ..., **kwargs
) -> DataFrame: ...
def transform(self, func: Callable, axis: AxisType = ..., *args, **kwargs) -> DataFrame: ...
@overload
def apply(self, f: Callable[..., int]) -> Series[DType]: ...
def apply(self, f: Callable[..., int]) -> Series[Dtype]: ...
@overload
def apply(
self, f: Callable, axis: AxisType = ..., raw: _bool = ..., result_type: Optional[_str] = ...,
@ -583,14 +581,14 @@ class DataFrame(NDFrame):
def applymap(self, func: Callable) -> DataFrame: ...
def append(
self,
other: Union[DataFrame, Series[DType], Dict[_str, Any]],
other: Union[DataFrame, Series[Dtype], Dict[_str, Any]],
ignore_index: _bool = ...,
verify_integrity: _bool = ...,
sort: _bool = ...,
) -> DataFrame: ...
def join(
self,
other: Union[DataFrame, Series[DType], List[DataFrame]],
other: Union[DataFrame, Series[Dtype], List[DataFrame]],
on: Optional[Union[_str, List[_str]]] = ...,
how: Literal["left", "right", "outer", "inner"] = ...,
lsuffix: _str = ...,
@ -599,7 +597,7 @@ class DataFrame(NDFrame):
) -> DataFrame: ...
def merge(
self,
right: Union[DataFrame, Series[DType]],
right: Union[DataFrame, Series[Dtype]],
how: Literal["left", "right", "inner", "outer"] = ...,
on: Optional[Union[Level, List[Level]]] = ...,
left_on: Optional[Union[Level, List[Level]]] = ...,
@ -612,12 +610,12 @@ class DataFrame(NDFrame):
indicator: Union[_bool, _str] = ...,
validate: Optional[_str] = ...,
) -> DataFrame: ...
def round(self, decimals: Union[int, Dict, Series[DType]] = ..., *args, **kwargs) -> DataFrame: ...
def round(self, decimals: Union[int, Dict, Series[Dtype]] = ..., *args, **kwargs) -> DataFrame: ...
def corr(self, method: Literal["pearson", "kendall", "spearman"] = ..., min_periods: int = ...,) -> DataFrame: ...
def cov(self, min_periods: Optional[int] = ...) -> DataFrame: ...
def corrwith(
self,
other: Union[DataFrame, Series[DType]],
other: Union[DataFrame, Series[Dtype]],
axis: Optional[AxisType] = ...,
drop: _bool = ...,
method: Literal["pearson", "kendall", "spearman"] = ...,
@ -625,10 +623,10 @@ class DataFrame(NDFrame):
@overload
def count(self, axis: AxisType = ..., numeric_only: _bool = ..., *, level: Level) -> DataFrame: ...
@overload
def count(self, axis: AxisType = ..., level: None = ..., numeric_only: _bool = ...) -> Series[DType]: ...
def nunique(self, axis: AxisType = ..., dropna=True) -> Series[DType]: ...
def idxmax(self, axis: AxisType, skipna: _bool = ...) -> Series[DType]: ...
def idxmin(self, axis: AxisType, skipna: _bool = ...) -> Series[DType]: ...
def count(self, axis: AxisType = ..., level: None = ..., numeric_only: _bool = ...) -> Series[Dtype]: ...
def nunique(self, axis: AxisType = ..., dropna=True) -> Series[Dtype]: ...
def idxmax(self, axis: AxisType, skipna: _bool = ...) -> Series[Dtype]: ...
def idxmin(self, axis: AxisType, skipna: _bool = ...) -> Series[Dtype]: ...
@overload
def mode(
self, axis: AxisType = ..., skipna: _bool = ..., numeric_only: _bool = ..., *, level: Level, **kwargs
@ -636,7 +634,7 @@ class DataFrame(NDFrame):
@overload
def mode(
self, axis: AxisType = ..., skipna: _bool = ..., level: None = ..., numeric_only: _bool = ..., **kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def quantile(
self,
@ -661,7 +659,7 @@ class DataFrame(NDFrame):
copy: _bool = ...,
) -> DataFrame: ...
def to_period(self, freq: Optional[_str] = ..., axis: AxisType = ..., copy: _bool = ...) -> DataFrame: ...
def isin(self, values: Union[Iterable, Series[DType], DataFrame, Dict]) -> DataFrame: ...
def isin(self, values: Union[Iterable, Series[Dtype], DataFrame, Dict]) -> DataFrame: ...
def plot(self, kind: _str, yerr: DataFrame, **kwargs) -> PlotAxes: ...
def hist(
data,
@ -709,7 +707,7 @@ class DataFrame(NDFrame):
def __and__(self, other: Union[num, _ListLike, DataFrame], axis: SeriesAxisType = ...) -> DataFrame: ...
def __delitem__(self, key: _str) -> None: ...
def __div__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __eq__(self, other: Union[float, Series[DType], DataFrame]) -> DataFrame: ... # type: ignore
def __eq__(self, other: Union[float, Series[Dtype], DataFrame]) -> DataFrame: ... # type: ignore
def __exp__(
self,
other: Union[num, _ListLike, DataFrame],
@ -728,7 +726,7 @@ class DataFrame(NDFrame):
def __mod__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __mul__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __pow__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __ne__(self, other: Union[float, Series[DType], DataFrame]) -> DataFrame: ... # type: ignore
def __ne__(self, other: Union[float, Series[Dtype], DataFrame]) -> DataFrame: ... # type: ignore
def __or__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __radd__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
def __rand__(self, other: Union[num, _ListLike, DataFrame]) -> DataFrame: ...
@ -756,7 +754,7 @@ class DataFrame(NDFrame):
@columns.setter # setter needs to be right next to getter; otherwise mypy complains
def columns(self, cols: Union[List[_str], Index[_str]]) -> None: ...
@property
def dtypes(self) -> Series[DType]: ...
def dtypes(self) -> Series[Dtype]: ...
@property
def empty(self) -> _bool: ...
@property
@ -792,7 +790,7 @@ class DataFrame(NDFrame):
@overload
def all(
self, axis: AxisType = ..., bool_only: Optional[_bool] = ..., skipna: _bool = ..., level: None = ..., **kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def all(
self,
@ -806,7 +804,7 @@ class DataFrame(NDFrame):
@overload
def any(
self, axis: AxisType = ..., bool_only: Optional[_bool] = ..., skipna: _bool = ..., level: None = ..., **kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def any(
self, axis: AxisType = ..., bool_only: _bool = ..., skipna: _bool = ..., *, level: Level, **kwargs
@ -835,7 +833,7 @@ class DataFrame(NDFrame):
@overload
def bfill(
self,
value: Optional[Union[float, Dict, Series[DType], DataFrame]] = ...,
value: Optional[Union[float, Dict, Series[Dtype], DataFrame]] = ...,
axis: Optional[AxisType] = ...,
inplace: Optional[Literal[False]] = ...,
limit: int = ...,
@ -844,7 +842,7 @@ class DataFrame(NDFrame):
@overload
def bfill(
self,
value: Optional[Union[float, Dict, Series[DType], DataFrame]] = ...,
value: Optional[Union[float, Dict, Series[Dtype], DataFrame]] = ...,
axis: Optional[AxisType] = ...,
limit: int = ...,
downcast: Optional[Dict] = ...,
@ -875,8 +873,8 @@ class DataFrame(NDFrame):
def describe(
self,
percentiles: Optional[List[float]] = ...,
include: Optional[Union[Literal["all"], List[DType]]] = ...,
exclude: Optional[List[DType]] = ...,
include: Optional[Union[Literal["all"], List[Dtype]]] = ...,
exclude: Optional[List[Dtype]] = ...,
) -> DataFrame: ...
def div(
self,
@ -894,7 +892,7 @@ class DataFrame(NDFrame):
) -> DataFrame: ...
def droplevel(self, level: Level = ..., axis: AxisType = ...) -> DataFrame: ...
def eq(self, other: Any, axis: AxisType = ..., level: Optional[Level] = ...) -> DataFrame: ...
def equals(self, other: Union[Series[DType], DataFrame]) -> _bool: ...
def equals(self, other: Union[Series[Dtype], DataFrame]) -> _bool: ...
def ewm(
self,
com: Optional[float] = ...,
@ -917,7 +915,7 @@ class DataFrame(NDFrame):
@overload
def ffill(
self,
value: Optional[Union[Scalar, Dict, Series[DType], DataFrame]] = ...,
value: Optional[Union[Scalar, Dict, Series[Dtype], DataFrame]] = ...,
axis: Optional[AxisType] = ...,
inplace: Optional[Literal[False]] = ...,
limit: int = ...,
@ -1008,7 +1006,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def kurtosis(
self,
@ -1027,7 +1025,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def last(self, offset: Any) -> DataFrame: ...
def last_valid_index(self) -> Scalar: ...
def le(self, other: Any, axis: AxisType = ..., level: Optional[Level] = ...) -> DataFrame: ...
@ -1035,14 +1033,14 @@ class DataFrame(NDFrame):
@overload
def mad(
self, axis: Optional[AxisType] = ..., skipna: Optional[_bool] = ..., level: None = ...,
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def mad(
self, axis: Optional[AxisType] = ..., skipna: Optional[_bool] = ..., *, level: Level, **kwargs
) -> DataFrame: ...
def mask(
self,
cond: Union[Series[DType], DataFrame, _np.ndarray],
cond: Union[Series[Dtype], DataFrame, _np.ndarray],
other: Any = ...,
inplace: _bool = ...,
axis: Optional[AxisType] = ...,
@ -1068,7 +1066,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def mean(
self,
@ -1087,7 +1085,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def median(
self,
@ -1106,7 +1104,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def min(
self,
@ -1125,7 +1123,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def mod(
self,
other: Union[num, _ListLike, DataFrame],
@ -1157,7 +1155,7 @@ class DataFrame(NDFrame):
**kwargs
) -> DataFrame: ...
def pipe(self, func: Callable, *args, **kwargs) -> Any: ...
def pop(self, item: _str) -> Series[DType]: ...
def pop(self, item: _str) -> Series[Dtype]: ...
def pow(
self,
other: Union[num, _ListLike, DataFrame],
@ -1322,7 +1320,7 @@ class DataFrame(NDFrame):
ddof: int = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
@overload
def set_axis(self, labels: List, inplace: Literal[True], axis: AxisType = ...) -> None: ...
@overload
@ -1345,7 +1343,7 @@ class DataFrame(NDFrame):
level: None = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def slice_shift(self, periods: int = ..., axis: AxisType = ...) -> DataFrame: ...
def squeeze(self, axis: Optional[AxisType] = ...) -> Any: ...
@overload
@ -1368,7 +1366,7 @@ class DataFrame(NDFrame):
ddof: int = ...,
numeric_only: _bool = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def sub(
self,
other: Union[num, _ListLike, DataFrame],
@ -1403,7 +1401,7 @@ class DataFrame(NDFrame):
numeric_only: Optional[_bool] = ...,
min_count: int = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def swapaxes(self, axis1: AxisType, axis2: AxisType, copy: _bool = ...) -> DataFrame: ...
def tail(self, n: int = ...) -> DataFrame: ...
def take(self, indices: List, axis: AxisType = ..., is_copy: Optional[_bool] = ..., **kwargs) -> DataFrame: ...
@ -1680,10 +1678,10 @@ class DataFrame(NDFrame):
ddof: int = ...,
numeric_only: Optional[_bool] = ...,
**kwargs
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def where(
self,
cond: Union[Series[DType], DataFrame, _np.ndarray],
cond: Union[Series[Dtype], DataFrame, _np.ndarray],
other: Any = ...,
inplace: _bool = ...,
axis: Optional[AxisType] = ...,

Просмотреть файл

@ -3,7 +3,7 @@ import sys
import pandas.core.indexing as indexing
#from pandas._config import config as config
#from pandas._libs import Timestamp as Timestamp, iNaT as iNaT #, lib as lib, properties as properties
from pandas._typing import Axis as Axis, DType as DType, FilePathOrBuffer as FilePathOrBuffer, FrameOrSeries as FrameOrSeries, JSONSerializable as JSONSerializable, Level as Level, Renamer as Renamer, ListLike as ListLike, Scalar as Scalar, SeriesAxisType as SeriesAxisType
from pandas._typing import Axis as Axis, Dtype as DType, FilePathOrBuffer as FilePathOrBuffer, FrameOrSeries as FrameOrSeries, JSONSerializable as JSONSerializable, Level as Level, Renamer as Renamer, ListLike as ListLike, Scalar as Scalar, SeriesAxisType as SeriesAxisType
#from pandas.compat import set_function_name as set_function_name
#from pandas.compat._optional import import_optional_dependency as import_optional_dependency
#from pandas.core import missing as missing, nanops as nanops
@ -37,7 +37,7 @@ str_t = str
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
def __init__(self, data: BlockManager, axes: Optional[List[Index]]=..., copy: bool=..., dtype: Optional[DType]=..., attrs: Optional[Mapping[Optional[Hashable], Any]]=..., fastpath: bool=...) -> None: ...
def __init__(self, data: BlockManager, axes: Optional[List[Index]]=..., copy: bool=..., dtype: Optional[Dtype]=..., attrs: Optional[Mapping[Optional[Hashable], Any]]=..., fastpath: bool=...) -> None: ...
@property
def attrs(self) -> Dict[Optional[Hashable], Any]: ...
@attrs.setter
@ -57,7 +57,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
@overload
def set_axis(
self, labels: Union[Index, ListLike], axis: SeriesAxisType = ..., inplace: Literal[False] = ...,
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def swapaxes(self, axis1: SeriesAxisType, axis2: SeriesAxisType, copy: bool_t = ...) -> FrameOrSeries: ...
def droplevel(self, level: Level, axis: SeriesAxisType = ...) -> FrameOrSeries: ...
def pop(self, item: str_t) -> FrameOrSeries: ...
@ -85,7 +85,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
copy: bool_t = ...,
inplace: Optional[Literal[False]] = ...,
) -> Series: ...
def equals(self, other: Series[DType]) -> bool_t: ...
def equals(self, other: Series[Dtype]) -> bool_t: ...
def __neg__(self) -> None: ...
def __pos__(self) -> None: ...
def __invert__(self): ...
@ -289,10 +289,10 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
axis: SeriesAxisType = ...,
level: Optional[Level] = ...,
drop_level: bool_t = ...,
) -> Series[DType]: ...
) -> Series[Dtype]: ...
def __getitem__(self, item: Any) -> None: ...
def __delitem__(self, idx: Union[int, str_t]): ...
def get(self, key: object, default: Optional[DType] = ...) -> DType: ...
def get(self, key: object, default: Optional[Dtype] = ...) -> Dtype: ...
def reindex_like(self, other: Any, method: Optional[str]=..., copy: bool_t=..., limit: Any=..., tolerance: Any=...) -> FrameOrSeries: ...
def drop(self, labels: Any=..., axis: Any=..., index: Any=..., columns: Any=..., level: Any=..., inplace: bool_t=..., errors: str=...) -> Any: ...
def add_prefix(self, prefix: str) -> FrameOrSeries: ...

Просмотреть файл

@ -2,7 +2,7 @@ from matplotlib.axes import Axes as PlotAxes, SubplotBase as AxesSubplot
import numpy as np
import sys
#from pandas._libs import Timestamp as Timestamp, lib as lib
from pandas._typing import FrameOrSeries as FrameOrSeries, AxisType, DType, Level
from pandas._typing import FrameOrSeries as FrameOrSeries, AxisType, Dtype, Level
#from pandas.core.base import DataError as DataError, SpecificationError as SpecificationError
#from pandas.core.construction import create_series_with_explicitDType as create_series_with_explicitDType
#from pandas.core.dtypes.cast import maybe_convert_objects as maybe_convert_objects, maybe_downcast_numeric as maybe_downcast_numeric, maybe_downcast_toDType as maybe_downcast_toDType
@ -46,32 +46,32 @@ class SeriesGroupBy(GroupBy):
def value_counts(
self, normalize: bool = ..., sort: bool = ..., ascending: bool = ..., bins: Any = ..., dropna: bool = ...,
) -> DataFrame: ...
def count(self) -> Series[DType]: ...
def count(self) -> Series[Dtype]: ...
def pct_change(
self, periods: int = ..., fill_method: str = ..., limit: Any = ..., freq: Any = ..., axis: AxisType = ...,
) -> Series[DType]: ...
) -> Series[Dtype]: ...
# Overrides and others from original pylance stubs
@property
def is_monotonic_increasing(self) -> bool: ...
@property
def is_monotonic_decreasing(self) -> bool: ...
def __getitem__(self, item: str) -> Series[DType]: ...
def bfill(self, limit: Optional[int] = ...) -> Series[DType]: ...
def cummax(self, axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def cummin(self, axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def cumprod(self, axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def cumsum(self, axis: AxisType = ..., **kwargs) -> Series[DType]: ...
def ffill(self, limit: Optional[int] = ...) -> Series[DType]: ...
def first(self, **kwargs) -> Series[DType]: ...
def head(self, n: int = ...) -> Series[DType]: ...
def last(self, **kwargs) -> Series[DType]: ...
def max(self, **kwargs) -> Series[DType]: ...
def mean(self, **kwargs) -> Series[DType]: ...
def median(self, **kwargs) -> Series[DType]: ...
def min(self, **kwargs) -> Series[DType]: ...
def nlargest(self, n: int = ..., keep: str = ...) -> Series[DType]: ...
def nsmallest(self, n: int = ..., keep: str = ...) -> Series[DType]: ...
def nth(self, n: Union[int, Sequence[int]], dropna: Optional[str] = ...) -> Series[DType]: ...
def __getitem__(self, item: str) -> Series[Dtype]: ...
def bfill(self, limit: Optional[int] = ...) -> Series[Dtype]: ...
def cummax(self, axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
def cummin(self, axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
def cumprod(self, axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
def cumsum(self, axis: AxisType = ..., **kwargs) -> Series[Dtype]: ...
def ffill(self, limit: Optional[int] = ...) -> Series[Dtype]: ...
def first(self, **kwargs) -> Series[Dtype]: ...
def head(self, n: int = ...) -> Series[Dtype]: ...
def last(self, **kwargs) -> Series[Dtype]: ...
def max(self, **kwargs) -> Series[Dtype]: ...
def mean(self, **kwargs) -> Series[Dtype]: ...
def median(self, **kwargs) -> Series[Dtype]: ...
def min(self, **kwargs) -> Series[Dtype]: ...
def nlargest(self, n: int = ..., keep: str = ...) -> Series[Dtype]: ...
def nsmallest(self, n: int = ..., keep: str = ...) -> Series[Dtype]: ...
def nth(self, n: Union[int, Sequence[int]], dropna: Optional[str] = ...) -> Series[Dtype]: ...
class DataFrameGroupBy(GroupBy):
@ -90,7 +90,7 @@ class DataFrameGroupBy(GroupBy):
def transform(self, func: Any, *args: Any, **kwargs: Any): ...
def filter(self, func: Callable, dropna: bool = ..., *args, **kwargs) -> DataFrame: ...
@overload
def __getitem__(self, item: str) -> Series[DType]: ...
def __getitem__(self, item: str) -> Series[Dtype]: ...
@overload
def __getitem__(self, item: Sequence[str]) -> DataFrame: ...
def count(self) -> DataFrame: ...

Просмотреть файл

@ -1,6 +1,6 @@
#from __future__ import annotations
import numpy as np
from pandas._typing import DType, Label, TT, UU
from pandas._typing import Dtype, Label, T1, T2
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
from pandas.core.strings import StringMethods
@ -8,12 +8,12 @@ from typing import Any, Dict, Generic, Hashable, Iterable, Iterator, List, Optio
class InvalidIndexError(Exception): ...
class Index(IndexOpsMixin[TT], PandasObject, Generic[TT]):
class Index(IndexOpsMixin[T1], PandasObject, Generic[T1]):
@property
def str(self) -> StringMethods: ...
def __new__(cls: Any, data: Any=..., dtype: Any=..., copy: Any=..., name: Any=..., tupleize_cols: Any=..., **kwargs: Any) -> Index: ...
def __init__(
self, data: Iterable[TT], dtype: Any = ..., copy: bool = ..., name: Any = ..., tupleize_cols: bool = ...,
self, data: Iterable[T1], dtype: Any = ..., copy: bool = ..., name: Any = ..., tupleize_cols: bool = ...,
): ...
@property
def asi8(self) -> None: ...
@ -25,7 +25,7 @@ class Index(IndexOpsMixin[TT], PandasObject, Generic[TT]):
def ravel(self, order: str = ...): ...
def view(self, cls: Optional[Any] = ...): ...
@overload
def astype(self, dtype: Type[UU]) -> Index[UU]: ...
def astype(self, dtype: Type[T2]) -> Index[T2]: ...
@overload
def astype(self, dtype: str) -> Index: ...
def take(self, indices: Any, axis: int = ..., allow_fill: bool = ..., fill_value: Optional[Any] = ..., **kwargs: Any): ...
@ -82,20 +82,20 @@ class Index(IndexOpsMixin[TT], PandasObject, Generic[TT]):
def unique(self, level: Optional[Any] = ...): ...
def drop_duplicates(self, keep: str = ...): ...
def duplicated(self, keep: str = ...): ...
def __add__(self, other: Any) -> Index[TT]: ...
def __radd__(self, other: Any) -> Index[TT]: ...
def __iadd__(self, other: Any) -> Index[TT]: ...
def __sub__(self, other: Any) -> Index[TT]: ...
def __rsub__(self, other: Any) -> Index[TT]: ...
def __and__(self, other: Any) -> Index[TT]: ...
def __or__(self, other: Any) -> Index[TT]: ...
def __xor__(self, other: Any) -> Index[TT]: ...
def __add__(self, other: Any) -> Index[T1]: ...
def __radd__(self, other: Any) -> Index[T1]: ...
def __iadd__(self, other: Any) -> Index[T1]: ...
def __sub__(self, other: Any) -> Index[T1]: ...
def __rsub__(self, other: Any) -> Index[T1]: ...
def __and__(self, other: Any) -> Index[T1]: ...
def __or__(self, other: Any) -> Index[T1]: ...
def __xor__(self, other: Any) -> Index[T1]: ...
def __nonzero__(self) -> None: ...
__bool__: Any = ...
def union(self, other: Union[List[TT], Index[TT]], sort: Optional[Any] = ...) -> Index[TT]: ...
def intersection(self, other: Union[List[TT], Index[TT]], sort: bool = ...) -> Index[TT]: ...
def difference(self, other: Union[List[TT], Index[TT]]) -> Index[TT]: ...
def symmetric_difference(self, other: Union[List[TT], Index[TT]], result_name: Optional[Any] = ..., sort: Optional[Any] = ...) -> Index[TT]: ...
def union(self, other: Union[List[T1], Index[T1]], sort: Optional[Any] = ...) -> Index[T1]: ...
def intersection(self, other: Union[List[T1], Index[T1]], sort: bool = ...) -> Index[T1]: ...
def difference(self, other: Union[List[T1], Index[T1]]) -> Index[T1]: ...
def symmetric_difference(self, other: Union[List[T1], Index[T1]], result_name: Optional[Any] = ..., sort: Optional[Any] = ...) -> Index[T1]: ...
def get_loc(self, key: Any, method: Optional[Any] = ..., tolerance: Optional[Any] = ...): ...
def get_indexer(self, target: Any, method: Optional[Any] = ..., limit: Optional[Any] = ..., tolerance: Optional[Any] = ...): ...
def reindex(self, target: Any, method: Optional[Any] = ..., level: Optional[Any] = ..., limit: Optional[Any] = ..., tolerance: Optional[Any] = ...): ...
@ -110,11 +110,11 @@ class Index(IndexOpsMixin[TT], PandasObject, Generic[TT]):
def __hash__(self) -> Any: ...
def __setitem__(self, key: Any, value: Any) -> None: ...
@overload
def __getitem__(self, idx: Union[int, Series[bool], slice, np.ndarray]) -> TT: ...
def __getitem__(self, idx: Union[int, Series[bool], slice, np.ndarray]) -> T1: ...
@overload
def __getitem__(self, idx: Index[TT]) -> Index[TT]: ...
def __getitem__(self, idx: Index[T1]) -> Index[T1]: ...
@overload
def __getitem__(self, idx: Tuple[np.ndarray, ...]) -> TT: ...
def __getitem__(self, idx: Tuple[np.ndarray, ...]) -> T1: ...
def append(self, other: Any): ...
def putmask(self, mask: Any, value: Any): ...
def equals(self, other: Any) -> bool: ...
@ -146,10 +146,10 @@ class Index(IndexOpsMixin[TT], PandasObject, Generic[TT]):
def __eq__(self, other: object) -> Series: ... # type: ignore
@overload
def __iter__(self) -> Iterator: ...
def __ne__(self, other: str) -> Index[TT]: ... # type: ignore
def __ne__(self, other: str) -> Index[T1]: ... # type: ignore
def ensure_index_from_sequences(sequences: Sequence[Sequence[DType]], names: Sequence[str]=...) -> Index: ...
def ensure_index_from_sequences(sequences: Sequence[Sequence[Dtype]], names: Sequence[str]=...) -> Index: ...
def ensure_index(index_like: Union[Sequence, Index], copy: bool=...) -> Index: ...
def maybe_extract_name(name, obj, cls) -> Label: ...

Просмотреть файл

@ -1,7 +1,7 @@
import numpy as np
#from pandas._config import get_option as get_option
#from pandas._libs.hashtable import duplicated_int64 as duplicated_int64
from pandas._typing import AnyArrayLike as AnyArrayLike, UU
from pandas._typing import AnyArrayLike as AnyArrayLike, T2
from pandas.core import accessor as accessor
#from pandas.core.algorithms import take_1d as take_1d
#from pandas.core.arrays.categorical import Categorical as Categorical, contains as contains
@ -29,7 +29,7 @@ class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
def __contains__(self, key: Any) -> bool: ...
def __array__(self, dtype: Any=...) -> np.ndarray: ...
@overload
def astype(self, dtype: Type[UU]) -> Index[UU]: ...
def astype(self, dtype: Type[T2]) -> Index[T2]: ...
@overload
def astype(self, dtype: str) -> Index: ...
def fillna(self, value: Any, downcast: Optional[Any] = ...): ...

Просмотреть файл

@ -2,7 +2,7 @@ import numpy as np
#from pandas._config import get_option as get_option
#from pandas._libs import Timedelta as Timedelta, Timestamp as Timestamp, lib as lib
from pandas._libs.interval import IntervalMixin as IntervalMixin, Interval as Interval
from pandas._typing import AnyArrayLike as AnyArrayLike, UU
from pandas._typing import AnyArrayLike as AnyArrayLike, T2
#from pandas.core.algorithms import take_1d as take_1d
#from pandas.core.arrays.interval import IntervalArray as IntervalArray
#from pandas.core.dtypes.cast import find_common_type as find_common_type, infer_dtype_from_scalar as infer_dtype_from_scalar, maybe_downcast_to_dtype as maybe_downcast_to_dtype
@ -57,7 +57,7 @@ class IntervalIndex(IntervalMixin, ExtensionIndex):
def values(self): ...
def __array_wrap__(self, result: Any, context: Optional[Any] = ...): ...
def __reduce__(self): ...
def astype(self, dtype: UU, copy: bool = ...) -> Index[UU]: ...
def astype(self, dtype: T2, copy: bool = ...) -> Index[T2]: ...
@property
def inferred_type(self) -> str: ...
def memory_usage(self, deep: bool=...) -> int: ...

Просмотреть файл

@ -1,6 +1,6 @@
import numpy as np
#from pandas._libs import lib as lib
#from pandas._typing import DType as DType
#from pandas._typing import Dtype as Dtype
#from pandas.core import algorithms as algorithms
#from pandas.core.dtypes.cast import astype_nansafe as astype_nansafe
#from pandas.core.dtypes.common import is_bool as is_bool, is_bool_dtype as is_bool_dtype, is_dtype_equal as is_dtype_equal, is_extension_array_dtype as is_extension_array_dtype, is_float as is_float, is_float_dtype as is_float_dtype, is_integer_dtype as is_integer_dtype, is_scalar as is_scalar, is_signed_integer_dtype as is_signed_integer_dtype, is_unsigned_integer_dtype as is_unsigned_integer_dtype, needs_i8_conversion as needs_i8_conversion, pandas_dtype as pandas_dtype

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,4 +1,4 @@
#from pandas._typing import ArrayLike as ArrayLike, DType as DType
#from pandas._typing import ArrayLike as ArrayLike, Dtype as Dtype
#from pandas.arrays import StringArray as StringArray
#from pandas.core.algorithms import take_1d as take_1d
from pandas.core.base import NoNewAttributesMixin as NoNewAttributesMixin

55
utils/count_ids.py Normal file
Просмотреть файл

@ -0,0 +1,55 @@
#!/bin/python
"""Count IDs.
Usage:
count_ids [--path=<root>] [--suffix=<filesuffix>] [--pat=<pat>]
count_ids -h | --help
count_ids --version
Options:
-h --help Show this screen.
--version Show version.
--path=<root> Directory to scan (default is current working directory)
--suffix=<filesuffix> File name suffix to restrict to (default is all files)
--pat=<pat> A regular expression to use to extract IDs
"""
import docopt
import glob
import re
def count(root, suffix, regex):
    """Scan files under *root* and print identifiers that occur exactly once.

    Parameters:
        root: directory to scan (defaults to the current working directory)
        suffix: restrict to file names ending in this suffix (e.g. '.py' or
            'py'); None means all files
        regex: regular expression used to extract identifiers; defaults to
            a Python-style identifier pattern

    Prints one line per unique identifier: 'id: file:line: source-line'.
    """
    import os
    if root is None:
        root = '.'
    # Normalize 'py' / '.py' / '*.py' style suffixes into a glob pattern.
    filepat = '*' if suffix is None else '*.' + suffix[suffix.find('.')+1:]
    if regex is None:
        regex = '[A-Za-z_][A-Za-z0-9_]*'
    data = {}  # id -> occurrence count
    loc = {}   # id -> 'file:line' of first sighting (kept only while unique)
    ctx = {}   # id -> source line of first sighting (ditto)
    prog = re.compile(regex)
    for name in glob.iglob(root + '/**/' + filepat, recursive=True):
        if not os.path.isfile(name):
            continue  # the glob pattern can also match directories
        n = 0
        # errors='replace' so a single undecodable file doesn't abort the scan.
        with open(name, encoding='utf-8', errors='replace') as f:
            for line in f:
                n += 1
                for id in prog.findall(line):
                    if id in data:
                        data[id] += 1
                        if id in loc:
                            del loc[id]  # no longer unique; save some memory
                            del ctx[id]
                    else:
                        data[id] = 1
                        loc[id] = f'{name}:{n}'
                        ctx[id] = line[:-1]
    for id, lc in loc.items():
        print(f'{id}: {lc}: {ctx[id]}')
if __name__ == "__main__":
    # Parse the command line per the module docstring and run the scan.
    cli_args = docopt.docopt(__doc__, version='Count IDs 0.1')
    count(root=cli_args['--path'], suffix=cli_args['--suffix'], regex=cli_args['--pat'])

471
utils/validate_stubs.py Normal file
Просмотреть файл

@ -0,0 +1,471 @@
#!/bin/python
"""Validate Stubs.
Usage:
validate_stubs <package> [--path=<stubpath>] [--class=<c>] [--function=<f>]
validate_stubs -h | --help
validate_stubs --version
Options:
-h --help Show this screen.
--version Show version.
--path=<stubpath> Where to find stubs (default to parent directory)
--function=<f> Restrict to the named function (or method if used with --class).
--class=<c> Restrict to the named class.
"""
from typing import Any, Callable, List, Literal, NoReturn, Optional, Set, Tuple, _overload_dummy
import importlib
import inspect
import os
import sys
import types
from collections import namedtuple
from operator import itemgetter, attrgetter
from enum import Enum
import docopt
import typing as _typing
# Registry of overloaded functions keyed by "module.name"; each value is a
# dispatcher function carrying the individual variants in __overloads__.
overloads = {}
def my_overload(func):
    """Replacement for typing.overload that records every overload variant.

    typing.overload normally returns a dummy and discards the decorated
    function; this version keeps each variant on a shared dispatcher's
    __overloads__ list so the validator can inspect/report them later.
    """
    key = func.__module__ + '.' + func.__name__
    if key not in overloads:
        # First sighting: create the dispatcher (calling it just raises via
        # _overload_dummy, as with the real typing.overload) and start the
        # overload list on it.
        fn = lambda *args, **kwds: _overload_dummy(args, kwds)
        overloads[key] = fn
        fn.__overloads__ = [func]
    else:
        overloads[key].__overloads__.append(func)
    return overloads[key]
# Monkey-patch typing BEFORE any stub module is imported, so @overload
# decorators in the stubs register through my_overload.
_typing.overload = my_overload
def import_dual(m: str, stub_path: str) -> Tuple:
    """
    Import both a stub package and a real package with the same name.

    The real package is imported first with a clean sys.modules; then a
    path hook that loads '.pyi' files is temporarily installed and the stub
    directory is put first on sys.path, so a second import of the same name
    resolves to the stubs instead.

    Parameters:
        m (str): module name
        stub_path (str): location of type stubs

    Returns:
        Tuple - the tuple of (real module, stub module)
    """
    def _clean(m):
        # Drop the module and all of its submodules from sys.modules so the
        # next import is forced to resolve from scratch.
        to_del = [k for k in sys.modules.keys() if k == m or k.startswith(m + '.')]
        for k in to_del:
            del sys.modules[k]
        importlib.invalidate_caches()
    _clean(m)
    m1 = importlib.import_module(m)
    _clean(m)
    # Install a finder that treats .pyi files as importable source.
    sys.path_hooks.insert(0,
        importlib.machinery.FileFinder.path_hook(
            (importlib.machinery.SourceFileLoader, ['.pyi']))
    )
    sys.path.insert(0, stub_path)
    try:
        m2 = importlib.import_module(m)
        return m1, m2
    finally:
        # Always undo the path/hook changes and the module-cache pollution,
        # even if importing the stubs fails.
        sys.path.pop(0)
        sys.path_hooks.pop(0)
        _clean(m)
class Item:
    """One discovered entity (module, class, function or property).

    Items form a tree through ``children``; ``analog`` is filled in later
    when an item is matched against its counterpart (real vs stub).
    """
    class ItemType(Enum):
        MODULE = 1
        CLASS = 2
        FUNCTION = 3
        PROPERTY = 4

    def __init__(self, file: str, module: str, name: str, object_: object, type_: ItemType, children: dict=None):
        self.file, self.module, self.name = file, module, name
        self.object_, self.type_, self.children = object_, type_, children
        self.done = False    # traversal bookkeeping
        self.analog = None   # the matching Item on the other side, if any

    def ismodule(self):
        return self.type_ is Item.ItemType.MODULE

    def isclass(self):
        return self.type_ is Item.ItemType.CLASS

    def isfunction(self):
        return self.type_ is Item.ItemType.FUNCTION

    @staticmethod
    def make_function(file: str, module: str, name: str, object_: object):
        """Factory for a FUNCTION item (no children)."""
        return Item(file, module, name, object_, Item.ItemType.FUNCTION)

    @staticmethod
    def make_class(file: str, module: str, name: str, object_: object, children: dict):
        """Factory for a CLASS item with a dict of member items."""
        return Item(file, module, name, object_, Item.ItemType.CLASS, children)

    @staticmethod
    def make_module(file: str, module: str, name: str, object_: object, children: dict):
        """Factory for a MODULE item with a dict of contained items."""
        return Item(file, module, name, object_, Item.ItemType.MODULE, children)
def isfrompackage(v: object, path: str) -> bool:
    """Heuristically decide whether *v* was defined under the package at *path*.

    Objects whose source file lives below *path* qualify. Objects with no
    source file (builtins, plain values) raise TypeError in inspect.getfile;
    for those we accept everything except modules.
    """
    try:
        return inspect.getfile(v).startswith(path)
    except TypeError:
        # builtins or non-modules; for the latter we return True for now
        return not inspect.ismodule(v)
def isfrommodule(v: object, module: str, default: bool=True) -> bool:
    """Best-effort check that *v* was defined in *module*.

    Looks for a '__module__' entry in the object's __dict__. Returns
    *default* when the object carries no usable module information (no
    __dict__, or no '__module__' key).
    """
    try:
        # Make sure it came from this module
        return v.__dict__['__module__'] == module
    except (AttributeError, KeyError, TypeError):
        # Narrowed from a bare 'except:' so genuine bugs (and e.g.
        # KeyboardInterrupt) are no longer silently swallowed.
        return default
def gather(name: str, m: object) -> Item:
    """
    Recursively collect every module, class and public function defined in
    package *m* into a tree of Item nodes.

    Parameters:
        name: package name
        m: the imported package/module object

    Returns:
        Item - the root MODULE Item whose children dict holds the tree
    """
    def _gather(mpath: str, m: object, root: str, fpath: str, completed: set, items: dict):
        """
        Parameters:
            mpath: module path (e.g. pandas.core)
            m: module object
            root: package path
            fpath: module file path relative to package root directory; (may be unnecessary)
            completed: a set of modules already traversed
            items: the dict of discovered items
        """
        for k, v in m.__dict__.items():
            # Only modules, classes and functions are of interest.
            if not (inspect.isclass(v) or inspect.isfunction(v) or inspect.ismodule(v)):
                continue
            # Skip builtins, private names, and anything defined outside this
            # package or merely re-exported from a different module.
            if inspect.isbuiltin(v) or k[0] == '_' or not isfrompackage(v, root) or not isfrommodule(v, mpath):
                continue
            if inspect.ismodule(v):
                if v not in completed:
                    completed.add(v)  # guards against import cycles
                    mfpath = inspect.getfile(v)
                    if mfpath.startswith(root):
                        # Record the path relative to the package root.
                        mfpath = mfpath[len(root)+1:]
                    members = dict()
                    items[k] = Item.make_module(mfpath, mpath, k, v, members)
                    _gather(mpath + '.' + k, v, root, mfpath, completed, members)
            elif inspect.isfunction(v):
                if k in items:
                    print(f'{name} already has a function {k}')
                items[k] = Item.make_function(fpath, mpath, k, v)
            elif inspect.isclass(v):
                members = dict()
                items[k] = Item.make_class(fpath, mpath, k, v, members)
                # Record public methods and properties as FUNCTION children.
                for kc, vc in inspect.getmembers(v):
                    if kc[0] != '_' and (inspect.isfunction(vc) or str(type(vc)) == "<class 'property'>"):
                        members[kc] = Item.make_function(fpath, mpath, kc, vc)
            else:
                pass
    fpath = m.__dict__['__file__']
    root = fpath[:fpath.rfind('/')]  # fix for windows
    members = dict()
    package = Item.make_module(fpath, '', name, m, members)
    _gather(name, m, root, fpath, set(), members)
    return package
def walk(tree: dict, fn: Callable, *args, postproc: Callable=None, path=None):
    """
    Walk the object tree and apply a function.

    If the function returns True, do not walk its children,
    but add the object to a postproc list and if a postproc function
    is provided, call that at the end for those objects. This gives
    us some flexibility in both traversing the tree and collecting
    and processing certain nodes.

    TODO: see if we ever use the postproc facility and remove it if not.
    """
    if path is None:
        path = ''
    pruned = []
    for key, node in tree.items():
        if fn(path, key, node, *args):
            # Callback claimed this node: remember it, don't descend.
            pruned.append(key)
        elif node.children:
            walk(node.children, fn, *args, postproc=postproc, path=path + '/' + key)
    if postproc:
        postproc(tree, pruned)
def collect_items(root: Item) -> Tuple[List[Item], List[Item]]:
    """Gather the functions and classes directly below *root*.

    Classes are not descended into here; their members are collected
    separately when a specific class is compared. Both result lists are
    sorted by item name.
    """
    found_functions = []
    found_classes = []

    def _visit(path, name, node, fns, classes):
        if node.isclass():
            classes.append(node)
            return True  # Don't recurse
        elif node.isfunction():
            fns.append(node)

    walk(root.children, _visit, found_functions, found_classes)
    return (sorted(found_functions, key=attrgetter('name')),
            sorted(found_classes, key=attrgetter('name')))
def match_pairs(real: List[Item], stub: List[Item], label: str, owner: str=''):
    """Merge-walk two name-sorted Item lists, linking matches via .analog.

    Items present on only one side are reported with a printed message;
    *label* names the item kind and *owner* is an optional dotted class
    prefix for the messages.
    """
    r, s = 0, 0
    n_real, n_stub = len(real), len(stub)
    while r < n_real or s < n_stub:
        if r == n_real or (s < n_stub and real[r].name > stub[s].name):
            # Stub-only entry.
            item = stub[s]
            print(f"No match for stub {label} {item.module}.{owner}{item.name}")
            s += 1
        elif s == n_stub or real[r].name < stub[s].name:
            # Real-only entry.
            item = real[r]
            print(f"No stub for {label} {item.module}.{owner}{item.name}")
            r += 1
        else:
            # TODO: Check for uniqueness
            stub[s].analog = real[r]
            real[r].analog = stub[s]
            s += 1
            r += 1
def compare_args(real: Item, stub: Item, owner: Optional[str] = None):
    """
    Compare a stub function's parameter list against its real analog and
    print any differences (extra/missing parameters, mismatched names or
    parameter kinds).

    owner - name of owner class, if a member; else None if a top-level function
    """
    if owner is None:
        owner = ''
    elif owner and owner[-1] != '.':
        owner += '.'
    module = stub.module
    name = stub.name
    #if stub.object_ == _overload_dummy:
    if hasattr(stub.object_, '__overloads__'):
        # Overloaded stubs carry several alternative signatures; there is no
        # single signature to diff against the real function.
        print(f"Can't validate @overloaded function {module}.{owner}{name} with {len(stub.object_.__overloads__)} overloads")
        return
    try:
        # NOTE: sc/ac look unused, but accessing __code__ deliberately raises
        # for property objects, routing them to the except branch below —
        # do not remove these two lines.
        sc = stub.object_.__code__.co_argcount
        ac = real.object_.__code__.co_argcount
        sa = inspect.signature(stub.object_)
        sn = list(sa.parameters.keys())
        aa = inspect.signature(real.object_)
        an = list(aa.parameters.keys())
        diff = ''
        # Walk the stub's parameters in order against the real ones.
        for i, p in enumerate(sn):
            if i >= len(an):
                diff += f'\tExtra stub parameter {p}\n'
            elif p != an[i]:
                diff += f'\tMismatched parameter names at position {i}: {p} != {an[i]}\n'
            else:
                # 'kind' distinguishes positional-only/keyword-only/etc.
                sp = sa.parameters[p].kind
                ap = aa.parameters[p].kind
                if sp != ap:
                    diff += f'\tMismatched parameter types at position {i} {p}: {sp.description} != {ap.description}\n'
        if len(an) > len(sn):
            # The real function has parameters the stub doesn't mention.
            i = len(sn)
            while i < len(an):
                diff += f'\tExtra real parameter {an[i]}\n'
                i += 1
        if diff:
            print(f"Mismatched arguments for {module}.{owner}{name}:\n{diff}")
        else:
            print(f"{module}.{owner}{name} passes argument checks")
    except Exception as e:
        if str(e).find("'property' object") >= 0:
            # Properties can't be signature-checked this way; skip quietly.
            pass
            #print(f"Failed to validate property {module}.{owner}{name}")
        else:
            print(f"Failed to validate {module}.{owner}{name}: {e}")
def compare_functions(real: List[Item], stub: List[Item], owner: Optional[str]=None):
    """Pair up real and stub functions by name, then compare signatures.

    owner - dotted class-name prefix when these are methods; None or ''
    for top-level functions.
    """
    if owner is None:
        owner = ''
    elif owner and owner[-1] != '.':
        owner += '.'
    match_pairs(real, stub, 'function', owner)
    # Signatures are only compared for stubs that found a real analog.
    for stub_fn in stub:
        real_fn = stub_fn.analog
        if real_fn is not None:
            compare_args(real_fn, stub_fn, owner)
def compare_classes(real: List[Item], stub: List[Item]):
    """Pair up real and stub classes by name, then compare the methods of
    each matched pair."""
    match_pairs(real, stub, 'class')
    # Methods are only compared for stubs that found a real analog.
    for stub_cls in stub:
        real_cls = stub_cls.analog
        if real_cls is not None:
            real_methods, _ = collect_items(real_cls)
            stub_methods, _ = collect_items(stub_cls)
            compare_functions(real_methods, stub_methods, stub_cls.name)
def find_item(items: List["Item"], name: str,
              which: Literal['stub', 'real'],
              type_: Literal['class', 'function']) -> Optional["Item"]:
    """
    Linear search of *items* for an Item with the given name.

    which - whether this is 'stub' or 'real' (used only in the message)
    type_ - 'class' or 'function' (used only in the message)

    Returns the first match, or None (after printing a diagnostic) when
    nothing matches.
    """
    # The original loop carried an unreachable 'break' after 'return' and an
    # implicit None fall-through; a plain for-loop states the search directly.
    for item in items:
        if item.name == name:
            return item
    print(f"No {which} {type_} found with name {name}")
    return None
def compare_class(real: List[Item], stub: List[Item], class_: str):
    """Locate one named class in both lists and compare its methods.

    Prints a diagnostic (via find_item) and returns early if either side
    is missing the class.
    """
    real_cls = find_item(real, class_, 'real', 'class')
    stub_cls = find_item(stub, class_, 'stub', 'class')
    if real_cls is None or stub_cls is None:
        return
    real_methods, _ = collect_items(real_cls)
    stub_methods, _ = collect_items(stub_cls)
    compare_functions(real_methods, stub_methods, stub_cls.name)
def find_mismatched_modules(real: Item, stub: Item):
    """
    Print out all the modules in real package where
    we don't have a matching module in the stubs.
    """
    def has_module(path: str, name: str, node: Item, stub: Item):
        if not node.ismodule():
            return
        # Rebuild the dotted path as a component list, e.g. '/core/indexes'
        # + 'base' -> ['core', 'indexes', 'base'].
        components = path.split('/')[1:]
        components.append(name)
        # BUGFIX: the original assigned 'stubs = stub.children[c]' but never
        # descended, so nested components were all checked against the
        # top-level children dict. Walk down the stub tree level by level.
        cursor = stub
        for c in components:
            if cursor.children and c in cursor.children:
                cursor = cursor.children[c]
            else:
                print(f"No module {node.module}.{name} in stubs")
                break
    walk(real.children, has_module, stub)
def find_module(package: Item, module: str):
    """Walk down package.children following the dotted *module* path.

    The leading component (the package name itself) is skipped. Returns the
    matching Item, or None if any component is missing.
    """
    node = package
    for part in module.split('.')[1:]:
        if part not in node.children:
            return None
        node = node.children[part]
    return node
def compare(name: str, stubpath: Optional[str] = None, submodule: Optional[str] = None,
        class_: Optional[str] = None,
        function_: Optional[str] = None):
    """
    Top-level driver: import the real package and its stubs, then compare
    whatever subset was requested — a single function or method, one class,
    one submodule, or the whole package.

    Parameters:
        name: package name; a dotted name restricts to that submodule
        stubpath: directory holding the stubs (defaults to parent directory)
        submodule: full dotted submodule name to restrict the comparison to
        class_: restrict to this class
        function_: restrict to this function (or method, with class_)
    """
    split = name.find('.')
    if split > 0:
        # 'pkg.sub.mod' means: import 'pkg' but compare only 'pkg.sub.mod'.
        submodule = name
        name = name[:split]
    if stubpath is None:
        stubpath = '..'
    real, stub = import_dual(name, stubpath)
    real = gather(name, real)
    stub = gather(name, stub)
    # Collect the top level functions and classes
    real_functions, real_classes = collect_items(real)
    stub_functions, stub_classes = collect_items(stub)
    if function_ is not None:
        if class_ is not None:
            # Method: locate the owning class on both sides first.
            ac = find_item(real_classes, class_, 'real', 'class')
            sc = find_item(stub_classes, class_, 'stub', 'class')
            if ac is not None and sc is not None:
                real_functions, _ = collect_items(ac)
                stub_functions, _ = collect_items(sc)
                af = find_item(real_functions, function_, 'real', 'function')
                sf = find_item(stub_functions, function_, 'stub', 'function')
                if af is not None and sf is not None:
                    compare_args(af, sf, class_)
        else:
            # Top-level function
            af = find_item(real_functions, function_, 'real', 'function')
            sf = find_item(stub_functions, function_, 'stub', 'function')
            if af is not None and sf is not None:
                compare_args(af, sf)
    elif class_ is not None:
        compare_class(real_classes, stub_classes, class_=class_)
    elif submodule is not None:
        s = find_module(stub, submodule)
        if s is None:
            print(f"No stub {submodule} found")
        else:
            a = find_module(real, submodule)
            if a is None:
                print(f"No real module {submodule} found")
            # TODO: add the other checks but limit to this submodule
    else:
        # Whole-package comparison.
        find_mismatched_modules(real, stub)
        compare_functions(real_functions, stub_functions)
        compare_classes(real_classes, stub_classes)
        # TODO: if real code has type hints should compare with stubs
    # Get the docstrings and report mismatches
    # TODO
if __name__ == "__main__":
    # Parse the command line per the module docstring and run the comparison.
    cli = docopt.docopt(__doc__, version='Validate Stubs 0.1')
    compare(cli['<package>'], cli['--path'], class_=cli['--class'], function_=cli['--function'])