This commit is contained in:
Graham Wheeler 2022-12-19 15:25:37 -08:00
Parent 9e695dbb99
Commit 72dc4ebcf4
158 changed files with 670 additions and 1894 deletions

View file

@@ -22,14 +22,10 @@ class ExponentialDispersionModel(metaclass=ABCMeta):
@abstractmethod
def unit_variance(self, y_pred: ArrayLike): ...
@abstractmethod
def unit_deviance(
self, y: ArrayLike, y_pred: ArrayLike, check_input: bool = False
) -> ArrayLike: ...
def unit_deviance(self, y: ArrayLike, y_pred: ArrayLike, check_input: bool = False) -> ArrayLike: ...
def unit_deviance_derivative(self, y: ArrayLike, y_pred: ArrayLike): ...
def deviance(self, y: ArrayLike, y_pred: ArrayLike, weights: NDArray | int = 1): ...
def deviance_derivative(
self, y: NDArray, y_pred: NDArray, weights: NDArray | int = 1
): ...
def deviance_derivative(self, y: NDArray, y_pred: NDArray, weights: NDArray | int = 1): ...
class TweedieDistribution(ExponentialDispersionModel):
def __init__(self, power: float = 0): ...
@@ -38,9 +34,7 @@ class TweedieDistribution(ExponentialDispersionModel):
@power.setter
def power(self, power): ...
def unit_variance(self, y_pred: ArrayLike): ...
def unit_deviance(
self, y: ArrayLike, y_pred: ArrayLike, check_input: bool = False
) -> ArrayLike: ...
def unit_deviance(self, y: ArrayLike, y_pred: ArrayLike, check_input: bool = False) -> ArrayLike: ...
class NormalDistribution(TweedieDistribution):
def __init__(self): ...
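
A sketch of the contract these stubs describe, assuming the standard Tweedie family where power=0 is the normal distribution (toy values invented for illustration):

import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.5])
unit_dev = (y - y_pred) ** 2      # unit_deviance(y, y_pred) when power=0
deviance = np.sum(1 * unit_dev)   # deviance(y, y_pred, weights=1) sums weighted unit deviances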

View file

@@ -36,9 +36,7 @@ class BaseLink(ABC):
@abstractmethod
def link(self, y_pred: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
@abstractmethod
def inverse(
self, raw_prediction: ArrayLike, out: ArrayLike | None = None
) -> ArrayLike: ...
def inverse(self, raw_prediction: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
class IdentityLink(BaseLink):
def link(self, y_pred: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
@@ -50,18 +48,14 @@ class LogLink(BaseLink):
interval_y_pred = ...
def link(self, y_pred: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
def inverse(
self, raw_prediction: ArrayLike, out: ArrayLike | None = None
) -> ArrayLike: ...
def inverse(self, raw_prediction: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
class LogitLink(BaseLink):
interval_y_pred = ...
def link(self, y_pred: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
def inverse(
self, raw_prediction: ArrayLike, out: ArrayLike | None = None
) -> ArrayLike: ...
def inverse(self, raw_prediction: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
class MultinomialLogit(BaseLink):
@@ -70,8 +64,6 @@ class MultinomialLogit(BaseLink):
def symmetrize_raw_prediction(self, raw_prediction): ...
def link(self, y_pred: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
def inverse(
self, raw_prediction: ArrayLike, out: ArrayLike | None = None
) -> ArrayLike: ...
def inverse(self, raw_prediction: ArrayLike, out: ArrayLike | None = None) -> ArrayLike: ...
_LINKS: dict = ...
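
A sketch of the `BaseLink` contract stubbed above (assumption: `LogLink` pairs `log` with `exp`, as the class name suggests), showing that `inverse` undoes `link`:

import numpy as np

y_pred = np.array([0.5, 1.0, 4.0])
raw = np.log(y_pred)               # LogLink.link(y_pred)
back = np.exp(raw)                 # LogLink.inverse(raw_prediction)
assert np.allclose(back, y_pred)   # inverse(link(x)) round-trips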

View file

@@ -28,21 +28,20 @@ from ..utils import check_scalar
from ..utils.stats import _weighted_percentile
from sklearn._loss.link import IdentityLink, LogLink, LogitLink, MultinomialLogit
class CyLossFunction:
def cy_loss(self, y_true:float, raw_prediction: float) -> float: ...
def cy_gradient(self, y_true: float, raw_prediction: float) -> float:...
def cy_grad_hess(self, y_true: float, raw_prediction: float) -> tuple[float,float]:...
def cy_loss(self, y_true: float, raw_prediction: float) -> float: ...
def cy_gradient(self, y_true: float, raw_prediction: float) -> float: ...
def cy_grad_hess(self, y_true: float, raw_prediction: float) -> tuple[float, float]: ...
class CyHalfSquaredError(CyLossFunction):...
class CyAbsoluteError(CyLossFunction):...
class CyPinballLoss(CyLossFunction):...
class CyHalfPoissonLoss(CyLossFunction):...
class CyHalfGammaLoss(CyLossFunction):...
class CyHalfTweedieLoss(CyLossFunction):...
class CyHalfTweedieLossIdentity(CyLossFunction):...
class CyHalfBinomialLoss(CyLossFunction):...
class CyHalfMultinomialLoss(CyLossFunction):...
class CyHalfSquaredError(CyLossFunction): ...
class CyAbsoluteError(CyLossFunction): ...
class CyPinballLoss(CyLossFunction): ...
class CyHalfPoissonLoss(CyLossFunction): ...
class CyHalfGammaLoss(CyLossFunction): ...
class CyHalfTweedieLoss(CyLossFunction): ...
class CyHalfTweedieLossIdentity(CyLossFunction): ...
class CyHalfBinomialLoss(CyLossFunction): ...
class CyHalfMultinomialLoss(CyLossFunction): ...
# Note: The shape of raw_prediction for multiclass classifications are
# - GradientBoostingClassifier: (n_samples, n_classes)
@@ -132,9 +131,7 @@ class BaseLoss:
sample_weight: NDArray | None = None,
n_threads: int = 1,
) -> float: ...
def fit_intercept_only(
self, y_true: ArrayLike, sample_weight: ArrayLike | None = None
) -> Scalar | NDArray: ...
def fit_intercept_only(self, y_true: ArrayLike, sample_weight: ArrayLike | None = None) -> Scalar | NDArray: ...
def constant_to_optimal_zero(self, y_true, sample_weight=None): ...
def init_gradient_and_hessian(
self,
@@ -164,15 +161,11 @@ class PinballLoss(BaseLoss):
need_update_leaves_values: bool = ...
def __init__(self, sample_weight: None = None, quantile: float = 0.5) -> None: ...
def fit_intercept_only(
self, y_true: ndarray, sample_weight: None = None
) -> float64: ...
def fit_intercept_only(self, y_true: ndarray, sample_weight: None = None) -> float64: ...
class HalfPoissonLoss(BaseLoss):
def __init__(self, sample_weight: None = None) -> None: ...
def constant_to_optimal_zero(
self, y_true: ndarray, sample_weight: None = None
) -> ndarray: ...
def constant_to_optimal_zero(self, y_true: ndarray, sample_weight: None = None) -> ndarray: ...
class HalfGammaLoss(BaseLoss):
def __init__(self, sample_weight=None): ...
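
A sketch of why `PinballLoss` overrides `fit_intercept_only` (assumption: the pinball-loss-minimizing constant is the `quantile`-th quantile of `y_true`; data invented here):

import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 10.0])
q = 0.5
intercept = np.percentile(y_true, q * 100)   # the median (2.5) when q = 0.5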

View file

@@ -48,25 +48,12 @@ class BaseEstimator:
def set_params(self, **params) -> BaseEstimator: ...
def __repr__(self, N_CHAR_MAX: int = 700) -> str: ...
def __getstate__(self) -> Dict[str, Optional[Union[int, str, ndarray]]]: ...
def __setstate__(
self, state: Dict[str, Optional[Union[int, str, ndarray]]]
) -> None: ...
def __setstate__(self, state: Dict[str, Optional[Union[int, str, ndarray]]]) -> None: ...
def _more_tags(self) -> Dict[str, Union[bool, List[str], List[Type[float64]]]]: ...
def _get_tags(
self,
) -> Dict[
str,
Union[
bool,
List[str],
Dict[str, str],
List[Type[float64]],
List[Union[Type[float64], Type[float32]]],
],
]: ...
def _check_n_features(
self, X: Union[DataFrame, ndarray, csr_matrix, csc_matrix], reset: bool
) -> None: ...
) -> Dict[str, Union[bool, List[str], Dict[str, str], List[Type[float64]], List[Union[Type[float64], Type[float32]]],],]: ...
def _check_n_features(self, X: Union[DataFrame, ndarray, csr_matrix, csc_matrix], reset: bool) -> None: ...
def _check_feature_names(self, X: Any, *, reset) -> None: ...
def _validate_data(
self,
@@ -87,13 +74,7 @@ class BaseEstimator:
Tuple[Dict[str, Union[bool, str]], Dict[str, bool]],
] = False,
**check_params,
) -> Union[
Tuple[ndarray, ndarray],
Tuple[csr_matrix, ndarray],
ndarray,
csr_matrix,
Tuple[csc_matrix, ndarray],
]: ...
) -> Union[Tuple[ndarray, ndarray], Tuple[csr_matrix, ndarray], ndarray, csr_matrix, Tuple[csc_matrix, ndarray],]: ...
@property
def _repr_html_(self): ...
def _repr_html_inner(self): ...
@@ -103,18 +84,14 @@ class ClassifierMixin:
_estimator_type: str = ...
def score(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
def _more_tags(self) -> Dict[str, bool]: ...
class RegressorMixin:
_estimator_type: str = ...
def score(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
def _more_tags(self) -> Dict[str, bool]: ...
class ClusterMixin:
@@ -132,19 +109,13 @@ class BiclusterMixin:
def get_submatrix(self, i: int, data: ArrayLike) -> np.ndarray: ...
class TransformerMixin:
def fit_transform(
self, X: ArrayLike, y: ArrayLike | None = None, **fit_params
) -> NDArray: ...
def fit_transform(self, X: ArrayLike, y: ArrayLike | None = None, **fit_params) -> NDArray: ...
class _OneToOneFeatureMixin:
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
class _ClassNamePrefixFeaturesOutMixin:
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
class DensityMixin:

View file

@@ -91,12 +91,8 @@ def _fit_classifier_calibrator_pair(
sample_weight: Optional[ndarray] = None,
**fit_params,
) -> "_CalibratedClassifier": ...
def _get_prediction_method(
clf: Union[GaussianNB, RandomForestClassifier]
) -> Tuple[Callable, str]: ...
def _compute_predictions(
pred_method: Callable, method_name: str, X: ndarray, n_classes: int
) -> ndarray: ...
def _get_prediction_method(clf: Union[GaussianNB, RandomForestClassifier]) -> Tuple[Callable, str]: ...
def _compute_predictions(pred_method: Callable, method_name: str, X: ndarray, n_classes: int) -> ndarray: ...
def _fit_calibrator(
clf: Union[GaussianNB, RandomForestClassifier],
predictions: ndarray,
@@ -122,9 +118,7 @@ def _sigmoid_calibration(
) -> Tuple[float64, float64]: ...
class _SigmoidCalibration(RegressorMixin, BaseEstimator):
def fit(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> "_SigmoidCalibration": ...
def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> "_SigmoidCalibration": ...
def predict(self, T: ArrayLike) -> NDArray: ...
def calibration_curve(
@@ -132,7 +126,7 @@ def calibration_curve(
y_prob: ArrayLike,
*,
pos_label: int | str | None = None,
normalize: bool|str = "deprecated",
normalize: bool | str = "deprecated",
n_bins: int = 5,
strategy: Literal["uniform", "quantile"] = "uniform",
) -> tuple[NDArray, NDArray]: ...
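
A usage sketch for the `calibration_curve` signature above, with toy labels and probabilities invented for illustration:

import numpy as np
from sklearn.calibration import calibration_curve

y_true = np.array([0, 0, 0, 1, 1, 1])
y_prob = np.array([0.1, 0.4, 0.35, 0.8, 0.65, 0.9])
# Returns the mean true frequency and mean predicted probability per bin.
prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=3, strategy="uniform")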

View file

@@ -2,7 +2,6 @@ from numpy import bool_, ndarray
from typing import Tuple, Union, Literal
from numpy.typing import ArrayLike, NDArray
# Author: Alexandre Gramfort alexandre.gramfort@inria.fr
# Gael Varoquaux gael.varoquaux@normalesup.org

View file

@@ -23,9 +23,7 @@ from scipy.sparse._lil import lil_matrix
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(
X: ndarray, connectivity: Union[csr_matrix, coo_matrix], affinity: str
) -> Tuple[lil_matrix, int]: ...
def _fix_connectivity(X: ndarray, connectivity: Union[csr_matrix, coo_matrix], affinity: str) -> Tuple[lil_matrix, int]: ...
def _single_linkage_tree(
connectivity: coo_matrix,
n_samples: int,
@@ -57,15 +55,9 @@ def linkage_tree(
) -> tuple[np.ndarray, int, int, NDArray | None, np.ndarray]: ...
# Matching names to tree-building strategies
def _complete_linkage(
*args, **kwargs
) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
def _average_linkage(
*args, **kwargs
) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
def _single_linkage(
*args, **kwargs
) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
def _complete_linkage(*args, **kwargs) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
def _average_linkage(*args, **kwargs) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
def _single_linkage(*args, **kwargs) -> Union[Tuple[ndarray, int, int, ndarray], Tuple[ndarray, int, int, None]]: ...
_TREE_BUILDERS = ...
@@ -82,7 +74,7 @@ class AgglomerativeClustering(ClusterMixin, BaseEstimator):
n_clusters: int | None = 2,
*,
affinity: str | Callable = "euclidean",
memory = None,
memory=None,
connectivity: ArrayLike | Callable | None = None,
compute_full_tree: bool | Literal["auto"] = "auto",
linkage: Literal["ward", "complete", "average", "single"] = "ward",
@@ -90,20 +82,16 @@ class AgglomerativeClustering(ClusterMixin, BaseEstimator):
compute_distances: bool = False,
) -> None: ...
def fit(self, X: ArrayLike, y: None = None) -> "AgglomerativeClustering": ...
def _fit(
self, X: ndarray
) -> Union[AgglomerativeClustering, FeatureAgglomeration]: ...
def _fit(self, X: ndarray) -> Union[AgglomerativeClustering, FeatureAgglomeration]: ...
def fit_predict(self, X: ArrayLike, y: None = None) -> NDArray: ...
class FeatureAgglomeration(
_ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform
):
class FeatureAgglomeration(_ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform):
def __init__(
self,
n_clusters: int = 2,
*,
affinity: str | Callable = "euclidean",
memory = None,
memory=None,
connectivity: ArrayLike | Callable | None = None,
compute_full_tree: bool | Literal["auto"] = "auto",
linkage: Literal["ward", "complete", "average", "single"] = "ward",
@@ -111,8 +99,6 @@ class FeatureAgglomeration(
distance_threshold: float | None = None,
compute_distances: bool = False,
) -> None: ...
def fit(
self, X: ArrayLike, y: Optional[ndarray] = None
) -> "FeatureAgglomeration": ...
def fit(self, X: ArrayLike, y: Optional[ndarray] = None) -> "FeatureAgglomeration": ...
@property
def fit_predict(self): ...
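
A usage sketch for the estimators stubbed above; `fit_predict` returns one cluster label per row of X (toy data):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
labels = AgglomerativeClustering(n_clusters=2, linkage="ward").fit_predict(X)
# labels separates the two nearby pairs into two clusters.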

View file

@@ -47,12 +47,8 @@ class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
random_state: Optional[int] = None,
) -> None: ...
def _check_parameters(self, n_samples: int) -> None: ...
def fit(
self, X: ArrayLike, y: None = None
) -> Union[SpectralCoclustering, SpectralBiclustering]: ...
def _svd(
self, array: Union[ndarray, csr_matrix], n_components: int, n_discard: int
) -> Tuple[ndarray, ndarray]: ...
def fit(self, X: ArrayLike, y: None = None) -> Union[SpectralCoclustering, SpectralBiclustering]: ...
def _svd(self, array: Union[ndarray, csr_matrix], n_components: int, n_discard: int) -> Tuple[ndarray, ndarray]: ...
def _k_means(self, data: ndarray, n_clusters: int) -> Tuple[ndarray, ndarray]: ...
def _more_tags(self): ...
@@ -88,9 +84,5 @@ class SpectralBiclustering(BaseSpectral):
) -> None: ...
def _check_parameters(self, n_samples: int) -> None: ...
def _fit(self, X: ndarray) -> None: ...
def _fit_best_piecewise(
self, vectors: ndarray, n_best: int, n_clusters: int
) -> ndarray: ...
def _project_and_cluster(
self, data: ndarray, vectors: ndarray, n_clusters: int
) -> ndarray: ...
def _fit_best_piecewise(self, vectors: ndarray, n_best: int, n_clusters: int) -> ndarray: ...
def _project_and_cluster(self, data: ndarray, vectors: ndarray, n_clusters: int) -> ndarray: ...

View file

@@ -29,14 +29,10 @@ from .._config import config_context
from typing import List, Optional, Tuple
def _iterate_sparse_X(X): ...
def _split_node(
node: "_CFNode", threshold: float, branching_factor: int
) -> Tuple[_CFSubcluster, _CFSubcluster]: ...
def _split_node(node: "_CFNode", threshold: float, branching_factor: int) -> Tuple[_CFSubcluster, _CFSubcluster]: ...
class _CFNode:
def __init__(
self, *, threshold: float, branching_factor: int, is_leaf: bool, n_features: int
) -> None: ...
def __init__(self, *, threshold: float, branching_factor: int, is_leaf: bool, n_features: int) -> None: ...
def append_subcluster(self, subcluster: "_CFSubcluster") -> None: ...
def update_split_subclusters(
self,
@@ -49,15 +45,11 @@ class _CFNode:
class _CFSubcluster:
def __init__(self, *, linear_sum: NDArray | None = None) -> None: ...
def update(self, subcluster: "_CFSubcluster") -> None: ...
def merge_subcluster(
self, nominee_cluster: "_CFSubcluster", threshold: float
) -> bool: ...
def merge_subcluster(self, nominee_cluster: "_CFSubcluster", threshold: float) -> bool: ...
@property
def radius(self): ...
class Birch(
_ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
):
class Birch(_ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator):
def __init__(
self,
*,
@@ -70,17 +62,13 @@ class Birch(
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"`fit_` is deprecated in 1.0 and will be removed in 1.2."
)
@deprecated("`fit_` is deprecated in 1.0 and will be removed in 1.2.") # type: ignore
@property
def fit_(self): ...
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"`partial_fit_` is deprecated in 1.0 and will be removed in 1.2."
)
@deprecated("`partial_fit_` is deprecated in 1.0 and will be removed in 1.2.") # type: ignore
@property
def partial_fit_(self): ...
def fit(self, X: NDArray | ArrayLike, y: None = None) -> "Birch": ...

View file

@@ -21,9 +21,7 @@ from ..utils.validation import _is_arraylike_not_scalar
from numpy import float64, ndarray
class _BisectingTree:
def __init__(
self, center: ndarray, indices: ndarray, score: Union[int, float64]
) -> None: ...
def __init__(self, center: ndarray, indices: ndarray, score: Union[int, float64]) -> None: ...
def split(self, labels: ndarray, centers: ndarray, scores: ndarray) -> None: ...
def get_cluster_to_bisect(self) -> "_BisectingTree": ...
def iter_leaves(self) -> Iterator[_BisectingTree]: ...
@@ -41,15 +39,11 @@ class BisectingKMeans(_BaseKMeans):
tol: float = 1e-4,
copy_x: bool = True,
algorithm: Literal["lloyd", "elkan"] = "lloyd",
bisecting_strategy: Literal[
"biggest_inertia", "largest_cluster"
] = "biggest_inertia",
bisecting_strategy: Literal["biggest_inertia", "largest_cluster"] = "biggest_inertia",
) -> None: ...
def _check_params(self, X: ndarray) -> None: ...
def _warn_mkl_vcomp(self, n_active_threads): ...
def _inertia_per_cluster(
self, X: ndarray, centers: ndarray, labels: ndarray, sample_weight: ndarray
) -> ndarray: ...
def _inertia_per_cluster(self, X: ndarray, centers: ndarray, labels: ndarray, sample_weight: ndarray) -> ndarray: ...
def _bisect(
self,
X: ndarray,

View file

@@ -18,8 +18,6 @@ from ..base import BaseEstimator, ClusterMixin
from ..utils.validation import _check_sample_weight
from ..neighbors import NearestNeighbors
def dbscan(
X: ArrayLike,
eps: float = 0.5,

View file

@@ -115,9 +115,7 @@ def _labels_inertia_threadpool_limit(
return_inertia: bool = True,
) -> Tuple[ndarray, float]: ...
class _BaseKMeans(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC
):
class _BaseKMeans(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC):
def __init__(
self,
n_clusters: Union[int64, int],
@@ -132,9 +130,7 @@ class _BaseKMeans(
def _check_params(self, X: Union[ndarray, csr_matrix]) -> None: ...
@abstractmethod
def _warn_mkl_vcomp(self, n_active_threads): ...
def _check_mkl_vcomp(
self, X: Union[ndarray, csr_matrix], n_samples: int
) -> None: ...
def _check_mkl_vcomp(self, X: Union[ndarray, csr_matrix], n_samples: int) -> None: ...
def _validate_center_shape(self, X: ndarray, centers: ndarray) -> None: ...
def _check_test_data(self, X: ndarray) -> ndarray: ...
def _init_centroids(
@@ -152,17 +148,11 @@ class _BaseKMeans(
y: None = None,
sample_weight: ArrayLike | None = None,
) -> NDArray: ...
def predict(
self, X: NDArray | ArrayLike, sample_weight: ArrayLike | None = None
) -> NDArray: ...
def fit_transform(
self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None
) -> np.ndarray: ...
def predict(self, X: NDArray | ArrayLike, sample_weight: ArrayLike | None = None) -> NDArray: ...
def fit_transform(self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None) -> np.ndarray: ...
def transform(self, X: NDArray | ArrayLike) -> np.ndarray: ...
def _transform(self, X): ...
def score(
self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None) -> float: ...
def _more_tags(self): ...
class KMeans(_BaseKMeans):
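
A usage sketch for the `_BaseKMeans` methods stubbed above, via the public `KMeans` (toy data invented for illustration):

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0.0, 0.0], [0.2, 0.1], [4.0, 4.0], [4.2, 4.1]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
labels = km.predict(X)   # NDArray of cluster indices, per the stub
dists = km.transform(X)  # distances to each center, shape (n_samples, n_clusters)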

View file

@@ -11,7 +11,6 @@ import numpy as np
from numpy.random import RandomState
import warnings
from collections import defaultdict
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
@@ -46,9 +45,7 @@ def mean_shift(
max_iter: int = 300,
n_jobs: int | None = None,
) -> tuple[np.ndarray, NDArray]: ...
def get_bin_seeds(
X: ArrayLike, bin_size: float, min_bin_freq: int = 1
) -> ArrayLike: ...
def get_bin_seeds(X: ArrayLike, bin_size: float, min_bin_freq: int = 1) -> ArrayLike: ...
class MeanShift(ClusterMixin, BaseEstimator):
def __init__(

View file

@@ -35,14 +35,10 @@ class OPTICS(ClusterMixin, BaseEstimator):
) -> None: ...
def fit(self, X: NDArray, y: None = None) -> "OPTICS": ...
def _validate_size(
size: Union[int, float], n_samples: int, param_name: str
) -> None: ...
def _validate_size(size: Union[int, float], n_samples: int, param_name: str) -> None: ...
# OPTICS helper functions
def _compute_core_distances_(
X: ndarray, neighbors: NearestNeighbors, min_samples: int, working_memory: None
) -> ndarray: ...
def _compute_core_distances_(X: ndarray, neighbors: NearestNeighbors, min_samples: int, working_memory: None) -> ndarray: ...
def compute_optics_graph(
X: NDArray,
*,
@@ -85,13 +81,9 @@ def cluster_optics_xi(
xi: float = 0.05,
predecessor_correction: bool = True,
) -> tuple[NDArray, np.ndarray]: ...
def _extend_region(
steep_point: ndarray, xward_point: ndarray, start: int64, min_samples: int
) -> int64: ...
def _extend_region(steep_point: ndarray, xward_point: ndarray, start: int64, min_samples: int) -> int64: ...
def _update_filter_sdas(
sdas: List[
Union[Dict[str, Union[int64, float64]], Dict[str, Union[int64, float]], Any]
],
sdas: List[Union[Dict[str, Union[int64, float64]], Dict[str, Union[int64, float]], Any]],
mib: float64,
xi_complement: float,
reachability_plot: ndarray,

View file

@@ -3,7 +3,6 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, Mapping, C
from numpy.typing import ArrayLike, NDArray, DTypeLike
from numpy import ndarray
# Author: Andreas Mueller
# Joris Van den Bossche
# License: BSD
@@ -14,7 +13,6 @@ from typing import Literal, Callable, Sequence
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, clone, TransformerMixin
from ..utils._estimator_html_repr import _VisualBlock
from ..pipeline import _fit_transform_one, _transform_one, _name_estimators
@@ -76,10 +74,7 @@ class ColumnTransformer(TransformerMixin, _BaseComposition):
def _validate_remainder(self, X: Union[DataFrame, ndarray]) -> None: ...
@property
def named_transformers_(self): ...
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
@deprecated("get_feature_names is deprecated in 1.0 and will be removed " "in 1.2. Please use get_feature_names_out instead.")
def get_feature_names(self) -> ArrayLike: ...
def _get_feature_name_out_for_transformer(
self,
@@ -88,9 +83,7 @@ class ColumnTransformer(TransformerMixin, _BaseComposition):
column: List[str],
feature_names_in: ndarray,
) -> ndarray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def _update_fitted_transformers(
self,
transformers: Union[
@@ -119,12 +112,8 @@ class ColumnTransformer(TransformerMixin, _BaseComposition):
fitted: bool = False,
column_as_strings: bool = False,
) -> List[Any]: ...
def fit(
self, X: ArrayLike | DataFrame, y: ArrayLike | None = None
) -> ColumnTransformer: ...
def fit_transform(
self, X: ArrayLike | DataFrame, y: ArrayLike | None = None
) -> NDArray | ArrayLike: ...
def fit(self, X: ArrayLike | DataFrame, y: ArrayLike | None = None) -> ColumnTransformer: ...
def fit_transform(self, X: ArrayLike | DataFrame, y: ArrayLike | None = None) -> NDArray | ArrayLike: ...
def transform(self, X: ArrayLike | DataFrame) -> NDArray | ArrayLike: ...
def _hstack(self, Xs: List[Union[csr_matrix, ndarray]]) -> ndarray: ...
def _sk_visual_block_(self): ...
@@ -132,9 +121,7 @@ class ColumnTransformer(TransformerMixin, _BaseComposition):
def _check_X(X: Union[DataFrame, ndarray]) -> Union[DataFrame, ndarray]: ...
def _is_empty_column_selection(column: Union[int, List[str]]) -> bool: ...
def _get_transformer_list(
estimators: Tuple[
Tuple[Pipeline, Tuple[str, str]], Tuple[Pipeline, Tuple[str, str]]
]
estimators: Tuple[Tuple[Pipeline, Tuple[str, str]], Tuple[Pipeline, Tuple[str, str]]]
) -> List[Tuple[str, Pipeline, Tuple[str, str]]]: ...
def make_column_transformer(
*transformers,

View file

@@ -27,6 +27,4 @@ class EllipticEnvelope(OutlierMixin, MinCovDet):
def decision_function(self, X: ArrayLike) -> NDArray: ...
def score_samples(self, X: ArrayLike) -> ArrayLike: ...
def predict(self, X: ArrayLike) -> NDArray: ...
def score(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
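
A usage sketch for `EllipticEnvelope` as stubbed above: `predict` returns +1 for inliers and -1 for outliers (toy data with one planted outlier):

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = np.r_[rng.normal(size=(100, 2)), [[8.0, 8.0]]]
env = EllipticEnvelope(contamination=0.01).fit(X)
flags = env.predict(X)   # -1 expected for the planted outlier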

View file

@@ -23,9 +23,7 @@ def log_likelihood(emp_cov: NDArray, precision: NDArray) -> float: ...
def empirical_covariance(X: NDArray, *, assume_centered: bool = False) -> NDArray: ...
class EmpiricalCovariance(BaseEstimator):
def __init__(
self, *, store_precision: bool = True, assume_centered: bool = False
) -> None: ...
def __init__(self, *, store_precision: bool = True, assume_centered: bool = False) -> None: ...
def _set_covariance(self, covariance: ndarray) -> None: ...
def get_precision(self) -> ArrayLike: ...
def fit(self, X: ArrayLike, y: None = None) -> "EmpiricalCovariance": ...

View file

@@ -11,7 +11,6 @@ import numbers
import numpy as np
from scipy import linalg
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
@@ -72,10 +71,6 @@ class MinCovDet(EmpiricalCovariance):
support_fraction: float | None = None,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: ArrayLike, y: None = None
) -> Union[MinCovDet, EllipticEnvelope]: ...
def fit(self, X: ArrayLike, y: None = None) -> Union[MinCovDet, EllipticEnvelope]: ...
def correct_covariance(self, data: ArrayLike) -> NDArray: ...
def reweight_covariance(
self, data: ArrayLike
) -> tuple[NDArray, NDArray, np.ndarray]: ...
def reweight_covariance(self, data: ArrayLike) -> tuple[NDArray, NDArray, np.ndarray]: ...

View file

@@ -21,32 +21,16 @@ from numpy import float64, ndarray
def shrunk_covariance(emp_cov: ArrayLike, shrinkage: float = 0.1) -> NDArray: ...
class ShrunkCovariance(EmpiricalCovariance):
def __init__(
self,
*,
store_precision: bool = True,
assume_centered: bool = False,
shrinkage: float = 0.1
) -> None: ...
def __init__(self, *, store_precision: bool = True, assume_centered: bool = False, shrinkage: float = 0.1) -> None: ...
def fit(self, X: ArrayLike, y: None = None) -> "ShrunkCovariance": ...
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(
X: ArrayLike, assume_centered: bool = False, block_size: int = 1000
) -> float: ...
def ledoit_wolf(
X: ArrayLike, *, assume_centered: bool = False, block_size: int = 1000
) -> tuple[NDArray, float]: ...
def ledoit_wolf_shrinkage(X: ArrayLike, assume_centered: bool = False, block_size: int = 1000) -> float: ...
def ledoit_wolf(X: ArrayLike, *, assume_centered: bool = False, block_size: int = 1000) -> tuple[NDArray, float]: ...
class LedoitWolf(EmpiricalCovariance):
def __init__(
self,
*,
store_precision: bool = True,
assume_centered: bool = False,
block_size: int = 1000
) -> None: ...
def __init__(self, *, store_precision: bool = True, assume_centered: bool = False, block_size: int = 1000) -> None: ...
def fit(self, X: ArrayLike, y: None = None) -> "LedoitWolf": ...
# OAS estimator

View file

@@ -60,15 +60,9 @@ class _PLS(
tol=1e-06,
copy=True,
) -> None: ...
def fit(
self, X: ArrayLike, Y: ArrayLike
) -> Union[PLSCanonical, CCA, PLSRegression]: ...
def transform(
self, X: ArrayLike, Y: ArrayLike | None = None, copy: bool = True
) -> ArrayLike | tuple[ArrayLike, ...]: ...
def inverse_transform(
self, X: ArrayLike, Y: ArrayLike | None = None
) -> tuple[NDArray, NDArray]: ...
def fit(self, X: ArrayLike, Y: ArrayLike) -> Union[PLSCanonical, CCA, PLSRegression]: ...
def transform(self, X: ArrayLike, Y: ArrayLike | None = None, copy: bool = True) -> ArrayLike | tuple[ArrayLike, ...]: ...
def inverse_transform(self, X: ArrayLike, Y: ArrayLike | None = None) -> tuple[NDArray, NDArray]: ...
def predict(self, X: ArrayLike, copy: bool = True) -> NDArray: ...
def fit_transform(self, X: ArrayLike, y: ArrayLike | None = None) -> NDArray: ...
@property
@@ -127,13 +121,7 @@ class CCA(_PLS):
) -> None: ...
class PLSSVD(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
def __init__(
self, n_components: int = 2, *, scale: bool = True, copy: bool = True
): ...
def __init__(self, n_components: int = 2, *, scale: bool = True, copy: bool = True): ...
def fit(self, X: ArrayLike, Y: ArrayLike) -> Any: ...
def transform(
self, X: ArrayLike, Y: ArrayLike | None = None
) -> ArrayLike | tuple[ArrayLike, ...]: ...
def fit_transform(
self, X: ArrayLike, y: ArrayLike | None = None
) -> ArrayLike | tuple[ArrayLike, ...]: ...
def transform(self, X: ArrayLike, Y: ArrayLike | None = None) -> ArrayLike | tuple[ArrayLike, ...]: ...
def fit_transform(self, X: ArrayLike, y: ArrayLike | None = None) -> ArrayLike | tuple[ArrayLike, ...]: ...

View file

@@ -14,12 +14,8 @@ from ..utils import (
is_scalar_nan,
)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType: ...
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray: ...
def _split_sparse_columns(arff_data: ArffSparseDataType, include_columns: List) -> ArffSparseDataType: ...
def _sparse_data_to_array(arff_data: ArffSparseDataType, include_columns: List) -> np.ndarray: ...
def _feature_to_dtype(feature: Dict[str, str]): ...
def _convert_arff_data(
arff: ArffContainerType,
@@ -27,9 +23,7 @@ def _convert_arff_data(
col_slice_y: List[int],
shape: Optional[Tuple] = None,
) -> Tuple: ...
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple: ...
def _convert_arff_data_dataframe(arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]) -> Tuple: ...
def _liac_arff_parser(
arff_container,
output_arrays_type,

View file

@@ -76,24 +76,12 @@ def load_gzip_compressed_csv_data(
**kwargs,
) -> tuple[NDArray, str]: ...
def load_descr(descr_file_name: str, *, descr_module: str | module = ...) -> str: ...
def load_wine(
*, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...
def load_iris(
*, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...
def load_breast_cancer(
*, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...
def load_digits(
*, n_class: int = 10, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...
def load_diabetes(
*, return_X_y: bool = False, as_frame: bool = False, scaled: bool = True
) -> tuple[Bunch, tuple]: ...
def load_linnerud(
*, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...
def load_wine(*, return_X_y: bool = False, as_frame: bool = False) -> tuple[Bunch, tuple]: ...
def load_iris(*, return_X_y: bool = False, as_frame: bool = False) -> tuple[Bunch, tuple]: ...
def load_breast_cancer(*, return_X_y: bool = False, as_frame: bool = False) -> tuple[Bunch, tuple]: ...
def load_digits(*, n_class: int = 10, return_X_y: bool = False, as_frame: bool = False) -> tuple[Bunch, tuple]: ...
def load_diabetes(*, return_X_y: bool = False, as_frame: bool = False, scaled: bool = True) -> tuple[Bunch, tuple]: ...
def load_linnerud(*, return_X_y: bool = False, as_frame: bool = False) -> tuple[Bunch, tuple]: ...
@deprecated(
r"""`load_boston` is deprecated in 1.0 and will be removed in 1.2.

View file

@@ -27,9 +27,5 @@ ARCHIVE = ...
logger = ...
def fetch_california_housing(
*,
data_home: str | None = None,
download_if_missing: bool = True,
return_X_y: bool = False,
as_frame: bool = False
*, data_home: str | None = None, download_if_missing: bool = True, return_X_y: bool = False, as_frame: bool = False
) -> tuple[Bunch, tuple]: ...

View file

@@ -42,18 +42,14 @@ TARGETS = ...
# local disk caching, and image decoding.
#
def _check_fetch_lfw(
data_home: None = None, funneled: bool = True, download_if_missing: bool = True
) -> Tuple[str, str]: ...
def _check_fetch_lfw(data_home: None = None, funneled: bool = True, download_if_missing: bool = True) -> Tuple[str, str]: ...
def _load_imgs(file_paths, slice_, color, resize): ...
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(
data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0
): ...
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0): ...
def fetch_lfw_people(
*,
data_home: str | None = None,
@@ -70,9 +66,7 @@ def fetch_lfw_people(
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(
index_file_path, data_folder_path, slice_=None, color=False, resize=None
): ...
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None, color=False, resize=None): ...
def fetch_lfw_pairs(
*,
subset: Literal["train", "test", "10_folds"] = "train",

View file

@@ -35,12 +35,8 @@ OpenmlFeaturesType = ...
def _get_local_path(openml_path: str, data_home: str) -> str: ...
def _retry_with_clean_cache(openml_path: str, data_home: Optional[str]) -> Callable: ...
def _retry_on_network_error(
n_retries: int = 3, delay: float = 1.0, url: str = ""
) -> Callable: ...
def _open_openml_url(
openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0
): ...
def _retry_on_network_error(n_retries: int = 3, delay: float = 1.0, url: str = "") -> Callable: ...
def _open_openml_url(openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0): ...
class OpenMLError(ValueError):
pass

View file

@@ -54,9 +54,7 @@ def make_multilabel_classification(
return_distributions: bool = False,
random_state: int | RandomState | None = None,
) -> tuple[NDArray, NDArray, NDArray, np.ndarray]: ...
def make_hastie_10_2(
n_samples: int = 12000, *, random_state: int | RandomState | None = None
) -> tuple[np.ndarray, NDArray]: ...
def make_hastie_10_2(n_samples: int = 12000, *, random_state: int | RandomState | None = None) -> tuple[np.ndarray, NDArray]: ...
def make_regression(
n_samples: int = 100,
n_features: int = 100,
@@ -142,9 +140,7 @@ def make_sparse_uncorrelated(
*,
random_state: int | RandomState | None = None,
) -> tuple[NDArray, NDArray]: ...
def make_spd_matrix(
n_dim: int, *, random_state: int | RandomState | None = None
) -> np.ndarray: ...
def make_spd_matrix(n_dim: int, *, random_state: int | RandomState | None = None) -> np.ndarray: ...
def make_sparse_spd_matrix(
dim: int = 1,
*,

View file

@@ -36,6 +36,4 @@ logger = ...
def _load_coverage(F, header_length=6, dtype=np.int16): ...
def _load_csv(F): ...
def construct_grids(batch: Batch) -> NDArray: ...
def fetch_species_distributions(
*, data_home: str | None = None, download_if_missing: bool = True
) -> Bunch: ...
def fetch_species_distributions(*, data_home: str | None = None, download_if_missing: bool = True) -> Bunch: ...

View file

@@ -17,9 +17,7 @@ from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutM
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
class _BasePCA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta):
def get_covariance(self) -> NDArray: ...
def get_precision(self) -> NDArray: ...
@abstractmethod

View file

@@ -48,9 +48,7 @@ def sparse_encode(
*,
gram: NDArray | None = None,
cov: NDArray | None = None,
algorithm: Literal[
"lasso_lars", "lasso_cd", "lars", "omp", "threshold"
] = "lasso_lars",
algorithm: Literal["lasso_lars", "lasso_cd", "lars", "omp", "threshold"] = "lasso_lars",
n_nonzero_coefs: int | None = None,
alpha: float | None = None,
copy_cov: bool = True,
@@ -96,21 +94,21 @@ def dict_learning_online(
n_components: int | None = 2,
*,
alpha: float = 1,
n_iter: int|str = "deprecated",
n_iter: int | str = "deprecated",
max_iter: int | None = None,
return_code: bool = True,
dict_init: NDArray | None = None,
callback: Callable | None = None,
batch_size: int|str = "warn",
batch_size: int | str = "warn",
verbose: bool = False,
shuffle: bool = True,
n_jobs: int | None = None,
method: Literal["lars", "cd"] = "lars",
iter_offset: int|str = "deprecated",
iter_offset: int | str = "deprecated",
random_state: int | RandomState | None = None,
return_inner_stats: bool|str = "deprecated",
inner_stats: tuple[NDArray, ...]|str = "deprecated",
return_n_iter: bool|str = "deprecated",
return_inner_stats: bool | str = "deprecated",
inner_stats: tuple[NDArray, ...] | str = "deprecated",
return_n_iter: bool | str = "deprecated",
positive_dict: bool = False,
positive_code: bool = False,
method_max_iter: int = 1000,
@@ -140,9 +138,7 @@ class SparseCoder(_BaseSparseCoding, BaseEstimator):
self,
dictionary: NDArray,
*,
transform_algorithm: Literal[
"lasso_lars", "lasso_cd", "lars", "omp", "threshold"
] = "omp",
transform_algorithm: Literal["lasso_lars", "lasso_cd", "lars", "omp", "threshold"] = "omp",
transform_n_nonzero_coefs: int | None = None,
transform_alpha: float | None = None,
split_sign: bool = False,
@@ -169,9 +165,7 @@ class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
max_iter: int = 1000,
tol: float = 1e-8,
fit_algorithm: Literal["lars", "cd"] = "lars",
transform_algorithm: Literal[
"lasso_lars", "lasso_cd", "lars", "omp", "threshold"
] = "omp",
transform_algorithm: Literal["lasso_lars", "lasso_cd", "lars", "omp", "threshold"] = "omp",
transform_n_nonzero_coefs: int | None = None,
transform_alpha: float | None = None,
n_jobs: int | None = None,
@@ -195,16 +189,14 @@ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
n_components: int | None = None,
*,
alpha: float = 1,
n_iter: int|str = "deprecated",
n_iter: int | str = "deprecated",
max_iter: int | None = None,
fit_algorithm: Literal["lars", "cd"] = "lars",
n_jobs: int | None = None,
batch_size: int|str = "warn",
batch_size: int | str = "warn",
shuffle: bool = True,
dict_init: NDArray | None = None,
transform_algorithm: Literal[
"lasso_lars", "lasso_cd", "lars", "omp", "threshold"
] = "omp",
transform_algorithm: Literal["lasso_lars", "lasso_cd", "lars", "omp", "threshold"] = "omp",
transform_n_nonzero_coefs: int | None = None,
transform_alpha: float | None = None,
verbose: bool | int = False,
@@ -217,29 +209,19 @@ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
tol: float = 1e-3,
max_no_improvement: int = 10,
) -> None: ...
@deprecated( # type: ignore
"The attribute `iter_offset_` is deprecated in 1.1 and will be removed in 1.3."
)
@deprecated("The attribute `iter_offset_` is deprecated in 1.1 and will be removed in 1.3.") # type: ignore
@property
def iter_offset_(self): ...
@deprecated( # type: ignore
"The attribute `random_state_` is deprecated in 1.1 and will be removed in 1.3."
)
@deprecated("The attribute `random_state_` is deprecated in 1.1 and will be removed in 1.3.") # type: ignore
@property
def random_state_(self): ...
@deprecated( # type: ignore
"The attribute `inner_stats_` is deprecated in 1.1 and will be removed in 1.3."
)
@deprecated("The attribute `inner_stats_` is deprecated in 1.1 and will be removed in 1.3.") # type: ignore
@property
def inner_stats_(self): ...
def _check_params(self, X: ndarray) -> None: ...
def _initialize_dict(self, X: ndarray, random_state: RandomState) -> ndarray: ...
def _update_inner_stats(
self, X: ndarray, code: ndarray, batch_size: int, step: int
) -> None: ...
def _minibatch_step(
self, X: ndarray, dictionary: ndarray, random_state: RandomState, step: int
) -> float64: ...
def _update_inner_stats(self, X: ndarray, code: ndarray, batch_size: int, step: int) -> None: ...
def _minibatch_step(self, X: ndarray, dictionary: ndarray, random_state: RandomState, step: int) -> float64: ...
def _check_convergence(
self,
X: ndarray,
@@ -251,9 +233,7 @@ class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
n_steps: int,
) -> bool: ...
def fit(self, X: ArrayLike, y: None = None) -> "MiniBatchDictionaryLearning": ...
def partial_fit(
self, X: ArrayLike, y=None, iter_offset: int|str = "deprecated"
) -> Any: ...
def partial_fit(self, X: ArrayLike, y=None, iter_offset: int | str = "deprecated") -> Any: ...
@property
def _n_features_out(self): ...
def _more_tags(self): ...

View file

@@ -40,12 +40,8 @@ class FactorAnalysis(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEst
def get_precision(self) -> NDArray: ...
def score_samples(self, X: NDArray) -> NDArray: ...
def score(self, X: NDArray, y: None = None) -> float: ...
def _rotate(
self, components: ndarray, n_components: None = None, tol: float = 1e-6
) -> ndarray: ...
def _rotate(self, components: ndarray, n_components: None = None, tol: float = 1e-6) -> ndarray: ...
@property
def _n_features_out(self): ...
def _ortho_rotation(
components: ndarray, method: str = "varimax", tol: float = 1e-6, max_iter: int = 100
) -> ndarray: ...
def _ortho_rotation(components: ndarray, method: str = "varimax", tol: float = 1e-6, max_iter: int = 100) -> ndarray: ...

View file

@@ -34,9 +34,7 @@ def _ica_par(
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(
x: ndarray, fun_args: Optional[Dict[Any, Any]] = None
) -> Tuple[ndarray, ndarray]: ...
def _logcosh(x: ndarray, fun_args: Optional[Dict[Any, Any]] = None) -> Tuple[ndarray, ndarray]: ...
def _exp(x, fun_args): ...
def _cube(x, fun_args): ...
def fastica(

View file

@@ -15,15 +15,8 @@ from numpy import ndarray
class IncrementalPCA(_BasePCA):
def __init__(
self,
n_components: int | None = None,
*,
whiten: bool = False,
copy: bool = True,
batch_size: int | None = None
self, n_components: int | None = None, *, whiten: bool = False, copy: bool = True, batch_size: int | None = None
) -> None: ...
def fit(self, X: NDArray | ArrayLike, y: None = None) -> "IncrementalPCA": ...
def partial_fit(
self, X: ArrayLike, y: None = None, check_input: bool = True
) -> "IncrementalPCA": ...
def partial_fit(self, X: ArrayLike, y: None = None, check_input: bool = True) -> "IncrementalPCA": ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...

View file

@@ -30,9 +30,7 @@ class KernelPCA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimato
self,
n_components: int | None = None,
*,
kernel: Literal[
"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"
] = "linear",
kernel: Literal["linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"] = "linear",
gamma: float | None = None,
degree: int = 3,
coef0: float = 1,
@@ -52,16 +50,14 @@ class KernelPCA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimato
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `lambdas_` was deprecated in version 1.0 and will be "
"removed in 1.2. Use `eigenvalues_` instead."
"Attribute `lambdas_` was deprecated in version 1.0 and will be " "removed in 1.2. Use `eigenvalues_` instead."
)
@property
def lambdas_(self): ...
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `alphas_` was deprecated in version 1.0 and will be "
"removed in 1.2. Use `eigenvectors_` instead."
"Attribute `alphas_` was deprecated in version 1.0 and will be " "removed in 1.2. Use `eigenvectors_` instead."
)
@property
def alphas_(self): ...
@@ -69,9 +65,7 @@ class KernelPCA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimato
def _fit_transform(self, K: ndarray) -> ndarray: ...
def _fit_inverse_transform(self, X_transformed: ndarray, X: ndarray) -> None: ...
def fit(self, X: NDArray | ArrayLike, y: None = None) -> "KernelPCA": ...
def fit_transform(
self, X: NDArray | ArrayLike, y: None = None, **params
) -> NDArray: ...
def fit_transform(self, X: NDArray | ArrayLike, y: None = None, **params) -> NDArray: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def inverse_transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def _more_tags(self): ...
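
A usage sketch for `KernelPCA` as stubbed above (note the assumption that `inverse_transform` is only available when the estimator is constructed with `fit_inverse_transform=True`; data invented here):

import numpy as np
from sklearn.decomposition import KernelPCA

X = np.random.RandomState(0).normal(size=(20, 5))
kpca = KernelPCA(n_components=2, kernel="rbf", fit_inverse_transform=True)
X2 = kpca.fit_transform(X)           # project onto leading kernel components
X_back = kpca.inverse_transform(X2)  # approximate pre-image in input space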

View file

@@ -14,7 +14,6 @@ from ..utils.validation import check_non_negative
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from numpy import float64, ndarray
from numpy.random import RandomState
from scipy.sparse._csr import csr_matrix
@@ -31,9 +30,7 @@ def _update_doc_distribution(
random_state: Optional[RandomState],
) -> Union[Tuple[ndarray, ndarray], Tuple[ndarray, None]]: ...
class LatentDirichletAllocation(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
class LatentDirichletAllocation(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
def __init__(
self,
n_components: int = 10,
@@ -46,7 +43,7 @@ class LatentDirichletAllocation(
max_iter: int = 10,
batch_size: int = 128,
evaluate_every: int = ...,
total_samples: float|int = 1e6,
total_samples: float | int = 1e6,
perp_tol: float = 1e-1,
mean_change_tol: float = 1e-3,
max_doc_update_iter: int = 100,
@@ -61,28 +58,22 @@ class LatentDirichletAllocation(
X: csr_matrix,
cal_sstats: bool,
random_init: bool,
parallel = None,
parallel=None,
) -> Union[Tuple[ndarray, ndarray], Tuple[ndarray, None]]: ...
def _em_step(
self,
X: csr_matrix,
total_samples: int,
batch_update: bool,
parallel = None,
parallel=None,
) -> None: ...
def _more_tags(self): ...
def _check_non_neg_array(
self, X: csr_matrix, reset_n_features: bool, whom: str
) -> csr_matrix: ...
def _check_non_neg_array(self, X: csr_matrix, reset_n_features: bool, whom: str) -> csr_matrix: ...
def partial_fit(self, X: NDArray | ArrayLike, y=None): ...
def fit(
self, X: NDArray | ArrayLike, y: None = None
) -> "LatentDirichletAllocation": ...
def fit(self, X: NDArray | ArrayLike, y: None = None) -> "LatentDirichletAllocation": ...
def _unnormalized_transform(self, X): ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def _approx_bound(
self, X: csr_matrix, doc_topic_distr: ndarray, sub_sampling: bool
) -> float64: ...
def _approx_bound(self, X: csr_matrix, doc_topic_distr: ndarray, sub_sampling: bool) -> float64: ...
def score(self, X: NDArray | ArrayLike, y=None) -> float: ...
def _perplexity_precomp_distr(
self,
@@ -90,8 +81,6 @@ class LatentDirichletAllocation(
doc_topic_distr: Optional[ndarray] = None,
sub_sampling: bool = False,
) -> float64: ...
def perplexity(
self, X: NDArray | ArrayLike, sub_sampling: bool = False
) -> float: ...
def perplexity(self, X: NDArray | ArrayLike, sub_sampling: bool = False) -> float: ...
@property
def _n_features_out(self): ...

View file

@@ -94,9 +94,7 @@ def _multiplicative_update_w(
HHt: None = None,
XHt: None = None,
update_H: bool = True,
) -> Union[
Tuple[ndarray, ndarray, None, None], Tuple[ndarray, None, ndarray, ndarray]
]: ...
) -> Union[Tuple[ndarray, ndarray, None, None], Tuple[ndarray, None, ndarray, ndarray]]: ...
def _multiplicative_update_h(
X: csr_matrix,
W: ndarray,
@@ -113,7 +111,7 @@ def _fit_multiplicative_update(
X: csr_matrix,
W: ndarray,
H: ndarray,
beta_loss: int|str = "frobenius",
beta_loss: int | str = "frobenius",
max_iter: int = 200,
tol: float = 1e-4,
l1_reg_W: float = 0,
@@ -132,15 +130,14 @@ def non_negative_factorization(
init: Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom"] | None = None,
update_H: bool = True,
solver: Literal["cd", "mu"] = "cd",
beta_loss: float
| Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
beta_loss: float | Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
tol: float = 1e-4,
max_iter: int = 200,
alpha: float|str = "deprecated",
alpha: float | str = "deprecated",
alpha_W: float = 0.0,
alpha_H: float | Literal["same"] = "same",
l1_ratio: float = 0.0,
regularization: Literal["both", "components", "transformation"]|str = "deprecated",
regularization: Literal["both", "components", "transformation"] | str = "deprecated",
random_state: int | RandomState | None = None,
verbose: int = 0,
shuffle: bool = False,
@@ -151,27 +148,22 @@ class NMF(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
self,
n_components: int | None = None,
*,
init: Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom"]
| None = None,
init: Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom"] | None = None,
solver: Literal["cd", "mu"] = "cd",
beta_loss: float
| Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
beta_loss: float | Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
tol: float = 1e-4,
max_iter: int = 200,
random_state: int | RandomState | None = None,
alpha: float|str = "deprecated",
alpha: float | str = "deprecated",
alpha_W: float = 0.0,
alpha_H: float | Literal["same"] = "same",
l1_ratio: float = 0.0,
verbose: int = 0,
shuffle: bool = False,
regularization: None
| Literal["both", "components", "transformation"]|str = "deprecated",
regularization: None | Literal["both", "components", "transformation"] | str = "deprecated",
) -> None: ...
def _more_tags(self) -> Dict[str, bool]: ...
def _check_params(
self, X: Union[ndarray, csr_matrix]
) -> Union[NMF, MiniBatchNMF]: ...
def _check_params(self, X: Union[ndarray, csr_matrix]) -> Union[NMF, MiniBatchNMF]: ...
def _check_w_h(
self,
X: Union[ndarray, csr_matrix],
@@ -179,9 +171,7 @@ class NMF(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
H: Optional[ndarray],
update_H: bool,
) -> Tuple[ndarray, ndarray]: ...
def _scale_regularization(
self, X: Union[ndarray, csr_matrix]
) -> Tuple[float, float, float, float]: ...
def _scale_regularization(self, X: Union[ndarray, csr_matrix]) -> Tuple[float, float, float, float]: ...
def fit_transform(
self,
X: NDArray | ArrayLike,
@@ -197,9 +187,7 @@ class NMF(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
H: Optional[ndarray] = None,
update_H: bool = True,
) -> Tuple[ndarray, ndarray, int]: ...
def fit(
self, X: NDArray | ArrayLike, y: None = None, **params
) -> Union[NMF, MiniBatchNMF]: ...
def fit(self, X: NDArray | ArrayLike, y: None = None, **params) -> Union[NMF, MiniBatchNMF]: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def inverse_transform(self, W: NDArray) -> NDArray: ...
@property
@@ -210,11 +198,9 @@ class MiniBatchNMF(NMF):
self,
n_components: int | None = None,
*,
init: Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom"]
| None = None,
init: Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom"] | None = None,
batch_size: int = 1024,
beta_loss: float
| Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
beta_loss: float | Literal["frobenius", "kullback-leibler", "itakura-saito"] = "frobenius",
tol: float = 1e-4,
max_no_improvement: int = 10,
max_iter: int = 200,
@@ -230,9 +216,7 @@ class MiniBatchNMF(NMF):
) -> None: ...
def _check_params(self, X: csr_matrix) -> "MiniBatchNMF": ...
def _solve_W(self, X, H, max_iter): ...
def _minibatch_step(
self, X: csr_matrix, W: ndarray, H: ndarray, update_H: bool
) -> float64: ...
def _minibatch_step(self, X: csr_matrix, W: ndarray, H: ndarray, update_H: bool) -> float64: ...
def _minibatch_convergence(
self,
X: csr_matrix,

View file

@@ -48,12 +48,8 @@ class PCA(_BasePCA):
def fit(self, X: ArrayLike, y: Optional[ndarray] = None) -> "PCA": ...
def fit_transform(self, X: ArrayLike, y: Optional[ndarray] = None) -> NDArray: ...
def _fit(self, X: ndarray) -> Tuple[ndarray, ndarray, ndarray]: ...
def _fit_full(
self, X: ndarray, n_components: Union[int64, str, int]
) -> Tuple[ndarray, ndarray, ndarray]: ...
def _fit_truncated(
self, X: ndarray, n_components: int, svd_solver: str
) -> Tuple[ndarray, ndarray, ndarray]: ...
def _fit_full(self, X: ndarray, n_components: Union[int64, str, int]) -> Tuple[ndarray, ndarray, ndarray]: ...
def _fit_truncated(self, X: ndarray, n_components: int, svd_solver: str) -> Tuple[ndarray, ndarray, ndarray]: ...
def score_samples(self, X: ArrayLike) -> NDArray: ...
def score(self, X: ArrayLike, y: None = None) -> float: ...
def _more_tags(self) -> Dict[str, List[Union[Type[float64], Type[np.float32]]]]: ...
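
A usage sketch for the `PCA` stub above; `score` averages the per-sample log-likelihood that `score_samples` returns under the fitted probabilistic PCA model (toy data):

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).normal(size=(50, 4))
pca = PCA(n_components=2).fit(X)
X2 = pca.transform(X)       # projected data, shape (50, 2)
avg_loglik = pca.score(X)   # float: mean of pca.score_samples(X)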

View file

@@ -36,9 +36,7 @@ class TruncatedSVD(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstim
tol: float = 0.0,
) -> None: ...
def fit(self, X: NDArray | ArrayLike, y=None) -> Any: ...
def fit_transform(
self, X: NDArray | ArrayLike, y: Optional[ndarray] = None
) -> NDArray: ...
def fit_transform(self, X: NDArray | ArrayLike, y: Optional[ndarray] = None) -> NDArray: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def inverse_transform(self, X: ArrayLike) -> NDArray: ...
def _more_tags(self): ...
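
A usage sketch for `TruncatedSVD` as stubbed above; unlike PCA it accepts sparse input directly, which is consistent with X being typed `NDArray | ArrayLike` (toy sparse matrix invented here):

from scipy.sparse import random as sparse_random
from sklearn.decomposition import TruncatedSVD

X = sparse_random(100, 20, density=0.1, random_state=0)
svd = TruncatedSVD(n_components=5, random_state=0)
X_reduced = svd.fit_transform(X)   # dense ndarray, shape (100, 5)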

View file

@@ -75,12 +75,7 @@ class LinearDiscriminantAnalysis(
class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
def __init__(
self,
*,
priors: NDArray | None = None,
reg_param: float = 0.0,
store_covariance: bool = False,
tol: float = 1.0e-4
self, *, priors: NDArray | None = None, reg_param: float = 0.0, store_covariance: bool = False, tol: float = 1.0e-4
) -> None: ...
def fit(self, X: ArrayLike, y: ArrayLike) -> "QuadraticDiscriminantAnalysis": ...
def _decision_function(self, X: ndarray) -> ndarray: ...

View file

@@ -28,28 +28,20 @@ class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
def __init__(
self,
*,
strategy: Literal[
"most_frequent", "prior", "stratified", "uniform", "constant"
] = "prior",
strategy: Literal["most_frequent", "prior", "stratified", "uniform", "constant"] = "prior",
random_state: int | RandomState | None = None,
constant: int | str | ArrayLike | None = None
constant: int | str | ArrayLike | None = None,
) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> "DummyClassifier": ...
def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> "DummyClassifier": ...
def predict(self, X: ArrayLike) -> ArrayLike: ...
def predict_proba(self, X: ArrayLike) -> NDArray | list[ArrayLike]: ...
def predict_log_proba(self, X: ArrayLike) -> NDArray | list[ArrayLike]: ...
def _more_tags(self): ...
def score(
self, X: ArrayLike | None, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike | None, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
)
@deprecated("`n_features_in_` is deprecated in 1.0 and will be removed in 1.2.") # type: ignore
@property
def n_features_in_(self): ...
@@ -59,23 +51,15 @@ class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
*,
strategy: Literal["mean", "median", "quantile", "constant"] = "mean",
constant: int | float | ArrayLike | None = None,
quantile: float | None = None
quantile: float | None = None,
) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> "DummyRegressor": ...
def predict(
self, X: ArrayLike, return_std: bool = False
) -> tuple[ArrayLike, ArrayLike]: ...
def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> "DummyRegressor": ...
def predict(self, X: ArrayLike, return_std: bool = False) -> tuple[ArrayLike, ArrayLike]: ...
def _more_tags(self): ...
def score(
self, X: ArrayLike | None, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike | None, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
)
@deprecated("`n_features_in_` is deprecated in 1.0 and will be removed in 1.2.") # type: ignore
@property
def n_features_in_(self): ...
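
A usage sketch for `DummyClassifier` as stubbed above; with strategy="most_frequent" the features are ignored, which is consistent with `score` accepting `X: ArrayLike | None` (toy data):

import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))   # unused by this strategy
y = np.array([0, 0, 0, 0, 1, 1])
clf = DummyClassifier(strategy="most_frequent").fit(X, y)
acc = clf.score(X, y)   # 4/6 here: it always predicts the majority class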

View file

@@ -30,9 +30,7 @@ __all__ = ["BaggingClassifier", "BaggingRegressor"]
MAX_INT = ...
def _generate_indices(
random_state: RandomState, bootstrap: bool, n_population: int, n_samples: int
) -> ndarray: ...
def _generate_indices(random_state: RandomState, bootstrap: bool, n_population: int, n_samples: int) -> ndarray: ...
def _generate_bagging_indices(
random_state: int64,
bootstrap_features: bool,
@@ -52,10 +50,7 @@ def _parallel_build_estimators(
total_n_estimators: int,
verbose: int,
check_input: bool,
) -> Union[
Tuple[List[DecisionTreeRegressor], List[ndarray]],
Tuple[List[ExtraTreeRegressor], List[ndarray]],
]: ...
) -> Union[Tuple[List[DecisionTreeRegressor], List[ndarray]], Tuple[List[ExtraTreeRegressor], List[ndarray]],]: ...
def _parallel_predict_proba(estimators, estimators_features, X, n_classes): ...
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes): ...
def _parallel_decision_function(estimators, estimators_features, X): ...
@ -70,9 +65,7 @@ class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
base_estimator: Optional[
Union[DecisionTreeRegressor, ExtraTreeRegressor]
] = None,
base_estimator: Optional[Union[DecisionTreeRegressor, ExtraTreeRegressor]] = None,
n_estimators: int = 10,
*,
max_samples=1.0,
@ -111,8 +104,7 @@ class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `n_features_` was deprecated in version 1.0 and will be "
"removed in 1.2. Use `n_features_in_` instead."
"Attribute `n_features_` was deprecated in version 1.0 and will be " "removed in 1.2. Use `n_features_in_` instead."
)
@property
def n_features_(self): ...

View file

@ -78,7 +78,7 @@ class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
],
*,
n_estimators: int = 10,
estimator_params: ArrayLike = ...
estimator_params: ArrayLike = ...,
) -> None: ...
def _validate_estimator(
self,
@ -88,23 +88,14 @@ class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
self,
append: bool = True,
random_state: Optional[Union[int64, RandomState]] = None,
) -> Union[
DecisionTreeRegressor,
DecisionTreeClassifier,
ExtraTreeRegressor,
ExtraTreeClassifier,
]: ...
) -> Union[DecisionTreeRegressor, DecisionTreeClassifier, ExtraTreeRegressor, ExtraTreeClassifier,]: ...
def __len__(self) -> int: ...
def __getitem__(self, index): ...
def __iter__(self): ...
def _partition_estimators(
n_estimators: int, n_jobs: Optional[int]
) -> Tuple[int, List[int], List[int]]: ...
def _partition_estimators(n_estimators: int, n_jobs: Optional[int]) -> Tuple[int, List[int], List[int]]: ...
class _BaseHeterogeneousEnsemble(
MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
):
class _BaseHeterogeneousEnsemble(MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta):
_required_parameters: list = ...

View file

@ -64,12 +64,8 @@ __all__ = [
MAX_INT = ...
def _get_n_samples_bootstrap(n_samples: int, max_samples: Optional[float]) -> int: ...
def _generate_sample_indices(
random_state: int, n_samples: int, n_samples_bootstrap: int
) -> ndarray: ...
def _generate_unsampled_indices(
random_state: int, n_samples: int, n_samples_bootstrap: int
) -> ndarray: ...
def _generate_sample_indices(random_state: int, n_samples: int, n_samples_bootstrap: int) -> ndarray: ...
def _generate_unsampled_indices(random_state: int, n_samples: int, n_samples_bootstrap: int) -> ndarray: ...
def _parallel_build_trees(
tree: Union[
DecisionTreeRegressor,
@ -86,12 +82,7 @@ def _parallel_build_trees(
verbose: int = 0,
class_weight: None = None,
n_samples_bootstrap: Optional[int] = None,
) -> Union[
DecisionTreeRegressor,
DecisionTreeClassifier,
ExtraTreeRegressor,
ExtraTreeClassifier,
]: ...
) -> Union[DecisionTreeRegressor, DecisionTreeClassifier, ExtraTreeRegressor, ExtraTreeClassifier,]: ...
class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
@abstractmethod
@ -133,17 +124,14 @@ class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
def _set_oob_score_and_attributes(self, X, y): ...
def _compute_oob_predictions(self, X: ndarray, y: ndarray) -> ndarray: ...
def _validate_y_class_weight(self, y: ndarray) -> Tuple[ndarray, None]: ...
def _validate_X_predict(
self, X: Union[ndarray, csr_matrix]
) -> Union[ndarray, csr_matrix]: ...
def _validate_X_predict(self, X: Union[ndarray, csr_matrix]) -> Union[ndarray, csr_matrix]: ...
@property
def feature_importances_(self) -> NDArray: ...
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `n_features_` was deprecated in version 1.0 and will be "
"removed in 1.2. Use `n_features_in_` instead."
"Attribute `n_features_` was deprecated in version 1.0 and will be " "removed in 1.2. Use `n_features_in_` instead."
)
@property
def n_features_(self): ...
@ -173,9 +161,7 @@ class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
def _validate_y_class_weight(self, y: ndarray) -> Tuple[ndarray, None]: ...
def predict(self, X: NDArray | ArrayLike) -> NDArray: ...
def predict_proba(self, X: NDArray | ArrayLike) -> NDArray | list[ArrayLike]: ...
def predict_log_proba(
self, X: NDArray | ArrayLike
) -> NDArray | list[ArrayLike]: ...
def predict_log_proba(self, X: NDArray | ArrayLike) -> NDArray | list[ArrayLike]: ...
def _more_tags(self) -> Dict[str, bool]: ...
class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):
@ -220,10 +206,7 @@ class RandomForestClassifier(ForestClassifier):
random_state: int | RandomState | None = None,
verbose: int = 0,
warm_start: bool = False,
class_weight: Literal["balanced", "balanced_subsample"]
| dict
| Sequence[dict]
| None = None,
class_weight: Literal["balanced", "balanced_subsample"] | dict | Sequence[dict] | None = None,
ccp_alpha: float = 0.0,
max_samples: int | float | None = None,
) -> None: ...
@ -233,9 +216,7 @@ class RandomForestRegressor(ForestRegressor):
self,
n_estimators: int = 100,
*,
criterion: Literal[
"squared_error", "absolute_error", "poisson"
] = "squared_error",
criterion: Literal["squared_error", "absolute_error", "poisson"] = "squared_error",
max_depth: int | None = None,
min_samples_split: int | float = 2,
min_samples_leaf: int | float = 1,
@ -272,10 +253,7 @@ class ExtraTreesClassifier(ForestClassifier):
random_state: int | RandomState | None = None,
verbose: int = 0,
warm_start: bool = False,
class_weight: Literal["balanced", "balanced_subsample"]
| dict
| Sequence[dict]
| None = None,
class_weight: Literal["balanced", "balanced_subsample"] | dict | Sequence[dict] | None = None,
ccp_alpha: float = 0.0,
max_samples: int | float | None = None,
) -> None: ...
@ -325,16 +303,12 @@ class RandomTreesEmbedding(TransformerMixin, BaseForest):
warm_start: bool = False,
) -> None: ...
def _set_oob_score_and_attributes(self, X, y): ...
def fit(
self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None
) -> Any: ...
def fit(self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None) -> Any: ...
def fit_transform(
self,
X: NDArray | ArrayLike,
y: Optional[ndarray] = None,
sample_weight: ArrayLike | None = None,
) -> NDArray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
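
To ground the ForestClassifier annotations above, a small sketch with assumed toy data (the class_weight value is one of the Literal options in the stub):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=200, random_state=0)
rf = RandomForestClassifier(n_estimators=100, class_weight="balanced", random_state=0)
rf.fit(X, y)
rf.predict_proba(X[:5])   # NDArray of class probabilities, as typed above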

View file

@ -113,9 +113,7 @@ class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
def _make_estimator(self, append=True): ...
def _raw_predict_init(self, X: ndarray) -> ndarray: ...
def _raw_predict(self, X: ndarray) -> ndarray: ...
def _staged_raw_predict(
self, X: ndarray, check_input: bool = True
) -> Iterator[ndarray]: ...
def _staged_raw_predict(self, X: ndarray, check_input: bool = True) -> Iterator[ndarray]: ...
@property
def feature_importances_(self) -> NDArray: ...
def _compute_partial_dependence_recursion(self, grid, target_features): ...
@ -124,17 +122,14 @@ class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
# TODO(1.2): Remove
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `n_features_` was deprecated in version 1.0 and will be "
"removed in 1.2. Use `n_features_in_` instead."
"Attribute `n_features_` was deprecated in version 1.0 and will be " "removed in 1.2. Use `n_features_in_` instead."
)
@property
def n_features_(self): ...
# TODO(1.3): Remove
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `loss_` was deprecated in version 1.1 and will be removed in 1.3."
)
@deprecated("Attribute `loss_` was deprecated in version 1.1 and will be removed in 1.3.") # type: ignore
@property
def loss_(self): ...
@ -184,9 +179,7 @@ class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
def __init__(
self,
*,
loss: Literal[
"squared_error", "absolute_error", "huber", "quantile"
] = "squared_error",
loss: Literal["squared_error", "absolute_error", "huber", "quantile"] = "squared_error",
learning_rate: float = 0.1,
n_estimators: int = 100,
subsample: float = 1.0,
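
The hunk above narrows the GradientBoostingRegressor loss parameter to a Literal; a minimal sketch using one of those values (data and hyperparameters are illustrative):

from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X, y = make_regression(n_samples=200, noise=10.0, random_state=0)
gbr = GradientBoostingRegressor(loss="huber", learning_rate=0.1, n_estimators=100)
gbr.fit(X, y)
gbr.feature_importances_   # NDArray property, per the stub above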

View file

@ -21,9 +21,7 @@ class LossFunction(metaclass=ABCMeta):
def __init__(self, n_classes: int) -> None: ...
def init_estimator(self): ...
@abstractmethod
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
): ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None): ...
@abstractmethod
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs): ...
def update_terminal_regions(
@ -56,18 +54,12 @@ class LossFunction(metaclass=ABCMeta):
class RegressionLossFunction(LossFunction, metaclass=ABCMeta):
def __init__(self) -> None: ...
def check_init_estimator(self, estimator: Any): ...
def get_init_raw_predictions(
self, X: NDArray, estimator: DummyRegressor
) -> NDArray: ...
def get_init_raw_predictions(self, X: NDArray, estimator: DummyRegressor) -> NDArray: ...
class LeastSquaresError(RegressionLossFunction):
def init_estimator(self) -> DummyRegressor: ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
) -> float64: ...
def negative_gradient(
self, y: NDArray, raw_predictions: NDArray, **kargs
) -> ndarray: ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None) -> float64: ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs) -> ndarray: ...
def update_terminal_regions(
self,
tree: Tree,
@ -94,9 +86,7 @@ class LeastSquaresError(RegressionLossFunction):
class LeastAbsoluteError(RegressionLossFunction):
def init_estimator(self): ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
): ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None): ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs): ...
def _update_terminal_region(
self,
@ -113,16 +103,8 @@ class LeastAbsoluteError(RegressionLossFunction):
class HuberLossFunction(RegressionLossFunction):
def __init__(self, alpha: float = 0.9): ...
def init_estimator(self): ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
): ...
def negative_gradient(
self,
y: NDArray,
raw_predictions: NDArray,
sample_weight: NDArray | None = None,
**kargs
): ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None): ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None, **kargs): ...
def _update_terminal_region(
self,
tree,
@ -138,12 +120,8 @@ class HuberLossFunction(RegressionLossFunction):
class QuantileLossFunction(RegressionLossFunction):
def __init__(self, alpha: float = 0.9) -> None: ...
def init_estimator(self) -> DummyRegressor: ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
) -> float64: ...
def negative_gradient(
self, y: NDArray, raw_predictions: NDArray, **kargs
) -> ndarray: ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None) -> float64: ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs) -> ndarray: ...
def _update_terminal_region(
self,
tree: Tree,
@ -165,12 +143,8 @@ class ClassificationLossFunction(LossFunction, metaclass=ABCMeta):
class BinomialDeviance(ClassificationLossFunction):
def __init__(self, n_classes: int) -> None: ...
def init_estimator(self) -> DummyClassifier: ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
) -> float64: ...
def negative_gradient(
self, y: NDArray, raw_predictions: NDArray, **kargs
) -> ndarray: ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None) -> float64: ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs) -> ndarray: ...
def _update_terminal_region(
self,
tree: Tree,
@ -184,9 +158,7 @@ class BinomialDeviance(ClassificationLossFunction):
) -> None: ...
def _raw_prediction_to_proba(self, raw_predictions: ndarray) -> ndarray: ...
def _raw_prediction_to_decision(self, raw_predictions: ndarray) -> ndarray: ...
def get_init_raw_predictions(
self, X: NDArray, estimator: DummyClassifier
) -> NDArray: ...
def get_init_raw_predictions(self, X: NDArray, estimator: DummyClassifier) -> NDArray: ...
class MultinomialDeviance(ClassificationLossFunction):
@ -194,12 +166,8 @@ class MultinomialDeviance(ClassificationLossFunction):
def __init__(self, n_classes: int) -> None: ...
def init_estimator(self) -> DummyClassifier: ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
) -> float64: ...
def negative_gradient(
self, y: NDArray, raw_predictions: NDArray, k: int = 0, **kwargs
) -> ndarray: ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None) -> float64: ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, k: int = 0, **kwargs) -> ndarray: ...
def _update_terminal_region(
self,
tree: Tree,
@ -213,16 +181,12 @@ class MultinomialDeviance(ClassificationLossFunction):
) -> None: ...
def _raw_prediction_to_proba(self, raw_predictions: ndarray) -> ndarray: ...
def _raw_prediction_to_decision(self, raw_predictions: ndarray) -> ndarray: ...
def get_init_raw_predictions(
self, X: NDArray, estimator: DummyClassifier
) -> NDArray: ...
def get_init_raw_predictions(self, X: NDArray, estimator: DummyClassifier) -> NDArray: ...
class ExponentialLoss(ClassificationLossFunction):
def __init__(self, n_classes: int): ...
def init_estimator(self): ...
def __call__(
self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None
): ...
def __call__(self, y: NDArray, raw_predictions: NDArray, sample_weight: NDArray | None = None): ...
def negative_gradient(self, y: NDArray, raw_predictions: NDArray, **kargs): ...
def _update_terminal_region(
self,

View file

@ -11,7 +11,6 @@ from ...utils import check_random_state, check_array
from ...base import BaseEstimator, TransformerMixin
from ...utils.validation import check_is_fitted
def _find_binning_thresholds(col_data: ndarray, max_bins: int) -> ndarray: ...
class _BinMapper(TransformerMixin, BaseEstimator):

View file

@ -34,7 +34,6 @@ from ...metrics import check_scoring
from ...model_selection import train_test_split
from ...preprocessing import LabelEncoder
from .binning import _BinMapper
from .grower import TreeGrower
from pandas.core.frame import DataFrame
@ -88,9 +87,7 @@ class BaseHistGradientBoosting(BaseEstimator, ABC):
) -> Union[HistGradientBoostingRegressor, HistGradientBoostingClassifier]: ...
def _is_fitted(self) -> bool: ...
def _clear_state(self) -> None: ...
def _get_small_trainset(
self, X_binned_train, y_train, sample_weight_train, seed
): ...
def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed): ...
def _check_early_stopping_scorer(
self,
X_binned_small_train,
@ -123,9 +120,7 @@ class BaseHistGradientBoosting(BaseEstimator, ABC):
n_threads: int,
) -> None: ...
def _staged_raw_predict(self, X): ...
def _compute_partial_dependence_recursion(
self, grid: ndarray, target_features: ndarray
) -> ndarray: ...
def _compute_partial_dependence_recursion(self, grid: ndarray, target_features: ndarray) -> ndarray: ...
def _more_tags(self): ...
@abstractmethod
def _get_loss(self, sample_weight): ...
@ -141,9 +136,7 @@ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
def __init__(
self,
loss: Literal[
"squared_error", "absolute_error", "poisson", "quantile"
] = "squared_error",
loss: Literal["squared_error", "absolute_error", "poisson", "quantile"] = "squared_error",
*,
quantile: float | None = None,
learning_rate: float = 0.1,
@ -167,9 +160,7 @@ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
def predict(self, X: ArrayLike) -> NDArray: ...
def staged_predict(self, X: ArrayLike): ...
def _encode_y(self, y: ndarray) -> ndarray: ...
def _get_loss(
self, sample_weight: None
) -> Union[HalfPoissonLoss, PinballLoss, HalfSquaredError]: ...
def _get_loss(self, sample_weight: None) -> Union[HalfPoissonLoss, PinballLoss, HalfSquaredError]: ...
class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
@ -178,9 +169,7 @@ class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
def __init__(
self,
loss: Literal[
"log_loss", "auto", "binary_crossentropy", "categorical_crossentropy"
] = "log_loss",
loss: Literal["log_loss", "auto", "binary_crossentropy", "categorical_crossentropy"] = "log_loss",
*,
learning_rate: float = 0.1,
max_iter: int = 100,
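
A short sketch exercising the HistGradientBoostingClassifier loss Literal typed above (toy data; "log_loss" is the stub's default):

from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier

X, y = make_classification(random_state=0)
hgb = HistGradientBoostingClassifier(loss="log_loss", max_iter=100).fit(X, y)
hgb.predict_proba(X[:3])   # class probabilities from the boosted ensemble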

View file

@ -75,9 +75,7 @@ class TreeGrower:
) -> None: ...
def grow(self) -> None: ...
def _apply_shrinkage(self) -> None: ...
def _intilialize_root(
self, gradients: ndarray, hessians: ndarray, hessians_are_constant: bool
) -> None: ...
def _intilialize_root(self, gradients: ndarray, hessians: ndarray, hessians_are_constant: bool) -> None: ...
def _compute_best_split_and_push(self, node: TreeNode) -> None: ...
def split_next(self) -> tuple[TreeNode, TreeNode]: ...
def _finalize_leaf(self, node: TreeNode) -> None: ...

View file

@ -14,12 +14,6 @@ class TreePredictor:
) -> None: ...
def get_n_leaf_nodes(self): ...
def get_max_depth(self): ...
def predict(
self, X: NDArray, known_cat_bitsets: NDArray, f_idx_map: NDArray, n_threads: int
) -> NDArray: ...
def predict_binned(
self, X: NDArray, missing_values_bin_idx: int, n_threads: int
) -> NDArray: ...
def compute_partial_dependence(
self, grid: NDArray, target_features: NDArray, out: NDArray
) -> None: ...
def predict(self, X: NDArray, known_cat_bitsets: NDArray, f_idx_map: NDArray, n_threads: int) -> NDArray: ...
def predict_binned(self, X: NDArray, missing_values_bin_idx: int, n_threads: int) -> NDArray: ...
def compute_partial_dependence(self, grid: NDArray, target_features: NDArray, out: NDArray) -> None: ...

View file

@ -52,9 +52,7 @@ class IsolationForest(OutlierMixin, BaseBagging):
def decision_function(self, X: NDArray | ArrayLike) -> NDArray: ...
def score_samples(self, X: NDArray | ArrayLike) -> NDArray: ...
def _compute_chunked_score_samples(self, X: ndarray) -> ndarray: ...
def _compute_score_samples(
self, X: ndarray, subsample_features: bool
) -> ndarray: ...
def _compute_score_samples(self, X: ndarray, subsample_features: bool) -> ndarray: ...
def _more_tags(self): ...
def _average_path_length(n_samples_leaf: Union[List[int], ndarray]) -> ndarray: ...
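
The IsolationForest hunk types decision_function and score_samples as NDArray; a minimal sketch with assumed synthetic outliers:

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X = np.r_[rng.randn(100, 2), rng.uniform(low=-6, high=6, size=(10, 2))]
iso = IsolationForest(random_state=0).fit(X)
iso.decision_function(X[-3:])   # lower values indicate more anomalous samples
iso.score_samples(X[-3:])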

View file

@ -47,9 +47,7 @@ class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCM
@abstractmethod
def __init__(
self,
estimators: List[
Union[Tuple[str, RandomForestClassifier], Tuple[str, Pipeline]]
],
estimators: List[Union[Tuple[str, RandomForestClassifier], Tuple[str, Pipeline]]],
final_estimator: Optional[LogisticRegression] = None,
*,
cv=None,
@ -59,13 +57,9 @@ class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCM
passthrough=False,
) -> None: ...
def _clone_final_estimator(self, default: LogisticRegression) -> None: ...
def _concatenate_predictions(
self, X: ndarray, predictions: List[ndarray]
) -> ndarray: ...
def _concatenate_predictions(self, X: ndarray, predictions: List[ndarray]) -> ndarray: ...
@staticmethod
def _method_name(
name: str, estimator: Union[Pipeline, RandomForestClassifier], method: str
) -> str: ...
def _method_name(name: str, estimator: Union[Pipeline, RandomForestClassifier], method: str) -> str: ...
def fit(
self,
X: NDArray | ArrayLike,
@ -75,9 +69,7 @@ class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCM
@property
def n_features_in_(self): ...
def _transform(self, X: ndarray) -> ndarray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
@available_if(_estimator_has("predict"))
def predict(self, X: NDArray | ArrayLike, **predict_params) -> np.ndarray: ...
def _sk_visual_block_(self, final_estimator): ...
@ -89,9 +81,7 @@ class StackingClassifier(ClassifierMixin, _BaseStacking):
final_estimator: BaseEstimator | None = None,
*,
cv: int | Generator | Iterable | Literal["prefit"] | None = None,
stack_method: Literal[
"auto", "predict_proba", "decision_function", "predict"
] = "auto",
stack_method: Literal["auto", "predict_proba", "decision_function", "predict"] = "auto",
n_jobs: int | None = None,
passthrough: bool = False,
verbose: int = 0,
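
A minimal sketch consistent with the StackingClassifier parameters typed above (the estimator list and stack_method value are illustrative):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
stack = StackingClassifier(
    estimators=[("rf", RandomForestClassifier(n_estimators=10, random_state=0))],
    final_estimator=LogisticRegression(),
    stack_method="predict_proba",   # one of the Literal options in the stub
)
stack.fit(X, y).predict(X[:3])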

View file

@ -41,12 +41,8 @@ class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):
def _weights_not_none(self) -> Optional[List[int]]: ...
def _predict(self, X: ndarray) -> ndarray: ...
@abstractmethod
def fit(
self, X: ndarray, y: ndarray, sample_weight: None = None
) -> Union[VotingRegressor, VotingClassifier]: ...
def fit_transform(
self, X: ArrayLike, y: NDArray | None = None, **fit_params
) -> NDArray: ...
def fit(self, X: ndarray, y: ndarray, sample_weight: None = None) -> Union[VotingRegressor, VotingClassifier]: ...
def fit_transform(self, X: ArrayLike, y: NDArray | None = None, **fit_params) -> NDArray: ...
@property
def n_features_in_(self): ...
def _sk_visual_block_(self): ...
@ -75,9 +71,7 @@ class VotingClassifier(ClassifierMixin, _BaseVoting):
@available_if(_check_voting)
def predict_proba(self, X: NDArray | ArrayLike) -> ArrayLike: ...
def transform(self, X: NDArray | ArrayLike): ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
class VotingRegressor(RegressorMixin, _BaseVoting):
def __init__(
@ -96,6 +90,4 @@ class VotingRegressor(RegressorMixin, _BaseVoting):
) -> "VotingRegressor": ...
def predict(self, X: NDArray | ArrayLike) -> NDArray: ...
def transform(self, X: NDArray | ArrayLike) -> np.ndarray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
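
To make the _BaseVoting annotations above concrete, a small VotingRegressor sketch (toy data; the estimator choices are illustrative):

from sklearn.datasets import make_regression
from sklearn.ensemble import VotingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(random_state=0)
vr = VotingRegressor([("lr", LinearRegression()), ("dt", DecisionTreeRegressor())])
vr.fit(X, y)
vr.transform(X[:3])   # per-estimator predictions stacked column-wise, an ndarray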

View file

@ -43,9 +43,7 @@ class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
base_estimator: Optional[
Union[DecisionTreeRegressor, DecisionTreeClassifier]
] = None,
base_estimator: Optional[Union[DecisionTreeRegressor, DecisionTreeClassifier]] = None,
*,
n_estimators=50,
estimator_params=...,
@ -70,9 +68,7 @@ class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
@property
def feature_importances_(self) -> NDArray: ...
def _samme_proba(
estimator: DecisionTreeClassifier, n_classes: int, X: ndarray
) -> ndarray: ...
def _samme_proba(estimator: DecisionTreeClassifier, n_classes: int, X: ndarray) -> ndarray: ...
class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
def __init__(

4 sklearn/externals/_numpy_compiler_patch.pyi vendored
View file

@ -40,6 +40,4 @@ from numpy.distutils import log
def is_sequence(seq): ...
def forward_bytes_to_stdout(val): ...
def CCompiler_spawn(
self, cmd: str, display: str | Sequence[str] | None = None, env: dict | None = None
) -> None: ...
def CCompiler_spawn(self, cmd: str, display: str | Sequence[str] | None = None, env: dict | None = None) -> None: ...

4 sklearn/externals/_packaging/version.pyi vendored
View file

@ -136,9 +136,7 @@ class Version(_BaseVersion):
@property
def micro(self) -> int: ...
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]: ...
def _parse_letter_version(letter: str, number: Union[str, bytes, SupportsInt]) -> Optional[Tuple[str, int]]: ...
_local_version_separators = ...

View file

@ -44,20 +44,11 @@ class DictVectorizer(TransformerMixin, BaseEstimator):
): ...
def fit(self, X: Mapping | Iterable[Mapping], y=None) -> Any: ...
def _transform(self, X: List[Dict[str, int]], fitting: bool) -> csr_matrix: ...
def fit_transform(
self, X: Mapping | Iterable[Mapping], y: Optional[ndarray] = None
) -> NDArray: ...
def inverse_transform(
self, X: NDArray | ArrayLike, dict_type: type = ...
) -> list[Mapping]: ...
def fit_transform(self, X: Mapping | Iterable[Mapping], y: Optional[ndarray] = None) -> NDArray: ...
def inverse_transform(self, X: NDArray | ArrayLike, dict_type: type = ...) -> list[Mapping]: ...
def transform(self, X: Mapping | Iterable[Mapping]) -> NDArray: ...
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
@deprecated("get_feature_names is deprecated in 1.0 and will be removed " "in 1.2. Please use get_feature_names_out instead.")
def get_feature_names(self) -> list: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def restrict(self, support: ArrayLike, indices: bool = False) -> Any: ...
def _more_tags(self): ...
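
A minimal DictVectorizer round trip matching the signatures above (the sample mappings are illustrative):

from sklearn.feature_extraction import DictVectorizer

dv = DictVectorizer(sparse=False)
X = dv.fit_transform([{"city": "Paris", "temp": 10.0}, {"city": "Oslo", "temp": 2.0}])
dv.get_feature_names_out()   # the replacement for the deprecated get_feature_names
dv.inverse_transform(X)      # back to a list of mappings, per the stub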

View file

@ -2,7 +2,6 @@ from numpy import ndarray
from typing import Optional, Tuple, Type, Any
from numpy.typing import NDArray, ArrayLike, DTypeLike
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
@ -36,9 +35,7 @@ def _compute_gradient_3d(edges: ndarray, img: ndarray) -> ndarray: ...
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(
mask: ndarray, edges: ndarray, weights: Optional[ndarray] = None
) -> Tuple[ndarray, ndarray]: ...
def _mask_edges_weights(mask: ndarray, edges: ndarray, weights: Optional[ndarray] = None) -> Tuple[ndarray, ndarray]: ...
def _to_graph(
n_x: int,
n_y: int,
@ -49,41 +46,25 @@ def _to_graph(
dtype: Optional[Type[int]] = None,
) -> coo_matrix: ...
def img_to_graph(
img: NDArray,
*,
mask: NDArray | None = None,
return_as: NDArray = ...,
dtype: DTypeLike | None = None
img: NDArray, *, mask: NDArray | None = None, return_as: NDArray = ..., dtype: DTypeLike | None = None
) -> NDArray: ...
def grid_to_graph(
n_x: int,
n_y: int,
n_z: int = 1,
*,
mask: NDArray | None = None,
return_as: NDArray = ...,
dtype: DTypeLike = ...
n_x: int, n_y: int, n_z: int = 1, *, mask: NDArray | None = None, return_as: NDArray = ..., dtype: DTypeLike = ...
) -> NDArray: ...
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(
i_h: int, i_w: int, p_h: int, p_w: int, max_patches: Optional[int] = None
) -> int: ...
def _extract_patches(
arr: ndarray, patch_shape: Tuple[int, int, int] = ..., extraction_step: int = 1
) -> ndarray: ...
def _compute_n_patches(i_h: int, i_w: int, p_h: int, p_w: int, max_patches: Optional[int] = None) -> int: ...
def _extract_patches(arr: ndarray, patch_shape: Tuple[int, int, int] = ..., extraction_step: int = 1) -> ndarray: ...
def extract_patches_2d(
image: NDArray,
patch_size: tuple[int, int],
*,
max_patches: int | float | None = None,
random_state: int | RandomState | None = None
random_state: int | RandomState | None = None,
) -> NDArray: ...
def reconstruct_from_patches_2d(
patches: NDArray, image_size: tuple[int, int] | tuple[int, int, int]
) -> NDArray: ...
def reconstruct_from_patches_2d(patches: NDArray, image_size: tuple[int, int] | tuple[int, int, int]) -> NDArray: ...
class PatchExtractor(BaseEstimator):
def __init__(
@ -91,7 +72,7 @@ class PatchExtractor(BaseEstimator):
*,
patch_size: tuple[int, int] | None = None,
max_patches: int | float | None = None,
random_state: int | RandomState | None = None
random_state: int | RandomState | None = None,
): ...
def fit(self, X: ArrayLike, y=None) -> Any: ...
def transform(self, X: NDArray) -> NDArray: ...
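
The patch-extraction signatures above compose into a round trip; a minimal sketch (the 4x4 image is illustrative):

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d

image = np.arange(16.0).reshape(4, 4)
patches = extract_patches_2d(image, (2, 2))             # all 9 overlapping 2x2 patches
rebuilt = reconstruct_from_patches_2d(patches, (4, 4))  # overlaps are averaged back
assert np.allclose(rebuilt, image)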

View file

@ -78,9 +78,7 @@ class _VectorizerMixin:
_white_spaces = ...
def decode(self, doc: bytes | str) -> str: ...
def _word_ngrams(
self, tokens: List[Union[str, Any]], stop_words: Optional[frozenset] = None
) -> List[Union[str, Any]]: ...
def _word_ngrams(self, tokens: List[Union[str, Any]], stop_words: Optional[frozenset] = None) -> List[Union[str, Any]]: ...
def _char_ngrams(self, text_document): ...
def _char_wb_ngrams(self, text_document): ...
def build_preprocessor(self) -> Callable: ...
@ -147,9 +145,7 @@ class CountVectorizer(_VectorizerMixin, BaseEstimator):
binary: bool = False,
dtype: type = ...,
) -> None: ...
def _sort_features(
self, X: csr_matrix, vocabulary: Dict[str, Union[int, int64]]
) -> csr_matrix: ...
def _sort_features(self, X: csr_matrix, vocabulary: Dict[str, Union[int, int64]]) -> csr_matrix: ...
def _limit_features(
self,
X: csr_matrix,
@ -158,24 +154,15 @@ class CountVectorizer(_VectorizerMixin, BaseEstimator):
low: Optional[int] = None,
limit: Optional[int] = None,
) -> Union[Tuple[csr_matrix, Set[str]], Tuple[csr_matrix, Set[Any]]]: ...
def _count_vocab(
self, raw_documents: Union[ndarray, List[str]], fixed_vocab: bool
) -> Tuple[Dict[str, int], csr_matrix]: ...
def _count_vocab(self, raw_documents: Union[ndarray, List[str]], fixed_vocab: bool) -> Tuple[Dict[str, int], csr_matrix]: ...
def _validate_params(self) -> None: ...
def fit(self, raw_documents: Iterable, y=None) -> Any: ...
def fit_transform(
self, raw_documents: Iterable, y: Optional[Union[ndarray, List[int64]]] = None
) -> NDArray: ...
def fit_transform(self, raw_documents: Iterable, y: Optional[Union[ndarray, List[int64]]] = None) -> NDArray: ...
def transform(self, raw_documents: Iterable) -> NDArray: ...
def inverse_transform(self, X: NDArray | ArrayLike) -> list[ArrayLike]: ...
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
@deprecated("get_feature_names is deprecated in 1.0 and will be removed " "in 1.2. Please use get_feature_names_out instead.")
def get_feature_names(self) -> ArrayLike: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def _more_tags(self): ...
def _make_int_array() -> array.array: ...
@ -189,9 +176,7 @@ class TfidfTransformer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
smooth_idf: bool = True,
sublinear_tf: bool = False,
) -> None: ...
def fit(
self, X: NDArray, y: Optional[Union[List[int64], ndarray]] = None
) -> "TfidfTransformer": ...
def fit(self, X: NDArray, y: Optional[Union[List[int64], ndarray]] = None) -> "TfidfTransformer": ...
def transform(self, X: NDArray, copy: bool = True) -> NDArray: ...
@property
def idf_(self) -> NDArray: ...
@ -235,8 +220,6 @@ class TfidfVectorizer(CountVectorizer):
def idf_(self, value) -> NDArray: ...
def _check_params(self) -> None: ...
def fit(self, raw_documents: Iterable, y=None) -> Any: ...
def fit_transform(
self, raw_documents: Iterable, y: Optional[ndarray] = None
) -> NDArray: ...
def fit_transform(self, raw_documents: Iterable, y: Optional[ndarray] = None) -> NDArray: ...
def transform(self, raw_documents: Iterable) -> NDArray: ...
def _more_tags(self): ...
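
A short sketch of the TfidfVectorizer flow typed above (the documents are illustrative):

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["the cat sat", "the dog sat", "cats and dogs"]
tfidf = TfidfVectorizer(sublinear_tf=True)
X = tfidf.fit_transform(docs)   # sparse document-term matrix
tfidf.get_feature_names_out()   # preferred over the deprecated get_feature_names
tfidf.idf_                      # NDArray property, per the stub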

View file

@ -32,9 +32,7 @@ class SelectorMixin(TransformerMixin, metaclass=ABCMeta):
def transform(self, X: ArrayLike) -> ArrayLike: ...
def _transform(self, X: ndarray) -> ndarray: ...
def inverse_transform(self, X: ArrayLike) -> NDArray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def _get_feature_importances(
estimator: Union[RidgeCV, LogisticRegression, SVC],

View file

@ -20,9 +20,7 @@ from ..utils.metaestimators import available_if
from numpy import float64, ndarray
from sklearn.linear_model._ridge import RidgeCV
def _calculate_threshold(
estimator: RidgeCV, importances: ndarray, threshold: float64
) -> float: ...
def _calculate_threshold(estimator: RidgeCV, importances: ndarray, threshold: float64) -> float: ...
def _estimator_has(attr: str) -> Callable: ...
class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
@ -38,15 +36,11 @@ class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
) -> None: ...
def _get_support_mask(self) -> ndarray: ...
def _check_max_features(self, X: ndarray) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike | None = None, **fit_params
) -> "SelectFromModel": ...
def fit(self, X: ArrayLike, y: ArrayLike | None = None, **fit_params) -> "SelectFromModel": ...
@property
def threshold_(self): ...
@available_if(_estimator_has("partial_fit"))
def partial_fit(
self, X: ArrayLike, y: ArrayLike | None = None, **fit_params
) -> Any: ...
def partial_fit(self, X: ArrayLike, y: ArrayLike | None = None, **fit_params) -> Any: ...
@property
def n_features_in_(self): ...
def _more_tags(self): ...
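
The SelectFromModel stub above pairs naturally with RidgeCV, the estimator its threshold helper is typed against; a minimal sketch with assumed data:

from sklearn.datasets import make_regression
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import RidgeCV

X, y = make_regression(n_features=10, n_informative=3, random_state=0)
sfm = SelectFromModel(RidgeCV(), threshold="median").fit(X, y)
sfm.transform(X).shape   # only features whose importance clears the threshold remain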

View file

@ -18,9 +18,7 @@ from ..utils.multiclass import check_classification_targets
def _compute_mi_cc(x: ndarray, y: ndarray, n_neighbors: int) -> Union[int, float64]: ...
def _compute_mi_cd(c, d, n_neighbors): ...
def _compute_mi(
x: ndarray, y: ndarray, x_discrete: bool_, y_discrete: bool, n_neighbors: int = 3
) -> Union[int, float64]: ...
def _compute_mi(x: ndarray, y: ndarray, x_discrete: bool_, y_discrete: bool, n_neighbors: int = 3) -> Union[int, float64]: ...
def _iterate_columns(X: ndarray, columns: None = None) -> Iterator[ndarray]: ...
def _estimate_mi(
X: ndarray,
@ -35,17 +33,17 @@ def mutual_info_regression(
X: ArrayLike | NDArray,
y: ArrayLike,
*,
discrete_features: Literal["auto"]| bool | ArrayLike = "auto",
discrete_features: Literal["auto"] | bool | ArrayLike = "auto",
n_neighbors: int = 3,
copy: bool = True,
random_state: int | RandomState | None = None
random_state: int | RandomState | None = None,
) -> NDArray: ...
def mutual_info_classif(
X: ArrayLike | NDArray,
y: ArrayLike,
*,
discrete_features: Literal["auto"]| bool | ArrayLike = "auto",
discrete_features: Literal["auto"] | bool | ArrayLike = "auto",
n_neighbors: int = 3,
copy: bool = True,
random_state: int | RandomState | None = None
random_state: int | RandomState | None = None,
) -> NDArray: ...
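
A minimal use of the mutual_info_regression signature above (synthetic data; only the first feature carries signal):

import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = X[:, 0] + 0.1 * rng.randn(200)
mi = mutual_info_regression(X, y, discrete_features="auto", n_neighbors=3)
# mi[0] should dominate the other entries, reflecting the dependence structure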

View file

@ -48,9 +48,7 @@ class RFE(SelectorMixin, MetaEstimatorMixin, BaseEstimator):
@property
def classes_(self) -> NDArray: ...
def fit(self, X: NDArray | ArrayLike, y: ArrayLike, **fit_params) -> "RFE": ...
def _fit(
self, X: ndarray, y: ndarray, step_score: None = None, **fit_params
) -> "RFE": ...
def _fit(self, X: ndarray, y: ndarray, step_score: None = None, **fit_params) -> "RFE": ...
@available_if(_estimator_has("predict"))
def predict(self, X: ArrayLike) -> ArrayLike: ...
@available_if(_estimator_has("score"))
@ -77,9 +75,7 @@ class RFECV(RFE):
n_jobs: int | None = None,
importance_getter: str | Callable = "auto",
) -> None: ...
def fit(
self, X: NDArray | ArrayLike, y: ArrayLike, groups: ArrayLike | None = None
) -> "RFECV": ...
def fit(self, X: NDArray | ArrayLike, y: ArrayLike, groups: ArrayLike | None = None) -> "RFECV": ...
# TODO: Remove in v1.2 when grid_scores_ is removed
# mypy error: Decorated property not supported

View file

@ -30,9 +30,7 @@ class SequentialFeatureSelector(SelectorMixin, MetaEstimatorMixin, BaseEstimator
cv: int | Generator | Iterable = 5,
n_jobs: int | None = None,
) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike | None = None
) -> "SequentialFeatureSelector": ...
def fit(self, X: ArrayLike, y: ArrayLike | None = None) -> "SequentialFeatureSelector": ...
def _get_best_new_feature_score(
self,
estimator: Union[RidgeCV, KNeighborsClassifier],

View file

@ -31,19 +31,9 @@ def f_oneway(*args) -> tuple[float, float]: ...
def f_classif(X: NDArray | ArrayLike, y: NDArray) -> tuple[NDArray, NDArray]: ...
def _chisquare(f_obs: ndarray, f_exp: matrix) -> Tuple[ndarray, ndarray]: ...
def chi2(X: NDArray | ArrayLike, y: ArrayLike) -> tuple[NDArray, NDArray]: ...
def r_regression(
X: NDArray | ArrayLike,
y: ArrayLike,
*,
center: bool = True,
force_finite: bool = True
) -> NDArray: ...
def r_regression(X: NDArray | ArrayLike, y: ArrayLike, *, center: bool = True, force_finite: bool = True) -> NDArray: ...
def f_regression(
X: NDArray | ArrayLike,
y: ArrayLike,
*,
center: bool = True,
force_finite: bool = True
X: NDArray | ArrayLike, y: ArrayLike, *, center: bool = True, force_finite: bool = True
) -> tuple[NDArray, NDArray]: ...
######################################################################
@ -51,9 +41,7 @@ def f_regression(
class _BaseFilter(SelectorMixin, BaseEstimator):
def __init__(self, score_func: Callable) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike
) -> Union[SelectPercentile, SelectKBest]: ...
def fit(self, X: ArrayLike, y: ArrayLike) -> Union[SelectPercentile, SelectKBest]: ...
def _check_params(self, X, y): ...
def _more_tags(self) -> Dict[str, bool]: ...
@ -66,9 +54,7 @@ class SelectPercentile(_BaseFilter):
def _get_support_mask(self) -> ndarray: ...
class SelectKBest(_BaseFilter):
def __init__(
self, score_func: Callable = ..., *, k: int | Literal["all"] = 10
) -> None: ...
def __init__(self, score_func: Callable = ..., *, k: int | Literal["all"] = 10) -> None: ...
def _check_params(self, X: ndarray, y: ndarray) -> None: ...
def _get_support_mask(self) -> ndarray: ...
@ -99,7 +85,7 @@ class GenericUnivariateSelect(_BaseFilter):
score_func: Callable = ...,
*,
mode: Literal["percentile", "k_best", "fpr", "fdr", "fwe"] = "percentile",
param: float | int = 1e-5
param: float | int = 1e-5,
): ...
def _make_selector(self): ...
def _more_tags(self): ...
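
To illustrate the univariate-selection API typed above, a small SelectKBest sketch on a standard dataset:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
skb = SelectKBest(score_func=f_classif, k=2).fit(X, y)
skb.get_support()   # boolean mask over the four iris features, two set to True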

View file

@ -44,9 +44,7 @@ class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
copy_X_train: bool = True,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: ArrayLike | Sequence[Any], y: ArrayLike
) -> "_BinaryGaussianProcessClassifierLaplace": ...
def fit(self, X: ArrayLike | Sequence[Any], y: ArrayLike) -> "_BinaryGaussianProcessClassifierLaplace": ...
def predict(self, X: ArrayLike | Sequence[Any]) -> NDArray: ...
def predict_proba(self, X: ArrayLike | Sequence[Any]) -> ArrayLike: ...
def log_marginal_likelihood(
@ -76,9 +74,7 @@ class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
multi_class: Literal["one_vs_rest", "one_vs_one"] = "one_vs_rest",
n_jobs: int | None = None,
) -> None: ...
def fit(
self, X: ArrayLike | Sequence[Any], y: ArrayLike
) -> "GaussianProcessClassifier": ...
def fit(self, X: ArrayLike | Sequence[Any], y: ArrayLike) -> "GaussianProcessClassifier": ...
def predict(self, X: ArrayLike | Sequence[Any]) -> NDArray: ...
def predict_proba(self, X: ArrayLike | Sequence[Any]) -> ArrayLike: ...
@property

View file

@ -36,9 +36,7 @@ class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
copy_X_train: bool = True,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: ArrayLike | Sequence[Any], y: ArrayLike
) -> "GaussianProcessRegressor": ...
def fit(self, X: ArrayLike | Sequence[Any], y: ArrayLike) -> "GaussianProcessRegressor": ...
def predict(
self,
X: ArrayLike | Sequence[Any],

View file

@ -37,15 +37,9 @@ from ..exceptions import ConvergenceWarning
import warnings
def _check_length_scale(
X: ndarray, length_scale: Union[float64, ndarray, float, int]
) -> Union[ndarray, float64]: ...
def _check_length_scale(X: ndarray, length_scale: Union[float64, ndarray, float, int]) -> Union[ndarray, float64]: ...
class Hyperparameter(
namedtuple(
"Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
)
):
class Hyperparameter(namedtuple("Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed"))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
@ -236,9 +230,7 @@ class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
def anisotropic(self) -> bool: ...
@property
def hyperparameter_length_scale(self) -> Hyperparameter: ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def __repr__(self) -> str: ...
class Matern(RBF):
@ -248,9 +240,7 @@ class Matern(RBF):
length_scale_bounds: tuple[float, float] | Literal["fixed"] = ...,
nu: float = 1.5,
) -> None: ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def __repr__(self) -> str: ...
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
@ -265,9 +255,7 @@ class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
def hyperparameter_length_scale(self) -> Hyperparameter: ...
@property
def hyperparameter_alpha(self) -> Hyperparameter: ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def __repr__(self) -> str: ...
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
@ -282,9 +270,7 @@ class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
def hyperparameter_length_scale(self) -> Hyperparameter: ...
@property
def hyperparameter_periodicity(self) -> Hyperparameter: ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def __repr__(self) -> str: ...
class DotProduct(Kernel):
@ -295,9 +281,7 @@ class DotProduct(Kernel):
) -> None: ...
@property
def hyperparameter_sigma_0(self) -> Hyperparameter: ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def diag(self, X: NDArray) -> np.ndarray: ...
def is_stationary(self): ...
def __repr__(self) -> str: ...
@ -326,9 +310,7 @@ class PairwiseKernel(Kernel):
): ...
@property
def hyperparameter_gamma(self): ...
def __call__(
self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False
) -> tuple[np.ndarray, np.ndarray]: ...
def __call__(self, X: NDArray, Y: NDArray | None = None, eval_gradient: bool = False) -> tuple[np.ndarray, np.ndarray]: ...
def diag(self, X: NDArray) -> np.ndarray: ...
def is_stationary(self): ...
def __repr__(self): ...
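
The kernel __call__ overloads above return a (kernel, gradient) pair when eval_gradient=True; a minimal RBF sketch with assumed inputs:

import numpy as np
from sklearn.gaussian_process.kernels import RBF

X = np.random.RandomState(0).randn(5, 2)
kernel = RBF(length_scale=1.0)
K, K_gradient = kernel(X, eval_gradient=True)
K.shape, K_gradient.shape   # (5, 5) and (5, 5, 1): one gradient slice per hyperparameter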

View file

@ -31,9 +31,7 @@ class _BaseImputer(TransformerMixin, BaseEstimator):
def __init__(self, *, missing_values=..., add_indicator=False) -> None: ...
def _fit_indicator(self, X: ndarray) -> None: ...
def _transform_indicator(self, X: ndarray) -> Optional[ndarray]: ...
def _concatenate_indicator(
self, X_imputed: ndarray, X_indicator: Optional[ndarray]
) -> ndarray: ...
def _concatenate_indicator(self, X_imputed: ndarray, X_indicator: Optional[ndarray]) -> ndarray: ...
def _concatenate_indicator_feature_names_out(self, names, input_features): ...
def _more_tags(self) -> Dict[str, bool]: ...
@ -44,24 +42,18 @@ class SimpleImputer(_BaseImputer):
missing_values: int | float | str | None = ...,
strategy: str = "mean",
fill_value: str | int | float | None = None,
verbose: int|str = "deprecated",
verbose: int | str = "deprecated",
copy: bool = True,
add_indicator: bool = False,
) -> None: ...
def _validate_input(self, X: ndarray, in_fit: bool) -> ndarray: ...
def fit(
self, X: NDArray | ArrayLike, y: Optional[ndarray] = None
) -> "SimpleImputer": ...
def fit(self, X: NDArray | ArrayLike, y: Optional[ndarray] = None) -> "SimpleImputer": ...
def _sparse_fit(self, X, strategy, missing_values, fill_value): ...
def _dense_fit(
self, X: ndarray, strategy: str, missing_values: float, fill_value: int
) -> ndarray: ...
def _dense_fit(self, X: ndarray, strategy: str, missing_values: float, fill_value: int) -> ndarray: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def inverse_transform(self, X: ArrayLike) -> NDArray: ...
def _more_tags(self) -> Dict[str, bool]: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
class MissingIndicator(TransformerMixin, BaseEstimator):
def __init__(
@ -74,13 +66,9 @@ class MissingIndicator(TransformerMixin, BaseEstimator):
) -> None: ...
def _get_missing_features_info(self, X: ndarray) -> Tuple[ndarray, ndarray]: ...
def _validate_input(self, X, in_fit): ...
def _fit(
self, X: ndarray, y: None = None, precomputed: bool = False
) -> ndarray: ...
def _fit(self, X: ndarray, y: None = None, precomputed: bool = False) -> ndarray: ...
def fit(self, X: NDArray | ArrayLike, y=None) -> Any: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def fit_transform(self, X: NDArray | ArrayLike, y=None) -> NDArray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def _more_tags(self): ...
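
A minimal SimpleImputer sketch matching the annotations above (the toy matrix is illustrative):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan], [2.0, 3.0], [np.nan, 4.0]])
imp = SimpleImputer(strategy="mean", add_indicator=True).fit(X)
imp.transform(X)             # imputed values plus appended missing-indicator columns
imp.get_feature_names_out()  # names for both imputed and indicator features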

View file

@ -36,12 +36,8 @@ class IterativeImputer(_BaseImputer):
max_iter: int = 10,
tol: float = 1e-3,
n_nearest_features: int | None = None,
initial_strategy: Literal[
"mean", "median", "most_frequent", "constant"
] = "mean",
imputation_order: Literal[
"ascending", "descending", "roman", "arabic", "random"
] = "ascending",
initial_strategy: Literal["mean", "median", "most_frequent", "constant"] = "mean",
imputation_order: Literal["ascending", "descending", "roman", "arabic", "random"] = "ascending",
skip_complete: bool = False,
min_value: float | ArrayLike = ...,
max_value: float | ArrayLike = ...,
@ -55,9 +51,7 @@ class IterativeImputer(_BaseImputer):
mask_missing_values: ndarray,
feat_idx: int64,
neighbor_feat_idx: ndarray,
estimator: Optional[
Union[KNeighborsRegressor, BayesianRidge, Pipeline, RandomForestRegressor]
] = None,
estimator: Optional[Union[KNeighborsRegressor, BayesianRidge, Pipeline, RandomForestRegressor]] = None,
fit_mode: bool = True,
) -> Union[
Tuple[ndarray, RandomForestRegressor],
@ -65,21 +59,13 @@ class IterativeImputer(_BaseImputer):
Tuple[ndarray, KNeighborsRegressor],
Tuple[ndarray, Pipeline],
]: ...
def _get_neighbor_feat_idx(
self, n_features: int, feat_idx: int64, abs_corr_mat: Optional[ndarray]
) -> ndarray: ...
def _get_neighbor_feat_idx(self, n_features: int, feat_idx: int64, abs_corr_mat: Optional[ndarray]) -> ndarray: ...
def _get_ordered_idx(self, mask_missing_values: ndarray) -> ndarray: ...
def _get_abs_corr_mat(
self, X_filled: ndarray, tolerance: float = 1e-6
) -> Optional[ndarray]: ...
def _initial_imputation(
self, X: ndarray, in_fit: bool = False
) -> Tuple[ndarray, ndarray, ndarray, ndarray]: ...
def _get_abs_corr_mat(self, X_filled: ndarray, tolerance: float = 1e-6) -> Optional[ndarray]: ...
def _initial_imputation(self, X: ndarray, in_fit: bool = False) -> Tuple[ndarray, ndarray, ndarray, ndarray]: ...
@staticmethod
def _validate_limit(limit: float, limit_type: str, n_features: int) -> ndarray: ...
def fit_transform(self, X: ArrayLike, y: Optional[ndarray] = None) -> ArrayLike: ...
def transform(self, X: ArrayLike) -> ArrayLike: ...
def fit(self, X: ArrayLike, y=None) -> Any: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...

View file

@ -39,6 +39,4 @@ class KNNImputer(_BaseImputer):
) -> ndarray: ...
def fit(self, X: ArrayLike, y: Optional[ndarray] = None) -> "KNNImputer": ...
def transform(self, X: ArrayLike) -> ArrayLike: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...

View file

@ -6,7 +6,6 @@ import numpy as np
from numpy.random import RandomState
from pandas import DataFrame
from ..ensemble._bagging import _generate_indices
from ..metrics import check_scoring
from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
@ -37,9 +36,7 @@ def _calculate_permutation_scores(
scorer: Callable,
max_samples: int,
) -> ndarray: ...
def _create_importances_bunch(
baseline_score: float64, permuted_score: ndarray
) -> Bunch: ...
def _create_importances_bunch(baseline_score: float64, permuted_score: ndarray) -> Bunch: ...
def permutation_importance(
estimator: Union[RandomForestClassifier, GradientBoostingRegressor],
X: NDArray | DataFrame,

View file

@ -15,9 +15,7 @@ from pandas import DataFrame
from matplotlib.axes import Axes
from sklearn.base import BaseEstimator
def _check_boundary_response_method(
estimator: BaseEstimator, response_method: str
) -> Callable: ...
def _check_boundary_response_method(estimator: BaseEstimator, response_method: str) -> Callable: ...
class DecisionBoundaryDisplay:
def __init__(
@ -46,9 +44,7 @@ class DecisionBoundaryDisplay:
grid_resolution: int = 100,
eps: float = 1.0,
plot_method: Literal["contourf", "contour", "pcolormesh"] = "contourf",
response_method: Literal[
"auto", "predict_proba", "decision_function", "predict"
] = "auto",
response_method: Literal["auto", "predict_proba", "decision_function", "predict"] = "auto",
xlabel: str | None = None,
ylabel: str | None = None,
ax: Axes | None = None,

View file

@ -14,7 +14,6 @@ from matplotlib.axes import Axes
from scipy import sparse
from scipy.stats.mstats import mquantiles
from .. import partial_dependence
from ...base import is_regressor
from ...utils import Bunch
@ -40,7 +39,7 @@ from sklearn.tree._classes import DecisionTreeRegressor
def plot_partial_dependence(
estimator: BaseEstimator,
X: ArrayLike | DataFrame,
features: Sequence[int| str|tuple[int, int]| tuple[str, str]],
features: Sequence[int | str | tuple[int, int] | tuple[str, str]],
*,
feature_names: ArrayLike | None = None,
target: int | None = None,
@ -102,7 +101,7 @@ class PartialDependenceDisplay:
feature_names: ArrayLike,
target_idx: int,
deciles: Mapping,
pdp_lim: Mapping | None|str = "deprecated",
pdp_lim: Mapping | None | str = "deprecated",
kind: Literal["average", "individual", "both"] | Sequence[str] = "average",
subsample: float | int | None = 1000,
random_state: int | RandomState | None = None,
@ -112,7 +111,7 @@ class PartialDependenceDisplay:
cls,
estimator: BaseEstimator,
X: ArrayLike | DataFrame,
features: Sequence[int| str| tuple[int, int]|tuple[str, str]],
features: Sequence[int | str | tuple[int, int] | tuple[str, str]],
*,
feature_names: ArrayLike | None = None,
target: int | None = None,

View file

@ -27,7 +27,7 @@ def isotonic_regression(
sample_weight: ArrayLike | None = None,
y_min: float | None = None,
y_max: float | None = None,
increasing: bool = True
increasing: bool = True,
) -> list[float]: ...
class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
@ -37,7 +37,7 @@ class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
y_min: float | None = None,
y_max: float | None = None,
increasing: bool | Literal["auto"] = True,
out_of_bounds: Literal["nan", "clip", "raise"] = "nan"
out_of_bounds: Literal["nan", "clip", "raise"] = "nan",
) -> None: ...
def _check_input_data_shape(self, X: ndarray) -> None: ...
def _build_f(self, X: ndarray, y: ndarray) -> None: ...
@ -48,9 +48,7 @@ class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
sample_weight: Optional[ndarray],
trim_duplicates: bool = True,
) -> Tuple[ndarray, ndarray]: ...
def fit(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> "IsotonicRegression": ...
def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> "IsotonicRegression": ...
def transform(self, T: ArrayLike) -> NDArray: ...
def predict(self, T: ArrayLike) -> NDArray: ...
@ -58,9 +56,7 @@ class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
# `_ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored.
# `input_features` are ignored because `IsotonicRegression` accepts 1d
# arrays and the semantics of `feature_names_in_` are not clear for 1d arrays.
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def __getstate__(self): ...
def __setstate__(self, state): ...
def _more_tags(self): ...
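
A short sketch of the IsotonicRegression flow typed above (data is illustrative; out_of_bounds takes one of the stub's Literal values):

import numpy as np
from sklearn.isotonic import IsotonicRegression

x = np.arange(10, dtype=float)
y = x + np.random.RandomState(0).randn(10)
iso = IsotonicRegression(increasing=True, out_of_bounds="clip").fit(x, y)
iso.predict([-1.0, 5.5, 20.0])   # values outside the training range are clipped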

View file

@ -24,9 +24,7 @@ from .utils.validation import _check_feature_names_in
from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS
from .utils.validation import check_non_negative
class PolynomialCountSketch(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
class PolynomialCountSketch(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
def __init__(
self,
*,
@ -36,9 +34,7 @@ class PolynomialCountSketch(
n_components: int = 100,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: NDArray | ArrayLike, y: ArrayLike | None = None
) -> "PolynomialCountSketch": ...
def fit(self, X: NDArray | ArrayLike, y: ArrayLike | None = None) -> "PolynomialCountSketch": ...
def transform(self, X: ArrayLike) -> ArrayLike: ...
class RBFSampler(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
@ -49,14 +45,10 @@ class RBFSampler(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimat
n_components: int = 100,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: NDArray | ArrayLike, y: ArrayLike | None = None
) -> "RBFSampler": ...
def fit(self, X: NDArray | ArrayLike, y: ArrayLike | None = None) -> "RBFSampler": ...
def transform(self, X: NDArray | ArrayLike) -> ArrayLike: ...
class SkewedChi2Sampler(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
class SkewedChi2Sampler(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
def __init__(
self,
*,
@ -68,14 +60,10 @@ class SkewedChi2Sampler(
def transform(self, X: ArrayLike) -> ArrayLike: ...
class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
def __init__(
self, *, sample_steps: int = 2, sample_interval: float | None = None
): ...
def __init__(self, *, sample_steps: int = 2, sample_interval: float | None = None): ...
def fit(self, X: ArrayLike, y: ArrayLike | None = None) -> Any: ...
def transform(self, X: NDArray | ArrayLike) -> NDArray: ...
def get_feature_names_out(
self, input_features: ArrayLike | None = None
) -> np.ndarray: ...
def get_feature_names_out(self, input_features: ArrayLike | None = None) -> np.ndarray: ...
def _transform_dense(self, X): ...
def _transform_sparse(self, X): ...
def _more_tags(self): ...
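
The kernel-approximation stubs above all follow the same fit/transform shape; a minimal RBFSampler sketch (data and gamma are illustrative):

from sklearn.datasets import make_classification
from sklearn.kernel_approximation import RBFSampler

X, y = make_classification(random_state=0)
rbf = RBFSampler(gamma=1.0, n_components=100, random_state=0)
X_features = rbf.fit(X).transform(X)   # explicit random Fourier feature map
X_features.shape                       # (100, 100)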

View file

@ -26,7 +26,6 @@ from scipy import sparse
from scipy.sparse.linalg import lsqr
from scipy.special import expit
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin, MultiOutputMixin
from ..preprocessing._data import _is_constant_feature
from ..utils import check_array
@ -43,7 +42,6 @@ from scipy.sparse._csc import csc_matrix
from scipy.sparse._csr import csr_matrix
from sklearn.linear_model._stochastic_gradient import SGDClassifier
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
@ -54,9 +52,7 @@ SPARSE_INTERCEPT_DECAY: float = ...
# FIXME in 1.2: parameter 'normalize' should be removed from linear models
# in cases where now normalize=False. The default value of 'normalize' should
# be changed to False in linear models where now normalize=True
def _deprecate_normalize(
normalize: Union[str, bool], default: bool, estimator_name: str
) -> bool: ...
def _deprecate_normalize(normalize: Union[str, bool], default: bool, estimator_name: str) -> bool: ...
def make_dataset(
X: ArrayLike,
y: ArrayLike,
@@ -92,9 +88,7 @@ class LinearModel(BaseEstimator, metaclass=ABCMeta):
def fit(self, X, y): ...
def _decision_function(self, X: Union[ndarray, csr_matrix]) -> ndarray: ...
def predict(self, X: ArrayLike | NDArray) -> NDArray: ...
def _set_intercept(
self, X_offset: ndarray, y_offset: Union[ndarray, float64], X_scale: ndarray
) -> None: ...
def _set_intercept(self, X_offset: ndarray, y_offset: Union[ndarray, float64], X_scale: ndarray) -> None: ...
def _more_tags(self) -> Dict[str, bool]: ...
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
@@ -113,7 +107,7 @@ class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
self,
*,
fit_intercept: bool = True,
normalize: bool|str = "deprecated",
normalize: bool | str = "deprecated",
copy_X: bool = True,
n_jobs: int | None = None,
positive: bool = False,

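A minimal sketch of the LinearRegression API annotated above (illustrative data; passing the deprecated `normalize` flag would emit a FutureWarning in this version range):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X, y = rng.rand(30, 3), rng.rand(30)  # illustrative data
reg = LinearRegression(fit_intercept=True).fit(X, y)
print(reg.coef_, reg.intercept_)
print(reg.predict(X[:2]))
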
View file

@@ -33,16 +33,12 @@ class BayesianRidge(RegressorMixin, LinearModel):
lambda_init: float | None = None,
compute_score: bool = False,
fit_intercept: bool = True,
normalize: bool|str = "deprecated",
normalize: bool | str = "deprecated",
copy_X: bool = True,
verbose: bool = False,
) -> None: ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: NDArray | None = None
) -> "BayesianRidge": ...
def predict(
self, X: NDArray | ArrayLike, return_std: bool = False
) -> tuple[ArrayLike, ArrayLike]: ...
def fit(self, X: NDArray, y: NDArray, sample_weight: NDArray | None = None) -> "BayesianRidge": ...
def predict(self, X: NDArray | ArrayLike, return_std: bool = False) -> tuple[ArrayLike, ArrayLike]: ...
def _update_coef_(
self,
X: ndarray,
@@ -83,15 +79,11 @@ class ARDRegression(RegressorMixin, LinearModel):
compute_score: bool = False,
threshold_lambda: float = 1.0e4,
fit_intercept: bool = True,
normalize: bool|str = "deprecated",
normalize: bool | str = "deprecated",
copy_X: bool = True,
verbose: bool = False,
) -> None: ...
def fit(self, X: ArrayLike, y: ArrayLike) -> "ARDRegression": ...
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda): ...
def _update_sigma(
self, X: ndarray, alpha_: float64, lambda_: ndarray, keep_lambda: ndarray
) -> ndarray: ...
def predict(
self, X: NDArray | ArrayLike, return_std: bool = False
) -> tuple[ArrayLike, ArrayLike]: ...
def _update_sigma(self, X: ndarray, alpha_: float64, lambda_: ndarray, keep_lambda: ndarray) -> ndarray: ...
def predict(self, X: NDArray | ArrayLike, return_std: bool = False) -> tuple[ArrayLike, ArrayLike]: ...
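
The `return_std` flag in the predict stubs above maps to the usual two-array return; a minimal sketch with illustrative data:

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.rand(30, 3)  # illustrative data
y = X @ np.array([1.0, 2.0, 0.5]) + 0.1 * rng.randn(30)
model = BayesianRidge().fit(X, y)
# return_std=True yields (posterior mean, posterior std), matching tuple[ArrayLike, ArrayLike].
y_mean, y_std = model.predict(X, return_std=True)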

View file

@@ -57,9 +57,7 @@ class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
def _get_loss(self): ...
# TODO(1.3): remove
@deprecated( # type: ignore
"Attribute `family` was deprecated in version 1.1 and will be removed in 1.3."
)
@deprecated("Attribute `family` was deprecated in version 1.1 and will be removed in 1.3.") # type: ignore
@property
def family(self): ...

View file

@@ -36,6 +36,4 @@ class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
fit_intercept: bool = True,
tol: float = 1e-05,
) -> None: ...
def fit(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> "HuberRegressor": ...
def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> "HuberRegressor": ...

View file

@@ -17,7 +17,6 @@ from numpy.random import RandomState
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from ._base import LinearModel, LinearRegression
from ._base import _deprecate_normalize, _preprocess_data
from ..base import RegressorMixin, MultiOutputMixin
@@ -111,9 +110,7 @@ class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
random_state: int | RandomState | None = None,
) -> None: ...
@staticmethod
def _get_gram(
precompute: Union[ndarray, str], X: ndarray, y: ndarray
) -> ndarray: ...
def _get_gram(precompute: Union[ndarray, str], X: ndarray, y: ndarray) -> ndarray: ...
def _fit(
self,
X: ndarray,
@@ -124,9 +121,7 @@ class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
normalize: bool,
Xy: Optional[ndarray] = None,
) -> Union[LassoLars, LassoLarsCV, Lars]: ...
def fit(
self, X: ArrayLike, y: ArrayLike, Xy: ArrayLike | None = None
) -> Union[LassoLars, Lars]: ...
def fit(self, X: ArrayLike, y: ArrayLike, Xy: ArrayLike | None = None) -> Union[LassoLars, Lars]: ...
class LassoLars(Lars):
@@ -226,9 +221,5 @@ class LassoLarsIC(LassoLars):
noise_variance: float | None = None,
) -> None: ...
def _more_tags(self): ...
def fit(
self, X: ArrayLike, y: ArrayLike, copy_X: bool | None = None
) -> "LassoLarsIC": ...
def _estimate_noise_variance(
self, X: ndarray, y: ndarray, positive: bool
) -> float64: ...
def fit(self, X: ArrayLike, y: ArrayLike, copy_X: bool | None = None) -> "LassoLarsIC": ...
def _estimate_noise_variance(self, X: ndarray, y: ndarray, positive: bool) -> float64: ...
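
LassoLarsIC, annotated above, selects alpha by information criterion; a minimal sketch with illustrative data:

import numpy as np
from sklearn.linear_model import LassoLarsIC

rng = np.random.RandomState(0)
X, y = rng.rand(50, 5), rng.rand(50)  # illustrative data
model = LassoLarsIC(criterion="bic").fit(X, y)
print(model.alpha_)  # alpha chosen by BIC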

View file

@@ -16,7 +16,6 @@ import warnings
import numpy as np
from scipy import optimize
from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ._linear_loss import LinearModelLoss
from ._sag import sag_solver
@@ -146,7 +145,5 @@ class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstima
y: ArrayLike,
sample_weight: ArrayLike | None = None,
) -> Any: ...
def score(
self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None
) -> float: ...
def score(self, X: ArrayLike, y: ArrayLike, sample_weight: ArrayLike | None = None) -> float: ...
def _more_tags(self): ...

View file

@@ -13,7 +13,6 @@ import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from ._base import LinearModel, _pre_fit, _deprecate_normalize
from ..base import RegressorMixin, MultiOutputMixin
from ..utils import as_float_array, check_array

View file

@@ -25,9 +25,7 @@ class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
quantile: float = 0.5,
alpha: float = 1.0,
fit_intercept: bool = True,
solver: Literal[
"highs-ds", "highs-ipm", "highs", "interior-point", "revised simplex"
] = "interior-point",
solver: Literal["highs-ds", "highs-ipm", "highs", "interior-point", "revised simplex"] = "interior-point",
solver_options: Mapping | None = None,
) -> None: ...
def fit(

View file

@@ -21,13 +21,9 @@ from ..exceptions import ConvergenceWarning
_EPSILON = ...
def _dynamic_max_trials(
n_inliers: int64, n_samples: int, min_samples: int, probability: float
) -> float: ...
def _dynamic_max_trials(n_inliers: int64, n_samples: int, min_samples: int, probability: float) -> float: ...
class RANSACRegressor(
MetaEstimatorMixin, RegressorMixin, MultiOutputMixin, BaseEstimator
):
class RANSACRegressor(MetaEstimatorMixin, RegressorMixin, MultiOutputMixin, BaseEstimator):
def __init__(
self,
estimator: None = None,

View file

@@ -4,7 +4,6 @@ from typing import List, Optional, Tuple, Union, Literal, Any, Mapping, Callable
from numpy.typing import NDArray, ArrayLike
from numpy.random import RandomState
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
@@ -45,9 +44,7 @@ from scipy.sparse._coo import coo_matrix
from scipy.sparse._csr import csr_matrix
from scipy.sparse.linalg._interface import _CustomLinearOperator
def _get_rescaled_operator(
X: csr_matrix, X_offset: ndarray, sample_weight_sqrt: ndarray
) -> _CustomLinearOperator: ...
def _get_rescaled_operator(X: csr_matrix, X_offset: ndarray, sample_weight_sqrt: ndarray) -> _CustomLinearOperator: ...
def _solve_sparse_cg(
X: csr_matrix,
y: ndarray,
@@ -91,18 +88,14 @@ def _solve_lbfgs(
X_scale=None,
sample_weight_sqrt=None,
): ...
def _get_valid_accept_sparse(
is_X_sparse: bool, solver: str
) -> Union[List[str], str]: ...
def _get_valid_accept_sparse(is_X_sparse: bool, solver: str) -> Union[List[str], str]: ...
def ridge_regression(
X: NDArray,
y: NDArray,
alpha: float | ArrayLike,
*,
sample_weight: float | ArrayLike | None = None,
solver: Literal[
"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"
] = "auto",
solver: Literal["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"] = "auto",
max_iter: int | None = None,
tol: float = 1e-3,
verbose: int = 0,
@@ -163,15 +156,11 @@ class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
copy_X: bool = True,
max_iter: int | None = None,
tol: float = 1e-3,
solver: Literal[
"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"
] = "auto",
solver: Literal["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"] = "auto",
positive: bool = False,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None
) -> "Ridge": ...
def fit(self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None) -> "Ridge": ...
class _RidgeClassifierMixin(LinearClassifierMixin):
def _prepare_data(
@@ -193,15 +182,11 @@ class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):
max_iter: int | None = None,
tol: float = 1e-3,
class_weight: Mapping | Literal["balanced"] | None = None,
solver: Literal[
"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"
] = "auto",
solver: Literal["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"] = "auto",
positive: bool = False,
random_state: int | RandomState | None = None,
) -> None: ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None
) -> "RidgeClassifier": ...
def fit(self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None) -> "RidgeClassifier": ...
def _check_gcv_mode(X: ndarray, gcv_mode: None) -> str: ...
def _find_smallest_angle(query: ndarray, vectors: ndarray) -> int64: ...
@@ -243,14 +228,10 @@ class _RidgeGCV(LinearModel):
def _decomp_diag(v_prime: ndarray, Q: ndarray) -> ndarray: ...
@staticmethod
def _diag_dot(D: ndarray, B: ndarray) -> ndarray: ...
def _compute_gram(
self, X: ndarray, sqrt_sw: ndarray
) -> Tuple[ndarray, ndarray]: ...
def _compute_gram(self, X: ndarray, sqrt_sw: ndarray) -> Tuple[ndarray, ndarray]: ...
def _compute_covariance(self, X, sqrt_sw): ...
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): ...
def _eigen_decompose_gram(
self, X: ndarray, y: ndarray, sqrt_sw: ndarray
) -> Tuple[ndarray, ndarray, ndarray, ndarray]: ...
def _eigen_decompose_gram(self, X: ndarray, y: ndarray, sqrt_sw: ndarray) -> Tuple[ndarray, ndarray, ndarray, ndarray]: ...
def _solve_eigen_gram(
self,
alpha: float,
@@ -262,12 +243,8 @@ class _RidgeGCV(LinearModel):
QT_y: ndarray,
) -> Tuple[ndarray, ndarray]: ...
def _eigen_decompose_covariance(self, X, y, sqrt_sw): ...
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
): ...
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
): ...
def _solve_eigen_covariance_no_intercept(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): ...
def _solve_eigen_covariance_intercept(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): ...
def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): ...
def _svd_decompose_design_matrix(
self, X: ndarray, y: ndarray, sqrt_sw: ndarray
@@ -282,9 +259,7 @@ class _RidgeGCV(LinearModel):
U: ndarray,
UT_y: ndarray,
) -> Tuple[ndarray, ndarray]: ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None
) -> "_RidgeGCV": ...
def fit(self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None) -> "_RidgeGCV": ...
class _BaseRidgeCV(LinearModel):
def __init__(
@@ -299,9 +274,7 @@ class _BaseRidgeCV(LinearModel):
store_cv_values=False,
alpha_per_target=False,
) -> None: ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None
) -> "RidgeCV": ...
def fit(self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None) -> "RidgeCV": ...
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
pass
@@ -318,7 +291,5 @@ class RidgeClassifierCV(_RidgeClassifierMixin, _BaseRidgeCV):
class_weight: Mapping | Literal["balanced"] | None = None,
store_cv_values: bool = False,
): ...
def fit(
self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None
) -> Any: ...
def fit(self, X: NDArray, y: NDArray, sample_weight: float | NDArray | None = None) -> Any: ...
def _more_tags(self): ...
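
A minimal sketch of the RidgeCV API annotated above (illustrative data):

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X, y = rng.rand(40, 4), rng.rand(40)  # illustrative data
# Efficient leave-one-out CV over the alpha grid; alpha_ stores the winner.
model = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
print(model.alpha_, model.score(X, y))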

View file

@@ -13,7 +13,6 @@ import warnings
from abc import ABCMeta, abstractmethod
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
@@ -33,22 +32,20 @@ from scipy.sparse._csr import csr_matrix
from sklearn.linear_model._passive_aggressive import PassiveAggressiveClassifier
from sklearn.linear_model._perceptron import Perceptron
class LossFunction:
def loss(self, p:float, y:float)->float:...
def dloss(self, p:float, y: float)->float:...
def loss(self, p: float, y: float) -> float: ...
def dloss(self, p: float, y: float) -> float: ...
class Regression(LossFunction):...
class Classification(LossFunction):...
class Regression(LossFunction): ...
class Classification(LossFunction): ...
class Log(Classification): ...
class SquaredLoss(Regression):...
class ModifiedHuber(Classification):...
class Hinge(Classification):...
class SquaredHinge(Classification):...
class Huber(Regression):...
class EpsilonInsensitive(Regression):...
class SquaredEpsilonInsensitive(Regression):...
class SquaredLoss(Regression): ...
class ModifiedHuber(Classification): ...
class Hinge(Classification): ...
class SquaredHinge(Classification): ...
class Huber(Regression): ...
class EpsilonInsensitive(Regression): ...
class SquaredEpsilonInsensitive(Regression): ...
LEARNING_RATE_TYPES: dict = ...
@@ -98,9 +95,7 @@ class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
@abstractmethod
def fit(self, X, y): ...
def _validate_params(self, for_partial_fit: bool = False) -> None: ...
def _get_loss_function(
self, loss: str
) -> Union[ModifiedHuber, Hinge, Log, SquaredLoss]: ...
def _get_loss_function(self, loss: str) -> Union[ModifiedHuber, Hinge, Log, SquaredLoss]: ...
def _get_learning_rate_type(self, learning_rate: str) -> int: ...
def _get_penalty_type(self, penalty: Optional[str]) -> int: ...
def _allocate_parameter_mem(
@@ -125,10 +120,7 @@ def _prepare_fit_binary(
est: Union[PassiveAggressiveClassifier, Perceptron, SGDClassifier],
y: ndarray,
i: int,
) -> Union[
Tuple[ndarray, ndarray, float64, None, int],
Tuple[ndarray, ndarray, float64, ndarray, float64],
]: ...
) -> Union[Tuple[ndarray, ndarray, float64, None, int], Tuple[ndarray, ndarray, float64, ndarray, float64]]: ...
def fit_binary(
est: BaseEstimator,
i: int,
@@ -325,9 +317,7 @@ class BaseSGDRegressor(RegressorMixin, BaseSGD):
coef_init: None,
intercept_init: None,
) -> "SGDRegressor": ...
def partial_fit(
self, X: NDArray | ArrayLike, y: NDArray, sample_weight: ArrayLike | None = None
) -> Any: ...
def partial_fit(self, X: NDArray | ArrayLike, y: NDArray, sample_weight: ArrayLike | None = None) -> Any: ...
def _fit(
self,
X: ndarray,
@@ -401,9 +391,7 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
shuffle: bool = True,
verbose: int = 0,
random_state: int | RandomState | None = None,
learning_rate: Literal[
"constant", "optimal", "invscaling", "adaptive"
] = "optimal",
learning_rate: Literal["constant", "optimal", "invscaling", "adaptive"] = "optimal",
eta0: float = 0.0,
power_t: float = 0.5,
warm_start: bool = False,
@@ -431,9 +419,7 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
coef_init: None,
offset_init: None,
) -> "SGDOneClassSVM": ...
def partial_fit(
self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None
) -> Any: ...
def partial_fit(self, X: NDArray | ArrayLike, y=None, sample_weight: ArrayLike | None = None) -> Any: ...
def _fit(
self,
X: ndarray,

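The loss classes above are wired up through `loss=` at the estimator level; a minimal SGDClassifier sketch with illustrative data:

import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X, y = rng.rand(60, 4), rng.randint(0, 2, 60)  # illustrative data
# loss="hinge" resolves to the Hinge loss via _get_loss_function.
clf = SGDClassifier(loss="hinge", learning_rate="optimal", random_state=0)
clf.partial_fit(X, y, classes=np.array([0, 1]))  # classes required on first call
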
View file

@@ -26,13 +26,9 @@ from typing import Tuple
_EPSILON = ...
def _modified_weiszfeld_step(X: ndarray, x_old: ndarray) -> ndarray: ...
def _spatial_median(
X: ndarray, max_iter: int = 300, tol: float = 1.0e-3
) -> Tuple[int, ndarray]: ...
def _spatial_median(X: ndarray, max_iter: int = 300, tol: float = 1.0e-3) -> Tuple[int, ndarray]: ...
def _breakdown_point(n_samples: int, n_subsamples: int) -> float: ...
def _lstsq(
X: ndarray, y: ndarray, indices: ndarray, fit_intercept: bool
) -> ndarray: ...
def _lstsq(X: ndarray, y: ndarray, indices: ndarray, fit_intercept: bool) -> ndarray: ...
class TheilSenRegressor(RegressorMixin, LinearModel):
def __init__(
@@ -40,7 +36,7 @@ class TheilSenRegressor(RegressorMixin, LinearModel):
*,
fit_intercept: bool = True,
copy_X: bool = True,
max_subpopulation: int|float = 1e4,
max_subpopulation: int | float = 1e4,
n_subsamples: int | None = None,
max_iter: int = 300,
tol: float = 1.0e-3,

View file

@@ -41,10 +41,6 @@ class Isomap(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
) -> None: ...
def _fit_transform(self, X: Union[ndarray, csr_matrix]) -> None: ...
def reconstruction_error(self) -> float: ...
def fit(
self, X: ArrayLike | BallTree | KDTree | NearestNeighbors, y: None = None
) -> "Isomap": ...
def fit_transform(
self, X: ArrayLike | BallTree | KDTree, y: None = None
) -> ArrayLike: ...
def fit(self, X: ArrayLike | BallTree | KDTree | NearestNeighbors, y: None = None) -> "Isomap": ...
def fit_transform(self, X: ArrayLike | BallTree | KDTree, y: None = None) -> ArrayLike: ...
def transform(self, X: ArrayLike) -> ArrayLike: ...
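
A minimal sketch of the Isomap API annotated above (illustrative data):

import numpy as np
from sklearn.manifold import Isomap

X = np.random.RandomState(0).rand(30, 5)  # illustrative data
# fit also accepts BallTree/KDTree/NearestNeighbors inputs, per the stub.
embedding = Isomap(n_neighbors=5, n_components=2).fit_transform(X)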

View file

@@ -28,10 +28,7 @@ import scipy.sparse._csr
from numpy.random import RandomState
from scipy.sparse.linalg import LinearOperator
def barycenter_weights(
X: ArrayLike, Y: ArrayLike, indices: ArrayLike, reg: float = 1e-3
) -> ArrayLike: ...
def barycenter_weights(X: ArrayLike, Y: ArrayLike, indices: ArrayLike, reg: float = 1e-3) -> ArrayLike: ...
def barycenter_kneighbors_graph(
X: NearestNeighbors | ArrayLike,
n_neighbors: int,

View file

@@ -56,6 +56,4 @@ class MDS(BaseEstimator):
): ...
def _more_tags(self): ...
def fit(self, X: ArrayLike, y=None, init: NDArray | None = None) -> Any: ...
def fit_transform(
self, X: ArrayLike, y=None, init: NDArray | None = None
) -> NDArray: ...
def fit_transform(self, X: ArrayLike, y=None, init: NDArray | None = None) -> NDArray: ...
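
MDS exposes fit/fit_transform but no transform, which the stubs above reflect; a minimal sketch with illustrative data:

import numpy as np
from sklearn.manifold import MDS

X = np.random.RandomState(0).rand(25, 4)  # illustrative data
embedding = MDS(n_components=2, random_state=0).fit_transform(X)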

View file

@@ -32,9 +32,7 @@ from scipy.sparse._dia import dia_matrix
def _graph_connected_component(graph, node_id): ...
def _graph_is_connected(graph: Union[csr_matrix, coo_matrix]) -> bool: ...
def _set_diag(
laplacian: coo_matrix, value: int, norm_laplacian: bool
) -> Union[csr_matrix, dia_matrix]: ...
def _set_diag(laplacian: coo_matrix, value: int, norm_laplacian: bool) -> Union[csr_matrix, dia_matrix]: ...
def spectral_embedding(
adjacency: ArrayLike,
*,
@@ -51,9 +49,7 @@ class SpectralEmbedding(BaseEstimator):
self,
n_components: int = 2,
*,
affinity: Literal[
"nearest_neighbors", "rbf", "precomputed", "precomputed_nearest_neighbors"
]
affinity: Literal["nearest_neighbors", "rbf", "precomputed", "precomputed_nearest_neighbors"]
| Callable = "nearest_neighbors",
gamma: float | None = None,
random_state: int | RandomState | None = None,

View file

@@ -111,7 +111,7 @@ class TSNE(BaseEstimator):
method: str = "barnes_hut",
angle: float = 0.5,
n_jobs: int | None = None,
square_distances: Literal[True]|str = "deprecated",
square_distances: Literal[True] | str = "deprecated",
) -> None: ...
def _check_params_vs_input(self, X: ndarray) -> None: ...
def _fit(self, X: ndarray, skip_num_points: int = 0) -> ndarray: ...
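
A minimal sketch of the TSNE API annotated above (illustrative data; perplexity must stay below n_samples):

import numpy as np
from sklearn.manifold import TSNE

X = np.random.RandomState(0).rand(50, 8)  # illustrative data
embedding = TSNE(n_components=2, perplexity=10.0, random_state=0).fit_transform(X)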

View file

@@ -42,9 +42,7 @@ def _check_zero_division(zero_division: str) -> None: ...
def _check_targets(
y_true: Union[List[int], ndarray, Series], y_pred: Union[List[int], ndarray]
) -> Tuple[str, ndarray, ndarray]: ...
def _weighted_sum(
sample_score: ndarray, sample_weight: Optional[ndarray], normalize: bool = False
) -> float64: ...
def _weighted_sum(sample_score: ndarray, sample_weight: Optional[ndarray], normalize: bool = False) -> float64: ...
def accuracy_score(
y_true: ArrayLike,
y_pred: ArrayLike,
@@ -82,14 +80,11 @@ def jaccard_score(
*,
labels: ArrayLike | None = None,
pos_label: str | int = 1,
average: Literal["micro", "macro", "samples", "weighted", "binary"]
| None = "binary",
average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "binary",
sample_weight: ArrayLike | None = None,
zero_division: float | Literal["warn"] = "warn",
) -> float | np.ndarray: ...
def matthews_corrcoef(
y_true: NDArray, y_pred: NDArray, *, sample_weight: ArrayLike | None = None
) -> float: ...
def matthews_corrcoef(y_true: NDArray, y_pred: NDArray, *, sample_weight: ArrayLike | None = None) -> float: ...
def zero_one_loss(
y_true: ArrayLike,
y_pred: ArrayLike,
@@ -103,8 +98,7 @@ def f1_score(
*,
labels: ArrayLike | None = None,
pos_label: str | int = 1,
average: Literal["micro", "macro", "samples", "weighted", "binary"]
| None = "binary",
average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "binary",
sample_weight: ArrayLike | None = None,
zero_division: Literal["warn", 0, 1] = "warn",
) -> float | NDArray: ...
@@ -115,8 +109,7 @@ def fbeta_score(
beta: float,
labels: ArrayLike | None = None,
pos_label: str | int = 1,
average: Literal["micro", "macro", "samples", "weighted", "binary"]
| None = "binary",
average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "binary",
sample_weight: ArrayLike | None = None,
zero_division: Literal["warn", 0, 1] = "warn",
) -> float | NDArray: ...
@@ -155,8 +148,7 @@ def precision_score(
*,
labels: ArrayLike | None = None,
pos_label: str | int = 1,
average: Literal["micro", "macro", "samples", "weighted", "binary"]
| None = "binary",
average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "binary",
sample_weight: ArrayLike | None = None,
zero_division: Literal["warn", 0, 1] = "warn",
) -> float | NDArray: ...
@@ -166,8 +158,7 @@ def recall_score(
*,
labels: ArrayLike | None = None,
pos_label: str | int = 1,
average: Literal["micro", "macro", "samples", "weighted", "binary"]
| None = "binary",
average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "binary",
sample_weight: ArrayLike | None = None,
zero_division: Literal["warn", 0, 1] = "warn",
) -> float | NDArray: ...
@@ -189,9 +180,7 @@ def classification_report(
output_dict: bool = False,
zero_division: Literal["warn", 0, 1] = "warn",
) -> str | dict: ...
def hamming_loss(
y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None
) -> float | int: ...
def hamming_loss(y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None) -> float | int: ...
def log_loss(
y_true: ArrayLike,
y_pred: ArrayLike,

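These classification metrics are plain functions; a minimal sketch with illustrative labels:

from sklearn.metrics import f1_score, matthews_corrcoef

y_true = [0, 1, 1, 0, 1]  # illustrative labels
y_pred = [0, 1, 0, 0, 1]
# average="binary" and zero_division="warn" are the stub defaults above.
print(f1_score(y_true, y_pred))
print(matthews_corrcoef(y_true, y_pred))
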
View file

@@ -3,9 +3,7 @@ from numpy import bool_, int64, ndarray
from sklearn.base import BaseEstimator
from typing import Callable, Tuple, Union
def _check_classifier_response_method(
estimator: BaseEstimator, response_method: str
) -> Callable: ...
def _check_classifier_response_method(estimator: BaseEstimator, response_method: str) -> Callable: ...
def _get_response(
X: ndarray, estimator: BaseEstimator, response_method: str, pos_label: None = None
) -> Union[Tuple[ndarray, int64], Tuple[ndarray, bool_]]: ...

View file

@@ -17,9 +17,7 @@ from sklearn.model_selection._search import RandomizedSearchCV
from sklearn.svm._classes import SVC
class ConfusionMatrixDisplay:
def __init__(
self, confusion_matrix: NDArray, *, display_labels: NDArray | None = None
) -> None: ...
def __init__(self, confusion_matrix: NDArray, *, display_labels: NDArray | None = None) -> None: ...
def plot(
self,
*,

View file

@@ -16,7 +16,6 @@ from sklearn.pipeline import Pipeline
from typing import Union, Sequence, Literal, Mapping
from matplotlib.axes import Axes
class DetCurveDisplay:
def __init__(
self,
@@ -52,9 +51,7 @@ class DetCurveDisplay:
ax: Axes | None = None,
**kwargs,
) -> DetCurveDisplay: ...
def plot(
self, ax: Axes | None = None, *, name: str | None = None, **kwargs
) -> DetCurveDisplay: ...
def plot(self, ax: Axes | None = None, *, name: str | None = None, **kwargs) -> DetCurveDisplay: ...
@deprecated(
"Function plot_det_curve is deprecated in 1.0 and will be "

View file

@@ -24,9 +24,7 @@ class PrecisionRecallDisplay:
estimator_name: str | None = None,
pos_label: str | int | None = None,
) -> None: ...
def plot(
self, ax: Axes | None = None, *, name: str | None = None, **kwargs
) -> PrecisionRecallDisplay: ...
def plot(self, ax: Axes | None = None, *, name: str | None = None, **kwargs) -> PrecisionRecallDisplay: ...
@classmethod
def from_estimator(
cls,

View file

@@ -26,9 +26,7 @@ class RocCurveDisplay:
estimator_name: str | None = None,
pos_label: str | int | None = None,
) -> None: ...
def plot(
self, ax: Axes | None = None, *, name: str | None = None, **kwargs
) -> RocCurveDisplay: ...
def plot(self, ax: Axes | None = None, *, name: str | None = None, **kwargs) -> RocCurveDisplay: ...
@classmethod
def from_estimator(
cls,

View file

@@ -53,9 +53,7 @@ def det_curve(
pos_label: int | str | None = None,
sample_weight: ArrayLike | None = None,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]: ...
def _binary_roc_auc_score(
y_true: ndarray, y_score: ndarray, sample_weight: None = None, max_fpr: None = None
) -> float64: ...
def _binary_roc_auc_score(y_true: ndarray, y_score: ndarray, sample_weight: None = None, max_fpr: None = None) -> float64: ...
def roc_auc_score(
y_true: ArrayLike,
y_score: ArrayLike,
@@ -98,12 +96,8 @@ def roc_curve(
def label_ranking_average_precision_score(
y_true: NDArray, y_score: NDArray, *, sample_weight: ArrayLike | None = None
) -> float: ...
def coverage_error(
y_true: NDArray, y_score: NDArray, *, sample_weight: ArrayLike | None = None
) -> float: ...
def label_ranking_loss(
y_true: NDArray, y_score: NDArray, *, sample_weight: ArrayLike | None = None
) -> float: ...
def coverage_error(y_true: NDArray, y_score: NDArray, *, sample_weight: ArrayLike | None = None) -> float: ...
def label_ranking_loss(y_true: NDArray, y_score: NDArray, *, sample_weight: ArrayLike | None = None) -> float: ...
def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False): ...
def _tie_averaged_dcg(y_true, y_score, discount_cumsum): ...
def _check_dcg_target_type(y_true): ...
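
A minimal sketch of the ranking metrics annotated above (illustrative scores):

import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])  # illustrative scores
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print(roc_auc_score(y_true, y_score))  # 0.75 for these scores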

View file

@@ -48,8 +48,7 @@ def mean_absolute_error(
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
) -> float | np.ndarray: ...
def mean_pinball_loss(
y_true: ArrayLike,
@@ -57,24 +56,21 @@ def mean_pinball_loss(
*,
sample_weight: ArrayLike | None = None,
alpha: float = 0.5,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
) -> float | np.ndarray: ...
def mean_absolute_percentage_error(
y_true: ArrayLike,
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
) -> float | np.ndarray: ...
def mean_squared_error(
y_true: ArrayLike,
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
squared: bool = True,
) -> float | np.ndarray: ...
def mean_squared_log_error(
@@ -82,16 +78,14 @@ def mean_squared_log_error(
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
squared: bool = True,
) -> float | np.ndarray: ...
def median_absolute_error(
y_true: ArrayLike | tuple[int, int],
y_pred: ArrayLike | tuple[int, int],
*,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
sample_weight: ArrayLike | None = None,
) -> float | np.ndarray: ...
def _assemble_r2_explained_variance(
@@ -106,8 +100,7 @@ def explained_variance_score(
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"] | ArrayLike = "uniform_average",
force_finite: bool = True,
) -> float | np.ndarray: ...
def r2_score(
@@ -115,9 +108,7 @@ def r2_score(
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"]
| ArrayLike
| None = "uniform_average",
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"] | ArrayLike | None = "uniform_average",
force_finite: bool = True,
) -> float | np.ndarray: ...
def max_error(y_true: ArrayLike, y_pred: ArrayLike) -> float: ...
@@ -129,12 +120,8 @@ def mean_tweedie_deviance(
sample_weight: ArrayLike | None = None,
power: float = 0,
) -> float: ...
def mean_poisson_deviance(
y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None
) -> float: ...
def mean_gamma_deviance(
y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None
) -> float: ...
def mean_poisson_deviance(y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None) -> float: ...
def mean_gamma_deviance(y_true: ArrayLike, y_pred: ArrayLike, *, sample_weight: ArrayLike | None = None) -> float: ...
def d2_tweedie_score(
y_true: ArrayLike,
y_pred: ArrayLike,
@@ -148,14 +135,12 @@ def d2_pinball_score(
*,
sample_weight: ArrayLike | None = None,
alpha: float = 0.5,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
) -> float | np.ndarray: ...
def d2_absolute_error_score(
y_true: ArrayLike,
y_pred: ArrayLike,
*,
sample_weight: ArrayLike | None = None,
multioutput: Literal["raw_values", "uniform_average"]
| ArrayLike = "uniform_average",
multioutput: Literal["raw_values", "uniform_average"] | ArrayLike = "uniform_average",
) -> float | np.ndarray: ...
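
A minimal sketch of the regression metrics annotated above (illustrative values):

import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

y_true = np.array([3.0, -0.5, 2.0, 7.0])  # illustrative values
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
# squared=False returns RMSE, per the `squared` flag in the stub.
print(mean_squared_error(y_true, y_pred, squared=False))
print(r2_score(y_true, y_pred, multioutput="uniform_average"))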

View file

@@ -66,9 +66,7 @@ def _cached_call(
class _MultimetricScorer:
def __init__(self, **scorers: Mapping) -> None: ...
def __call__(
self, estimator: BaseEstimator, *args, **kwargs
) -> Dict[str, float64]: ...
def __call__(self, estimator: BaseEstimator, *args, **kwargs) -> Dict[str, float64]: ...
def _use_cache(self, estimator: BaseEstimator) -> bool: ...
class _BaseScorer:

Some files were not shown because too many files have changed. Show more