split stubs file
glemaitre committed Jan 8, 2025
1 parent 1e7f909 commit e6d2a69
Showing 5 changed files with 275 additions and 174 deletions.
3 changes: 3 additions & 0 deletions skore/src/skore/sklearn/_estimator/__init__.pyi
@@ -0,0 +1,3 @@
from skore.sklearn._estimator.report import EstimatorReport

__all__ = ["EstimatorReport"]
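
The re-export above is the subpackage's public entry point. For orientation, here is a minimal usage sketch; only the import path and the `help()` method are confirmed by the stubs in this commit, and the constructor arguments shown are an assumption about how a report wraps an estimator and its data splits:

# Hedged usage sketch: assumes EstimatorReport accepts an estimator plus
# train/test splits as keyword arguments; the constructor signature is not
# part of this diff.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

from skore.sklearn._estimator import EstimatorReport

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

report = EstimatorReport(
    LogisticRegression(),
    X_train=X_train,
    y_train=y_train,
    X_test=X_test,
    y_test=y_test,
)
report.help()  # inherited from _HelpMixin: renders the available methods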
33 changes: 33 additions & 0 deletions skore/src/skore/sklearn/_estimator/base.pyi
@@ -0,0 +1,33 @@
from typing import Any, Literal, Optional

import numpy as np
from rich.panel import Panel
from rich.tree import Tree

class _HelpMixin:
def _get_methods_for_help(self) -> list[tuple[str, Any]]: ...
def _sort_methods_for_help(
self, methods: list[tuple[str, Any]]
) -> list[tuple[str, Any]]: ...
def _format_method_name(self, name: str) -> str: ...
def _get_method_description(self, method: Any) -> str: ...
def _create_help_panel(self) -> Panel: ...
def _get_help_panel_title(self) -> str: ...
def _create_help_tree(self) -> Tree: ...
def help(self) -> None: ...
def __repr__(self) -> str: ...

class _BaseAccessor(_HelpMixin):
_parent: Any
_icon: str

def __init__(self, parent: Any, icon: str) -> None: ...
def _get_help_panel_title(self) -> str: ...
def _create_help_tree(self) -> Tree: ...
def _get_X_y_and_data_source_hash(
self,
*,
data_source: Literal["test", "train", "X_y"],
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
) -> tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[str]]: ...
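
Together, `_HelpMixin` and `_BaseAccessor` spell out the contract every accessor follows: a `help()` facility assembled from rich panels and trees, plus `_get_X_y_and_data_source_hash` to resolve data either from the parent report ("test"/"train") or from user-supplied arrays ("X_y"). A hypothetical subclass, purely to illustrate which hooks a concrete accessor overrides (not part of this commit):

# Illustrative only: _DummyAccessor is hypothetical; the overridden hooks and
# the helper methods used here are the ones declared in base.pyi.
from typing import Any

from rich.tree import Tree

from skore.sklearn._estimator.base import _BaseAccessor

class _DummyAccessor(_BaseAccessor):
    """Hypothetical accessor showing the hooks base.pyi expects."""

    def __init__(self, parent: Any) -> None:
        # _BaseAccessor stores the parent report and a display icon.
        super().__init__(parent, icon="[*]")

    def _get_help_panel_title(self) -> str:
        return "Available dummy methods"

    def _create_help_tree(self) -> Tree:
        # _get_methods_for_help and _format_method_name come from _HelpMixin.
        tree = Tree("report.dummy")
        for name, _method in self._get_methods_for_help():
            tree.add(self._format_method_name(name))
        return tree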
174 changes: 0 additions & 174 deletions skore/src/skore/sklearn/_estimator/estimator.pyi

This file was deleted.

168 changes: 168 additions & 0 deletions skore/src/skore/sklearn/_estimator/metrics_accessor.pyi
@@ -0,0 +1,168 @@
from typing import Any, Callable, Literal, Optional, Union

import matplotlib.axes
import numpy as np
import pandas as pd
from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay

from skore.sklearn._estimator.base import _BaseAccessor
from skore.sklearn._plot import PredictionErrorDisplay

class _PlotMetricsAccessor(_BaseAccessor):
_metrics_parent: _MetricsAccessor

def __init__(self, parent: _MetricsAccessor) -> None: ...
def _get_display(
self,
*,
X: Optional[np.ndarray],
y: Optional[np.ndarray],
data_source: Literal["test", "train", "X_y"],
response_method: Union[str, list[str]],
display_class: Any,
display_kwargs: dict[str, Any],
display_plot_kwargs: dict[str, Any],
) -> Union[RocCurveDisplay, PrecisionRecallDisplay, PredictionErrorDisplay]: ...
def roc(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
pos_label: Optional[Union[str, int]] = None,
ax: Optional[matplotlib.axes.Axes] = None,
) -> RocCurveDisplay: ...
def precision_recall(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
pos_label: Optional[Union[str, int]] = None,
ax: Optional[matplotlib.axes.Axes] = None,
) -> PrecisionRecallDisplay: ...
def prediction_error(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
ax: Optional[matplotlib.axes.Axes] = None,
kind: Literal[
"actual_vs_predicted", "residual_vs_predicted"
] = "residual_vs_predicted",
subsample: Optional[Union[int, float]] = 1_000,
) -> PredictionErrorDisplay: ...

class _MetricsAccessor(_BaseAccessor):
_SCORE_OR_LOSS_ICONS: dict[str, str]
plot: _PlotMetricsAccessor

def _compute_metric_scores(
self,
metric_fn: Callable,
X: Optional[np.ndarray],
y_true: Optional[np.ndarray],
*,
data_source: Literal["test", "train", "X_y"] = "test",
response_method: Union[str, list[str]],
pos_label: Optional[Union[str, int]] = None,
metric_name: Optional[str] = None,
**metric_kwargs: Any,
) -> pd.DataFrame: ...
def report_metrics(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
scoring: Optional[Union[list[str], Callable]] = None,
pos_label: Optional[Union[str, int]] = None,
scoring_kwargs: Optional[dict[str, Any]] = None,
) -> pd.DataFrame: ...
def accuracy(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
) -> pd.DataFrame: ...
def precision(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
average: Optional[
Literal["binary", "micro", "macro", "weighted", "samples"]
] = None,
pos_label: Optional[Union[str, int]] = None,
) -> pd.DataFrame: ...
def recall(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
average: Optional[
Literal["binary", "micro", "macro", "weighted", "samples"]
] = None,
pos_label: Optional[Union[str, int]] = None,
) -> pd.DataFrame: ...
def brier_score(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
pos_label: Optional[Union[str, int]] = None,
) -> pd.DataFrame: ...
def roc_auc(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
average: Optional[
Literal["auto", "micro", "macro", "weighted", "samples"]
] = None,
multi_class: Literal["raise", "ovr", "ovo"] = "ovr",
) -> pd.DataFrame: ...
def log_loss(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
) -> pd.DataFrame: ...
def r2(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
multioutput: Union[
Literal["raw_values", "uniform_average"], np.ndarray
] = "raw_values",
) -> pd.DataFrame: ...
def rmse(
self,
*,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
multioutput: Union[
Literal["raw_values", "uniform_average"], np.ndarray
] = "raw_values",
) -> pd.DataFrame: ...
def custom_metric(
self,
metric_function: Callable,
response_method: Union[str, list[str]],
*,
metric_name: Optional[str] = None,
data_source: Literal["test", "train", "X_y"] = "test",
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
**kwargs: Any,
) -> pd.DataFrame: ...
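
Assuming the two accessors are attached to `EstimatorReport` as `report.metrics`, with plotting nested under `report.metrics.plot` as the `plot: _PlotMetricsAccessor` attribute suggests, usage would look roughly like this; the calls map onto the signatures above, but the `report.metrics` attachment point itself is an assumption:

# Continuing the classification sketch from __init__.pyi above.
summary = report.metrics.report_metrics(data_source="test")  # pd.DataFrame
acc = report.metrics.accuracy()                 # defaults to the test split
prec = report.metrics.precision(average="macro")

# Metrics can also be computed on external data via data_source="X_y":
rec = report.metrics.recall(data_source="X_y", X=X_test, y=y_test)

# Arbitrary callables route through custom_metric with a response method:
from sklearn.metrics import f1_score

f1 = report.metrics.custom_metric(
    metric_function=f1_score,
    response_method="predict",
    metric_name="F1",
)

# Plot methods return scikit-learn display objects, per the stub's types:
roc_display = report.metrics.plot.roc(data_source="test")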
71 changes: 71 additions & 0 deletions skore/src/skore/sklearn/_estimator/report.pyi

This file's diff was not loaded.
