Mirror of https://github.com/microsoft/LightGBM.git
[python-package] fix mypy errors related to eval result parsing in callbacks (#6096)
Parent: 0b3d9da2eb
Commit: 163416d2f5
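The pattern applied throughout this commit is standard mypy type narrowing: `CallbackEnv.evaluation_result_list` is typed as an Optional list, so iterating over it without a guard fails type checking. Raising on `None` before the loop narrows the type for the rest of the function. A minimal self-contained sketch of the idea (the `Env` class and field types here are illustrative stand-ins, not the actual LightGBM definitions):

from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass
class Env:
    # Mirrors the Optional typing that trips mypy in the real CallbackEnv.
    evaluation_result_list: Optional[List[Tuple[str, str, float, bool]]]


def consume(env: Env) -> None:
    # Without this guard mypy rejects the loop below, since the field
    # may be None and None is not iterable.
    if env.evaluation_result_list is None:
        raise RuntimeError("no evaluation results found")
    # After the raise, mypy has narrowed the type to List[...], so
    # iterating is accepted.
    for data_name, eval_name, result, _ in env.evaluation_result_list:
        print(f"{data_name} {eval_name}: {result}")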
@@ -131,6 +131,11 @@ class _RecordEvaluationCallback:
         self.eval_result = eval_result
 
     def _init(self, env: CallbackEnv) -> None:
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "record_evaluation() callback enabled but no evaluation results found. This is a probably bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         self.eval_result.clear()
         for item in env.evaluation_result_list:
             if len(item) == 4:  # regular train
@@ -147,6 +152,11 @@ class _RecordEvaluationCallback:
     def __call__(self, env: CallbackEnv) -> None:
         if env.iteration == env.begin_iteration:
             self._init(env)
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "record_evaluation() callback enabled but no evaluation results found. This is a probably bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         for item in env.evaluation_result_list:
             if len(item) == 4:
                 data_name, eval_name, result = item[:3]
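For context, `record_evaluation` is the public callback whose internals the two hunks above guard; it fills a user-supplied dict in place, one entry per dataset and metric. A typical use, with made-up synthetic data purely for illustration:

import lightgbm as lgb
import numpy as np

rng = np.random.default_rng(0)
X, y = rng.normal(size=(200, 5)), rng.normal(size=200)
train = lgb.Dataset(X[:150], label=y[:150])
valid = lgb.Dataset(X[150:], label=y[150:], reference=train)

eval_result = {}  # filled in place by the callback
booster = lgb.train(
    {"objective": "regression", "verbosity": -1},
    train,
    num_boost_round=10,
    valid_sets=[valid],
    valid_names=["valid"],
    callbacks=[lgb.record_evaluation(eval_result)],
)
print(eval_result["valid"]["l2"])  # one metric value per boosting round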
@@ -285,6 +295,10 @@ class _EarlyStoppingCallback:
         return (ds_name == "cv_agg" and eval_name == "train") or ds_name == train_name
 
     def _init(self, env: CallbackEnv) -> None:
+        if env.evaluation_result_list is None or env.evaluation_result_list == []:
+            raise ValueError(
+                "For early stopping, at least one dataset and eval metric is required for evaluation"
+            )
         is_dart = any(env.params.get(alias, "") == 'dart' for alias in _ConfigAliases.get("boosting"))
         only_train_set = (
             len(env.evaluation_result_list) == 1
@@ -300,9 +314,6 @@ class _EarlyStoppingCallback:
         elif only_train_set:
             _log_warning('Only training set found, disabling early stopping.')
             return
-        if not env.evaluation_result_list:
-            raise ValueError('For early stopping, '
-                             'at least one dataset and eval metric is required for evaluation')
 
         if self.stopping_rounds <= 0:
             raise ValueError("stopping_rounds should be greater than zero.")
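These two hunks consolidate the early-stopping validation: the empty-results check now sits at the top of `_init` and covers both `None` and `[]`, so the later duplicate check can be dropped. Usage of the callback itself is unchanged; a short self-contained example with synthetic data:

import lightgbm as lgb
import numpy as np

rng = np.random.default_rng(0)
X, y = rng.normal(size=(200, 5)), rng.normal(size=200)
train = lgb.Dataset(X[:150], label=y[:150])
valid = lgb.Dataset(X[150:], label=y[150:], reference=train)

booster = lgb.train(
    {"objective": "regression", "metric": "l2", "verbosity": -1},
    train,
    num_boost_round=100,
    valid_sets=[valid],
    # With no validation set at all, _init now raises the ValueError above.
    callbacks=[lgb.early_stopping(stopping_rounds=5)],
)
print(booster.best_iteration)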
@@ -364,6 +375,11 @@ class _EarlyStoppingCallback:
             self._init(env)
         if not self.enabled:
             return
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "early_stopping() callback enabled but no evaluation results found. This is a probably bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         # self.best_score_list is initialized to an empty list
         first_time_updating_best_score_list = (self.best_score_list == [])
         for i in range(len(env.evaluation_result_list)):
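Note that `__call__` repeats the guard even though `_init` already raises on `None`: mypy narrows types per function, and `_init` only runs on the first iteration anyway, so the check cannot be hoisted. A stripped-down illustration of why the apparent duplication is needed (the class and signatures are simplified stand-ins, not the real LightGBM code):

from typing import List, Optional


class _Callback:
    def _init(self, results: Optional[List[float]]) -> None:
        # Raising here does not help mypy inside __call__: narrowing
        # does not propagate across function boundaries.
        if results is None:
            raise RuntimeError("no evaluation results found")

    def __call__(self, results: Optional[List[float]], first: bool) -> None:
        if first:
            self._init(results)
        # A second guard is required; without it mypy flags len(results),
        # because results is still Optional[List[float]] at this point.
        if results is None:
            raise RuntimeError("no evaluation results found")
        for i in range(len(results)):
            print(i, results[i])


_Callback()([0.5, 0.4], first=True)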