[R-package] clarified error messages and documentation for lgb.get.eval.result() (#2686)

* [R-package] clarified error messages and documentation for lgb.get.eval.result()

* fixed bad link

* Update R-package/man/lgb.get.eval.result.Rd

Co-Authored-By: Nikita Titov <nekit94-08@mail.ru>

* [R-package] added fixes from code review back in

Co-authored-by: Nikita Titov <nekit94-08@mail.ru>
James Lamb 2020-01-24 13:09:14 -06:00 committed by Nikita Titov
Parent 710f1ae627
Commit 08fd53cd13
3 changed files with 141 additions and 12 deletions

View File

@@ -884,16 +884,19 @@ lgb.dump <- function(booster, num_iteration = NULL) {
#' @name lgb.get.eval.result
#' @title Get record evaluation result from booster
#' @description Get record evaluation result from booster
#' @description Given a \code{lgb.Booster}, return evaluation results for a
#' particular metric on a particular dataset.
#' @param booster Object of class \code{lgb.Booster}
#' @param data_name name of dataset
#' @param eval_name name of evaluation
#' @param iters iterations, NULL will return all
#' @param data_name Name of the dataset to return evaluation results for.
#' @param eval_name Name of the evaluation metric to return results for.
#' @param iters An integer vector of iterations you want to get evaluation results for. If NULL
#' (the default), evaluation results for all iterations will be returned.
#' @param is_err TRUE will return evaluation error instead
#'
#' @return vector of evaluation result
#'
#' @examples
#' # train a regression model
#' library(lightgbm)
#' data(agaricus.train, package = "lightgbm")
#' train <- agaricus.train
@@ -912,6 +915,14 @@ lgb.dump <- function(booster, num_iteration = NULL) {
#' , learning_rate = 1.0
#' , early_stopping_rounds = 5L
#' )
#'
#' # Examine valid data_name values
#' print(setdiff(names(model$record_evals), "start_iter"))
#'
#' # Examine valid eval_name values for dataset "test"
#' print(names(model$record_evals[["test"]]))
#'
#' # Get L2 values for "test" dataset
#' lgb.get.eval.result(model, "test", "l2")
#' @export
lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_err = FALSE) {
@@ -926,13 +937,30 @@ lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_
stop("lgb.get.eval.result: data_name and eval_name should be characters")
}
# Check if recorded evaluation is existing
if (is.null(booster$record_evals[[data_name]])) {
stop("lgb.get.eval.result: wrong data name")
# NOTE: "start_iter" exists in booster$record_evals but is not a valid data_name
data_names <- setdiff(names(booster$record_evals), "start_iter")
if (!(data_name %in% data_names)) {
stop(paste0(
"lgb.get.eval.result: data_name "
, shQuote(data_name)
, " not found. Only the following datasets exist in record evals: ["
, paste(data_names, collapse = ", ")
, "]"
))
}
# Check if evaluation result is existing
if (is.null(booster$record_evals[[data_name]][[eval_name]])) {
eval_names <- names(booster$record_evals[[data_name]])
if (!(eval_name %in% eval_names)) {
stop(paste0(
"lgb.get.eval.result: eval_name "
, shQuote(eval_name)
, " not found. Only the following eval_names exist for dataset "
, shQuote(data_name)
, ": ["
, paste(eval_names, collapse = ", ")
, "]"
))
stop("lgb.get.eval.result: wrong eval name")
}
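
To make the effect of these changes concrete, here is a small sketch (not part of the commit) of how the new error messages surface to users. It assumes a `model` object trained as in the @examples block above, with a single validation set named "test" and the "l2" metric.

# Sketch, not part of the commit: assumes `model` was trained as in the
# @examples block above (one validation set named "test", metric "l2").

# A data_name that was never used as a validation set now raises an error
# listing the names that do exist, roughly:
#   "lgb.get.eval.result: data_name 'testing' not found. Only the following
#    datasets exist in record evals: [test]"
try(lgb.get.eval.result(model, data_name = "testing", eval_name = "l2"))

# An eval_name that was never recorded now raises an error listing the
# metrics recorded for that dataset, roughly:
#   "lgb.get.eval.result: eval_name 'l1' not found. Only the following
#    eval_names exist for dataset 'test': [l2]"
try(lgb.get.eval.result(model, data_name = "test", eval_name = "l1"))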

View File

@@ -15,11 +15,12 @@ lgb.get.eval.result(
\arguments{
\item{booster}{Object of class \code{lgb.Booster}}
\item{data_name}{name of dataset}
\item{data_name}{Name of the dataset to return evaluation results for.}
\item{eval_name}{name of evaluation}
\item{eval_name}{Name of the evaluation metric to return results for.}
\item{iters}{iterations, NULL will return all}
\item{iters}{An integer vector of iterations you want to get evaluation results for. If NULL
(the default), evaluation results for all iterations will be returned.}
\item{is_err}{TRUE will return evaluation error instead}
}
@@ -27,9 +28,11 @@ lgb.get.eval.result(
vector of evaluation result
}
\description{
Get record evaluation result from booster
Given a \code{lgb.Booster}, return evaluation results for a
particular metric on a particular dataset.
}
\examples{
# train a regression model
library(lightgbm)
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
@@ -48,5 +51,13 @@ model <- lgb.train(
, learning_rate = 1.0
, early_stopping_rounds = 5L
)
# Examine valid data_name values
print(setdiff(names(model$record_evals), "start_iter"))
# Examine valid eval_name values for dataset "test"
print(names(model$record_evals[["test"]]))
# Get L2 values for "test" dataset
lgb.get.eval.result(model, "test", "l2")
}
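
The example stops short of the `iters` argument documented above; the following is a minimal sketch of using it, assuming the `model` object from the example and that training ran for at least three iterations.

# Sketch, assuming the `model` object from the example above and at least
# three completed boosting iterations.
# Restrict the returned "l2" values for the "test" dataset to iterations 1-3:
lgb.get.eval.result(
    booster = model
    , data_name = "test"
    , eval_name = "l2"
    , iters = c(1L, 2L, 3L)
)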

View File

@@ -0,0 +1,90 @@
context("lgb.get.eval.result")
test_that("lgb.get.eval.result() should throw an informative error if booster is not an lgb.Booster", {
bad_inputs <- list(
matrix(1.0:10.0, 2L, 5L)
, TRUE
, c("a", "b")
, NA
, 10L
, lgb.Dataset(
data = matrix(1.0:10.0, 2L, 5L)
, params = list()
)
)
for (bad_input in bad_inputs) {
expect_error({
lgb.get.eval.result(
booster = bad_input
, data_name = "test"
, eval_name = "l2"
)
}, regexp = "Can only use", fixed = TRUE)
}
})
test_that("lgb.get.eval.result() should throw an informative error for incorrect data_name", {
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
dtrain <- lgb.Dataset(
agaricus.train$data
, label = agaricus.train$label
)
model <- lgb.train(
params = list(
objective = "regression"
, metric = "l2"
)
, data = dtrain
, nrounds = 5L
, valids = list(
"test" = lgb.Dataset.create.valid(
dtrain
, agaricus.test$data
, label = agaricus.test$label
)
)
, min_data = 1L
, learning_rate = 1.0
)
expect_error({
eval_results <- lgb.get.eval.result(
booster = model
, data_name = "testing"
, eval_name = "l2"
)
}, regexp = "Only the following datasets exist in record evals: [test]", fixed = TRUE)
})
test_that("lgb.get.eval.result() should throw an informative error for incorrect eval_name", {
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
dtrain <- lgb.Dataset(
agaricus.train$data
, label = agaricus.train$label
)
model <- lgb.train(
params = list(
objective = "regression"
, metric = "l2"
)
, data = dtrain
, nrounds = 5L
, valids = list(
"test" = lgb.Dataset.create.valid(
dtrain
, agaricus.test$data
, label = agaricus.test$label
)
)
, min_data = 1L
, learning_rate = 1.0
)
expect_error({
eval_results <- lgb.get.eval.result(
booster = model
, data_name = "test"
, eval_name = "l1"
)
}, regexp = "Only the following eval_names exist for dataset 'test': [l2]", fixed = TRUE)
})
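
One note on the assertions above: `fixed = TRUE` makes `expect_error()` treat `regexp` as a literal string rather than a regular expression, which matters here because the expected messages contain `[` and `]`. Below is a minimal sketch of running this new test file locally; the file path is an assumption, since file names are not shown in this diff.

# Sketch: run the new tests with testthat. The path below is assumed, not
# taken from the commit, and the lightgbm package must already be installed.
library(testthat)
library(lightgbm)
test_file("R-package/tests/testthat/test_lgb.get.eval.result.R")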