From 0d12262957b1c3d3657c1919a3addda42cb3dcf5 Mon Sep 17 00:00:00 2001
From: nikosbosse
Date: Mon, 16 Sep 2024 18:30:38 +0200
Subject: [PATCH] fix docs

---
 R/score_model_out.R    | 14 ++++++++------
 man/score_model_out.Rd | 11 +++++------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/R/score_model_out.R b/R/score_model_out.R
index 1b35f16..0fd3a29 100644
--- a/R/score_model_out.R
+++ b/R/score_model_out.R
@@ -16,15 +16,17 @@
 #' @details If `metrics` is `NULL` (the default), this function chooses
 #' appropriate metrics based on the `output_type` contained in the `model_out_tbl`:
 #'
-#' - For `output_type == "quantile"`, we use the default metrics provided by
+#' \itemize{
+#' \item For `output_type == "quantile"`, we use the default metrics provided by
 #' `scoringutils`:
 #' `r names(scoringutils::get_metrics(scoringutils::example_quantile))`
-#' - For `output_type == "pmf"` and `output_type_id_order` is `NULL` (indicating
+#' \item For `output_type == "pmf"` and `output_type_id_order` is `NULL` (indicating
 #' that the predicted variable is a nominal variable), we use the default metric
-#' provided by `scoringutils`:,
+#' provided by `scoringutils`:
 #' `r names(scoringutils::get_metrics(scoringutils::example_nominal))`
-#' - For `output_type == "median"`, we use "ae_point"
-#' - For `output_type == "mean"`, we use "se_point"
+#' \item For `output_type == "median"`, we use "ae_point"
+#' \item For `output_type == "mean"`, we use "se_point"
+#' }
 #'
 #' Alternatively, a character vector of scoring metrics can be provided. In this
 #' case, the following options are supported:
@@ -46,7 +48,7 @@
 #' - `output_type == "pmf"`:
 #'     - "log_score": log score
 #'
-#' See [scoringutils::get_metrics()] for more details on the default meterics
+#' See [scoringutils::get_metrics()] for more details on the default metrics
 #' used by `scoringutils`.
 #'
 #' @examplesIf requireNamespace("hubExamples", quietly = TRUE)
diff --git a/man/score_model_out.Rd b/man/score_model_out.Rd
index 94fd2d0..64b1209 100644
--- a/man/score_model_out.Rd
+++ b/man/score_model_out.Rd
@@ -41,19 +41,18 @@ Score model output predictions with a single \code{output_type} against observed
 \details{
 If \code{metrics} is \code{NULL} (the default), this function chooses
 appropriate metrics based on the \code{output_type} contained in the \code{model_out_tbl}:
+
 \itemize{
 \item For \code{output_type == "quantile"}, we use the default metrics provided by
 \code{scoringutils}:
-\verb{r names(scoringutils::get_metrics(scoringutils::example_quantile))}
+wis, overprediction, underprediction, dispersion, bias, interval_coverage_50, interval_coverage_90, interval_coverage_deviation, ae_median
 \item For \code{output_type == "pmf"} and \code{output_type_id_order} is \code{NULL} (indicating
 that the predicted variable is a nominal variable), we use the default metric
-provided by \code{scoringutils}:,
-\verb{r names(scoringutils::get_metrics(scoringutils::example_nominal))}
-\itemize{
+provided by \code{scoringutils}:
+log_score
 \item For \code{output_type == "median"}, we use "ae_point"
 \item For \code{output_type == "mean"}, we use "se_point"
 }
-}
 
 Alternatively, a character vector of scoring metrics can be provided. In this
 case, the following options are supported:
@@ -83,7 +82,7 @@ based on quantiles at the probability levels 0.025 and 0.975.
 }
 }
 
-See \code{\link[scoringutils:get_metrics]{scoringutils::get_metrics()}} for more details on the default meterics
+See \code{\link[scoringutils:get_metrics]{scoringutils::get_metrics()}} for more details on the default metrics
 used by \code{scoringutils}.
 }
 \examples{
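For context on the behaviour documented above: the default metric sets that the roxygen comments interpolate via inline R code can be reproduced directly. A minimal sketch in R, not part of the patch itself; the scoringutils::get_metrics() calls on the example data are taken verbatim from the patched comments, and the names shown in the output comments mirror those listed in the rendered .Rd hunk:

library(scoringutils)

# Default metrics applied when output_type == "quantile":
names(get_metrics(example_quantile))
#> "wis" "overprediction" "underprediction" "dispersion" "bias"
#> "interval_coverage_50" "interval_coverage_90" "interval_coverage_deviation" "ae_median"

# Default metric applied when output_type == "pmf" and output_type_id_order is
# NULL (i.e. the predicted variable is nominal):
names(get_metrics(example_nominal))
#> "log_score"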