diff --git a/tests/testthat.R b/tests/testthat.R index 736e1c1e..b157b97b 100644 --- a/tests/testthat.R +++ b/tests/testthat.R @@ -2,8 +2,6 @@ library(testthat) library(xgboost) library(randomForest) -library(kernlab) -library(earth) library(thief) library(smooth) library(greybox) @@ -25,7 +23,5 @@ library(timetk) library(modeltime) library(tidyverse) -library(lubridate) - test_check("modeltime") diff --git a/tests/testthat/test-algo-adam_reg-Adam.R b/tests/testthat/test-algo-adam_reg-Adam.R index 05f95244..749d435b 100644 --- a/tests/testthat/test-algo-adam_reg-Adam.R +++ b/tests/testthat/test-algo-adam_reg-Adam.R @@ -5,10 +5,10 @@ context("TEST adam_reg: ADAM") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) @@ -28,7 +28,7 @@ test_that("adam_reg: Adam, (No xregs), Test Model Fit Object", { seasonal_differences = 0, seasonal_ma = 1 ) %>% - set_engine("adam") + parsnip::set_engine("adam") # PARSNIP ---- @@ -37,12 +37,12 @@ test_that("adam_reg: Adam, (No xregs), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(value ~ date, data = training(splits)) + fit(value ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "Adam_fit_impl") @@ -61,12 +61,12 @@ test_that("adam_reg: Adam, (No xregs), Test Model Fit Object", { expect_equal(model_fit$preproc$y_var, "value") # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + 
expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - predictions_tbl$.value + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 3000) @@ -78,20 +78,20 @@ test_that("adam_reg: Adam, (No xregs), Test Model Fit Object", { # * XREGS ---- # Data - m750 <- m4_monthly %>% filter(id == "M750") %>% - mutate(month = month(date, label = TRUE)) + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") %>% + dplyr::mutate(month = lubridate::month(date, label = TRUE)) # Split Data 80/20 - splits <- initial_time_split(m750, prop = 0.8) + splits <- rsample::initial_time_split(m750, prop = 0.8) # Fit Spec model_fit <- model_spec %>% - fit(value ~ date + month, data = training(splits)) + fit(value ~ date + month, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Model Fit ---- @@ -115,12 +115,12 @@ test_that("adam_reg: Adam, (No xregs), Test Model Fit Object", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - predictions_tbl$.value + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 2000) @@ -147,23 +147,23 @@ test_that("adam_reg: Adam (workflow)", { seasonal_differences = 0, seasonal_ma = 1 ) %>% - set_engine("adam") + parsnip::set_engine("adam") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) 
+ recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) expect_s3_class(wflw_fit$fit$fit$fit, "Adam_fit_impl") @@ -183,15 +183,15 @@ test_that("adam_reg: Adam (workflow)", { # * Test Predictions ---- - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 3000) diff --git a/tests/testthat/test-algo-adam_reg-auto_adam.R b/tests/testthat/test-algo-adam_reg-auto_adam.R index d8049204..64d56883 100644 --- a/tests/testthat/test-algo-adam_reg-auto_adam.R +++ b/tests/testthat/test-algo-adam_reg-auto_adam.R @@ -14,25 +14,25 @@ test_that("adam_reg: Auto ADAM, (No xregs), Test Model Fit Object", { # SETUP # Data - m750 <- m4_monthly %>% filter(id == "M750") + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 - splits <- initial_time_split(m750, prop = 0.8) + splits <- 
rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- adam_reg( seasonal_period = 12 ) %>% - set_engine("auto_adam") + parsnip::set_engine("auto_adam") # Fit Spec model_fit <- model_spec %>% - fit(value ~ date, data = training(splits)) + fit(value ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits), quiet = FALSE) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits), quiet = FALSE) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "Auto_adam_fit_impl") @@ -51,12 +51,12 @@ test_that("adam_reg: Auto ADAM, (No xregs), Test Model Fit Object", { expect_equal(model_fit$preproc$y_var, "value") # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - predictions_tbl$.value + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 3000) @@ -76,25 +76,25 @@ test_that("adam_reg: Auto ADAM, (XREGS)", { # Data - m750 <- m4_monthly %>% filter(id == "M750") %>% mutate(month = month(date, label = TRUE)) + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") %>% dplyr::mutate(month = lubridate::month(date, label = TRUE)) # Split Data 80/20 - splits <- initial_time_split(m750, prop = 0.8) + splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- adam_reg( seasonal_period = 12 ) %>% - set_engine("auto_adam") + parsnip::set_engine("auto_adam") # Fit Spec model_fit <- model_spec %>% - fit(value ~ date + month, data = training(splits)) + fit(value ~ date + month, data = rsample::training(splits)) # Predictions 
predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "Auto_adam_fit_impl") @@ -115,12 +115,12 @@ test_that("adam_reg: Auto ADAM, (XREGS)", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - predictions_tbl$.value + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 3000) @@ -139,10 +139,10 @@ test_that("adam_reg: Auto ADAM (workflow), Test Model Fit Object", { skip_on_cran() # Data - m750 <- m4_monthly %>% filter(id == "M750") %>% mutate(month = month(date, label = TRUE)) + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") %>% dplyr::mutate(month = lubridate::month(date, label = TRUE)) # Split Data 80/20 - splits <- initial_time_split(m750, prop = 0.8) + splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- adam_reg( @@ -154,23 +154,23 @@ test_that("adam_reg: Auto ADAM (workflow), Test Model Fit Object", { seasonal_differences = 0, seasonal_ma = 1 ) %>% - set_engine("auto_adam") + parsnip::set_engine("auto_adam") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # 
Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) expect_s3_class(wflw_fit$fit$fit$fit, "Auto_adam_fit_impl") @@ -190,15 +190,15 @@ test_that("adam_reg: Auto ADAM (workflow), Test Model Fit Object", { expect_equal(names(mld$outcomes), "value") - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 3000) diff --git a/tests/testthat/test-algo-arima_boost-Arima.R b/tests/testthat/test-algo-arima_boost-Arima.R index 75574cba..a8f56f6c 100644 --- a/tests/testthat/test-algo-arima_boost-Arima.R +++ b/tests/testthat/test-algo-arima_boost-Arima.R @@ -1,11 +1,10 @@ # ---- STANDARD ARIMA ---- context("TEST arima_boost: arima_xgboost") -library(testthat) library(xgboost) library(randomForest) -library(kernlab) -library(earth) +# library(earth) +# library(kernlab) library(stats) library(tidymodels) library(parsnip) @@ -23,10 +22,10 @@ library(lubridate) # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- arima_boost( @@ -45,7 +44,7 
@@ model_spec <- arima_boost( loss_reduction = 0.4, sample_size = 0.9 ) %>% - set_engine("arima_xgboost") + parsnip::set_engine("arima_xgboost") # PARSNIP ---- @@ -61,12 +60,12 @@ test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "arima_xgboost_fit_impl") @@ -92,12 +91,12 @@ test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", { # arima_boost: Arima, (No xregs), Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -119,12 +118,12 @@ test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + as.numeric(date) + lubridate::month(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, 
"arima_xgboost_fit_impl") @@ -166,12 +165,12 @@ test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", { # arima_boost: Arima (XREGS), Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -209,27 +208,27 @@ test_that("arima_boost: Arima (workflow), Test Model Fit Object", { loss_reduction = 0.4, sample_size = 0.9 ) %>% - set_engine("arima_xgboost") + parsnip::set_engine("arima_xgboost") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), 
exp) expect_s3_class(wflw_fit$fit$fit$fit, "arima_xgboost_fit_impl") @@ -272,15 +271,15 @@ test_that("arima_boost: Arima (workflow), Test Model Fit Object", { # arima_boost: Arima (workflow), Test Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) diff --git a/tests/testthat/test-algo-arima_boost-auto_arima.R b/tests/testthat/test-algo-arima_boost-auto_arima.R index 9a9ec467..46ecb118 100644 --- a/tests/testthat/test-algo-arima_boost-auto_arima.R +++ b/tests/testthat/test-algo-arima_boost-auto_arima.R @@ -5,10 +5,10 @@ context("TEST arima_boost: auto_arima_xgboost") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- arima_boost( @@ -27,7 +27,7 @@ model_spec <- arima_boost( loss_reduction = 0.4, sample_size = 0.9 ) %>% - set_engine("auto_arima_xgboost") + parsnip::set_engine("auto_arima_xgboost") # PARSNIP ---- @@ -42,12 +42,12 @@ test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - 
modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # TEST @@ -75,12 +75,12 @@ test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", { # PREDICTIONS # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -101,12 +101,12 @@ test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + as.numeric(date) + lubridate::month(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "auto_arima_xgboost_fit_impl") @@ -147,12 +147,12 @@ test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", { # PREDICTIONS # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - 
Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -189,27 +189,27 @@ test_that("arima_boost: Arima (workflow), Test Model Fit Object", { loss_reduction = 0.4, sample_size = 0.9 ) %>% - set_engine("auto_arima_xgboost") + parsnip::set_engine("auto_arima_xgboost") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # TESTS @@ -253,15 +253,15 @@ test_that("arima_boost: Arima (workflow), Test Model Fit Object", { # PREDICTIONS - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == 
"prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) diff --git a/tests/testthat/test-algo-arima_reg-Arima.R b/tests/testthat/test-algo-arima_reg-Arima.R index 36c849d8..0539bcea 100644 --- a/tests/testthat/test-algo-arima_reg-Arima.R +++ b/tests/testthat/test-algo-arima_reg-Arima.R @@ -5,10 +5,10 @@ context("TEST arima_reg: Arima") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- arima_reg( @@ -20,7 +20,7 @@ model_spec <- arima_reg( seasonal_differences = 0, seasonal_ma = 1 ) %>% - set_engine("arima") + parsnip::set_engine("arima") # PARSNIP ---- @@ -39,12 +39,12 @@ test_that("arima_reg: Arima, (No xregs), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "Arima_fit_impl") @@ -66,12 +66,12 @@ test_that("arima_reg: Arima, (No xregs), Test Model Fit Object", { # arima_reg: Arima, (No xregs), Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # 
- Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -92,12 +92,12 @@ test_that("arima_reg: Arima, (XREGS), Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + month(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + lubridate::month(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "Arima_fit_impl") @@ -120,12 +120,12 @@ test_that("arima_reg: Arima, (XREGS), Test Model Fit Object", { # arima_reg: Arima (XREGS), Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1200) @@ -155,25 +155,25 @@ test_that("arima_reg: Arima (workflow), Test Model Fit Object", { seasonal_differences = 0, seasonal_ma = 1 ) %>% - set_engine("arima") + parsnip::set_engine("arima") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + 
fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) expect_s3_class(wflw_fit$fit$fit$fit, "Arima_fit_impl") @@ -195,15 +195,15 @@ test_that("arima_reg: Arima (workflow), Test Model Fit Object", { # arima_reg: Arima (workflow), Test Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) diff --git a/tests/testthat/test-algo-arima_reg-auto_arima.R b/tests/testthat/test-algo-arima_reg-auto_arima.R index 16523a1a..33fcbfd3 100644 --- a/tests/testthat/test-algo-arima_reg-auto_arima.R +++ b/tests/testthat/test-algo-arima_reg-auto_arima.R @@ -4,10 +4,10 @@ context("TEST arima_reg: auto.arima") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # ---- PARSNIP ---- @@ -21,16 +21,16 @@ test_that("arima_reg: auto.arima (No xregs), Test Model Fit Object", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - 
set_engine("auto_arima") + parsnip::set_engine("auto_arima") # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "auto_arima_fit_impl") @@ -53,12 +53,12 @@ test_that("arima_reg: auto.arima (No xregs), Test Model Fit Object", { # arima_reg: auto.arima (No xregs), Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -79,16 +79,16 @@ test_that("arima_reg: auto.arima (XREGS), Test Model Fit Object", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + month(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + lubridate::month(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Test model fit @@ -113,12 +113,12 @@ test_that("arima_reg: auto.arima (XREGS), Test Model Fit Object", { # arima_reg: auto.arima (XREGS), Test 
Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1200) @@ -138,25 +138,25 @@ test_that("arima_reg: auto.arima (Workflow), Test Model Fit Object", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # TEST --- @@ -179,15 +179,15 @@ test_that("arima_reg: auto.arima (Workflow), Test Model Fit Object", { # arima_reg: auto.arima (Workflow), Test Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure 
expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) diff --git a/tests/testthat/test-algo-exp_smoothing-ets.R b/tests/testthat/test-algo-exp_smoothing-ets.R index a5c06f09..8e008aa5 100644 --- a/tests/testthat/test-algo-exp_smoothing-ets.R +++ b/tests/testthat/test-algo-exp_smoothing-ets.R @@ -5,14 +5,14 @@ context("TEST exp_smoothing()") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.9) +splits <- rsample::initial_time_split(m750, prop = 0.9) # Model Spec model_spec <- exp_smoothing() %>% - set_engine("ets") + parsnip::set_engine("ets") # ETS PARSNIP ---- @@ -28,12 +28,12 @@ test_that("exp_smoothing: ets, Test Model Fit Object", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "ets_fit_impl") @@ -55,12 +55,12 @@ test_that("exp_smoothing: ets, Test Model Fit Object", { # exp_smoothing: ets, Test Predictions # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + 
expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -90,26 +90,26 @@ test_that("exp_smoothing: Arima (workflow), Test Model Fit Object", { , smooth_level = 0.2, smooth_trend = 0.1, smooth_seasonal = 0.1 ) %>% - set_engine("ets") + parsnip::set_engine("ets") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), - actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), + actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) @@ -132,15 +132,15 @@ test_that("exp_smoothing: Arima (workflow), Test Model Fit Object", { # exp_smoothing: ets (workflow), Test Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - 
predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -165,27 +165,27 @@ test_that("exp_smoothing: CROSTON", { model_spec <- exp_smoothing( smooth_level = 0.2 ) %>% - set_engine("croston") + parsnip::set_engine("croston") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), - actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), + actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) expect_s3_class(wflw_fit$fit$fit$fit, "croston_fit_impl") @@ -206,15 +206,15 @@ test_that("exp_smoothing: CROSTON", { - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - 
predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -238,27 +238,27 @@ test_that("exp_smoothing: Theta", { # Model Spec model_spec <- exp_smoothing() %>% - set_engine("theta") + parsnip::set_engine("theta") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), - actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), + actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # @@ -280,15 +280,15 @@ test_that("exp_smoothing: Theta", { - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 2408) @@ -314,16 +314,16 @@ test_that("exp_smoothing: smooth", { # model_spec <- exp_smoothing() %>% - set_engine("smooth_es") + 
parsnip::set_engine("smooth_es") # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # @@ -345,12 +345,12 @@ test_that("exp_smoothing: smooth", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1395) @@ -382,31 +382,31 @@ test_that("exp_smoothing: Arima (workflow), Test Model Fit Object", { , smooth_level = 0.2, smooth_trend = 0.1, smooth_seasonal = 0.1 ) %>% - set_engine("smooth_es") + parsnip::set_engine("smooth_es") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "month") + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "month") # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) # xreg did not contain values for the holdout, so we had to predict missing values. 
suppressWarnings({ wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) }) # Forecast suppressWarnings({ predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), - actual_data = training(splits)) %>% - mutate_at(vars(.value, .conf_lo, .conf_hi), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), + actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value, .conf_lo, .conf_hi), exp) }) # @@ -430,15 +430,15 @@ test_that("exp_smoothing: Arima (workflow), Test Model Fit Object", { # exp_smoothing: ets (workflow), Test Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 # expect_lte(max(abs(resid)), 1395) diff --git a/tests/testthat/test-algo-nnetar_reg.R b/tests/testthat/test-algo-nnetar_reg.R index 1e3c51e0..84084d92 100644 --- a/tests/testthat/test-algo-nnetar_reg.R +++ b/tests/testthat/test-algo-nnetar_reg.R @@ -5,10 +5,10 @@ context("TEST nnetar_reg") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # PARSNIP ---- @@ -30,17 +30,17 @@ test_that("nnetar_reg: Parsnip", { penalty = 0.1, epochs = 50 ) %>% - set_engine("nnetar") + 
parsnip::set_engine("nnetar") # Fit Spec set.seed(123) model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "nnetar_fit_impl") @@ -70,12 +70,12 @@ test_that("nnetar_reg: Parsnip", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1600) @@ -89,12 +89,12 @@ test_that("nnetar_reg: Parsnip", { # Fit set.seed(123) model_fit <- model_spec %>% - fit(log(value) ~ date + month(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + lubridate::month(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "nnetar_fit_impl") @@ -122,12 +122,12 @@ test_that("nnetar_reg: Parsnip", { expect_equal(model_fit$preproc$y_var, "value") # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + 
expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error 967.2171 expect_lte(max(abs(resid)), 1250) @@ -155,26 +155,26 @@ test_that("nnetar_reg: (workflow)", { penalty = 0.1, epochs = 50 ) %>% - set_engine("nnetar") + parsnip::set_engine("nnetar") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) set.seed(123) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) expect_s3_class(wflw_fit$fit$fit$fit, "nnetar_fit_impl") @@ -202,15 +202,15 @@ test_that("nnetar_reg: (workflow)", { # Predictions - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key 
== "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1501.464 expect_lte(max(abs(resid)), 1600) diff --git a/tests/testthat/test-algo-prophet_boost.R b/tests/testthat/test-algo-prophet_boost.R index 3e68c37c..f5472de2 100644 --- a/tests/testthat/test-algo-prophet_boost.R +++ b/tests/testthat/test-algo-prophet_boost.R @@ -5,10 +5,10 @@ context("TEST prophet_boost: prophet_xgboost") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- prophet_boost( @@ -32,7 +32,7 @@ model_spec <- prophet_boost( loss_reduction = 0.4, sample_size = 0.9 ) %>% - set_engine("prophet_xgboost") + parsnip::set_engine("prophet_xgboost") # PARSNIP ---- @@ -47,7 +47,7 @@ test_that("prophet_boost: No Xregs", { # Model Fit model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Structure @@ -88,16 +88,16 @@ test_that("prophet_boost: No Xregs", { # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -117,7 +117,7 @@ 
test_that("prophet_boost: prophet, XREGS", { # Model Fit model_fit <- model_spec %>% fit(log(value) ~ date + as.numeric(date) + factor(month(date, label = TRUE), ordered = F), - data = training(splits)) + data = rsample::training(splits)) # Structure @@ -177,16 +177,16 @@ test_that("prophet_boost: prophet, XREGS", { # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -208,19 +208,19 @@ test_that("prophet_boost: prophet_xgboost (workflow)", { # # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) # Fitted Workflow wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Structure @@ -281,19 +281,19 @@ test_that("prophet_boost: prophet_xgboost (workflow)", { # 
Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -322,10 +322,10 @@ test_that("prophet_reg: prophet, Logistic Growth", { seasonality_weekly = FALSE, seasonality_daily = FALSE ) %>% - set_engine(engine = "prophet_xgboost") %>% + parsnip::set_engine(engine = "prophet_xgboost") %>% fit(value ~ date + as.numeric(date) - + month(date, label = TRUE) + + lubridate::month(date, label = TRUE) + fourier_vec(date, period = 12), data = m750) @@ -375,7 +375,7 @@ test_that("prophet_reg: prophet, Logistic Growth", { prophet_boost( growth = "logistic" ) %>% - set_engine(engine = "prophet_xgboost") %>% + parsnip::set_engine(engine = "prophet_xgboost") %>% fit(value ~ date, m750) }) diff --git a/tests/testthat/test-algo-prophet_reg.R b/tests/testthat/test-algo-prophet_reg.R index 4db0c0cb..a779d42b 100644 --- a/tests/testthat/test-algo-prophet_reg.R +++ b/tests/testthat/test-algo-prophet_reg.R @@ -5,10 +5,10 @@ context("TEST prophet_reg: prophet") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") 
+m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- prophet_reg( @@ -23,7 +23,7 @@ model_spec <- prophet_reg( prior_scale_seasonality = 20, prior_scale_holidays = 20 ) %>% - set_engine("prophet") + parsnip::set_engine("prophet") # PARSNIP ---- @@ -38,7 +38,7 @@ test_that("prophet_reg: prophet, (NO XREGS), Test Model Fit Object", { # Model Fit model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Structure @@ -79,16 +79,16 @@ test_that("prophet_reg: prophet, (NO XREGS), Test Model Fit Object", { # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -109,7 +109,7 @@ test_that("prophet_reg: prophet, (XREGS), Test Model Fit Object", { # Model Fit model_fit <- model_spec %>% fit(log(value) ~ date + as.numeric(date) + factor(month(date, label = TRUE), ordered = F), - data = training(splits)) + data = rsample::training(splits)) # Structure @@ -150,16 +150,16 @@ test_that("prophet_reg: prophet, (XREGS), Test Model Fit Object", { # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - 
modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -180,19 +180,19 @@ test_that("prophet_reg: prophet (workflow), Test Model Fit Object", { # # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) # Fitted Workflow wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Structure @@ -234,19 +234,19 @@ test_that("prophet_reg: prophet (workflow), Test Model Fit Object", { # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + 
dplyr::mutate_at(dplyr::vars(.value), exp) - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 1500) @@ -272,7 +272,7 @@ test_that("prophet_reg: prophet, Logistic Growth", { growth = "logistic", logistic_cap = 11000 ) %>% - set_engine(engine = "prophet") %>% + parsnip::set_engine(engine = "prophet") %>% fit(value ~ date, m750) # Structure @@ -321,7 +321,7 @@ test_that("prophet_reg: prophet, Logistic Growth", { prophet_reg( growth = "logistic" ) %>% - set_engine(engine = "prophet") %>% + parsnip::set_engine(engine = "prophet") %>% fit(value ~ date, m750) }) diff --git a/tests/testthat/test-algo-seasonal_decomp_arima.R b/tests/testthat/test-algo-seasonal_decomp_arima.R index 329e5f76..5d46073d 100644 --- a/tests/testthat/test-algo-seasonal_decomp_arima.R +++ b/tests/testthat/test-algo-seasonal_decomp_arima.R @@ -14,11 +14,11 @@ test_that("seasonal_reg - arima: parnip", { # SETUP ---- # Split Data 80/20 - splits <- initial_time_split(taylor_30_min, prop = 0.9) + splits <- rsample::initial_time_split(timetk::taylor_30_min, prop = 0.9) # Model Spec model_spec <- seasonal_reg(seasonal_period_1 = "1 day", seasonal_period_2 = "week") %>% - set_engine("stlm_arima") + parsnip::set_engine("stlm_arima") # CHECKS ---- test_that("seasonal_reg: checks", { @@ -26,8 +26,8 @@ test_that("seasonal_reg - arima: parnip", { # external regressors message expect_error({ seasonal_reg(seasonal_period_1 = 1) %>% - 
set_engine("stlm_arima") %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("stlm_arima") %>% + fit(value ~ date, data = rsample::training(splits)) }) }) @@ -36,12 +36,12 @@ test_that("seasonal_reg - arima: parnip", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + wday(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + wday(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # TEST @@ -68,12 +68,12 @@ test_that("seasonal_reg - arima: parnip", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) @@ -88,23 +88,23 @@ test_that("seasonal_reg - arima: parnip", { # SETUP # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "dow") + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "dow") # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit 
%>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # TEST @@ -127,15 +127,15 @@ test_that("seasonal_reg - arima: parnip", { expect_equal(names(mld$outcomes), "value") - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) diff --git a/tests/testthat/test-algo-seasonal_decomp_ets.R b/tests/testthat/test-algo-seasonal_decomp_ets.R index c66499d0..ea9d5fa4 100644 --- a/tests/testthat/test-algo-seasonal_decomp_ets.R +++ b/tests/testthat/test-algo-seasonal_decomp_ets.R @@ -12,23 +12,23 @@ test_that("seasonal_reg: stlm_ets", { # SETUP ---- # Split Data 80/20 - splits <- initial_time_split(taylor_30_min, prop = 0.9) + splits <- rsample::initial_time_split(timetk::taylor_30_min, prop = 0.9) # Model Spec model_spec <- seasonal_reg(seasonal_period_1 = "1 day", seasonal_period_2 = "week") %>% - set_engine("stlm_ets") + parsnip::set_engine("stlm_ets") # external regressors message expect_message({ seasonal_reg(seasonal_period_1 = 24*2) %>% - set_engine("stlm_ets") %>% - fit(value ~ date + month(date, label = TRUE), data = training(splits)) + parsnip::set_engine("stlm_ets") %>% + fit(value ~ date + 
lubridate::month(date, label = TRUE), data = rsample::training(splits)) }) expect_error({ seasonal_reg(seasonal_period_1 = 1) %>% - set_engine("stlm_ets") %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("stlm_ets") %>% + fit(value ~ date, data = rsample::training(splits)) }) @@ -39,12 +39,12 @@ test_that("seasonal_reg: stlm_ets", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "stlm_ets_fit_impl") @@ -69,12 +69,12 @@ test_that("seasonal_reg: stlm_ets", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) @@ -87,22 +87,22 @@ test_that("seasonal_reg: stlm_ets", { # ---- WORKFLOWS ---- # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast 
predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) expect_s3_class(wflw_fit$fit$fit$fit, "stlm_ets_fit_impl") @@ -123,15 +123,15 @@ test_that("seasonal_reg: stlm_ets", { expect_equal(names(mld$outcomes), "value") - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) diff --git a/tests/testthat/test-algo-seasonal_reg_tbats.R b/tests/testthat/test-algo-seasonal_reg_tbats.R index ba7cf5bb..122866c3 100644 --- a/tests/testthat/test-algo-seasonal_reg_tbats.R +++ b/tests/testthat/test-algo-seasonal_reg_tbats.R @@ -5,14 +5,14 @@ context("TEST seasonal_reg() - tbats") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_spec <- seasonal_reg() %>% - set_engine("tbats") + parsnip::set_engine("tbats") # CHECKS ---- test_that("seasonal_reg: checks", { @@ -22,8 +22,8 @@ test_that("seasonal_reg: checks", { # external regressors message expect_error({ 
seasonal_reg(seasonal_period_1 = 1) %>% - set_engine("tbats") %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("tbats") %>% + fit(value ~ date, data = rsample::training(splits)) }) }) @@ -38,12 +38,12 @@ test_that("seasonal_reg - tbats: parsnip", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date + wday(date, label = TRUE), data = training(splits)) + fit(log(value) ~ date + lubridate::wday(date, label = TRUE), data = rsample::training(splits)) # Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) # TEST @@ -69,12 +69,12 @@ test_that("seasonal_reg - tbats: parsnip", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) @@ -95,23 +95,23 @@ test_that("seasonal_reg: workflow", { # SETUP # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) %>% - step_date(date, features = "dow") + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) %>% + recipes::step_date(date, features = "dow") # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast
predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # TEST @@ -133,15 +133,15 @@ test_that("seasonal_reg: workflow", { expect_equal(names(mld$outcomes), "value") - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 2500) diff --git a/tests/testthat/test-algo-temporal_hierarchy.R b/tests/testthat/test-algo-temporal_hierarchy.R index 57fa4468..0ffa6495 100644 --- a/tests/testthat/test-algo-temporal_hierarchy.R +++ b/tests/testthat/test-algo-temporal_hierarchy.R @@ -12,14 +12,14 @@ test_that("thief", { # SETUP ---- # Data - m750 <- m4_monthly %>% filter(id == "M750") + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 - splits <- initial_time_split(m750, prop = 0.9) + splits <- rsample::initial_time_split(m750, prop = 0.9) # Model Spec model_spec <- temporal_hierarchy() %>% - set_engine("thief") + parsnip::set_engine("thief") # HIERARCHICAL PARSNIP ---- @@ -28,12 +28,12 @@ test_that("thief", { # Fit Spec model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # 
Predictions predictions_tbl <- model_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits)) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits)) expect_s3_class(model_fit$fit, "temporal_hier_fit_impl") @@ -53,12 +53,12 @@ test_that("thief", { # Structure - expect_identical(nrow(testing(splits)), nrow(predictions_tbl)) - expect_identical(testing(splits)$date, predictions_tbl$.index) + expect_identical(nrow(rsample::testing(splits)), nrow(predictions_tbl)) + expect_identical(rsample::testing(splits)$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - resid <- testing(splits)$value - exp(predictions_tbl$.value) + resid <- rsample::testing(splits)$value - exp(predictions_tbl$.value) # - Max Error less than 1500 expect_lte(max(abs(resid)), 320) @@ -73,26 +73,26 @@ test_that("thief", { # Model Spec model_spec <- temporal_hierarchy() %>% - set_engine("thief") + parsnip::set_engine("thief") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) # Forecast predictions_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% - modeltime_forecast(new_data = testing(splits), - actual_data = training(splits)) %>% - mutate_at(vars(.value), exp) + modeltime_calibrate(rsample::testing(splits)) %>% + modeltime_forecast(new_data = rsample::testing(splits), + actual_data = rsample::training(splits)) %>% + dplyr::mutate_at(dplyr::vars(.value), exp) # Tests @@ -113,15 +113,15 @@ test_that("thief", { 
expect_equal(names(mld$outcomes), "value") - full_data <- bind_rows(training(splits), testing(splits)) + full_data <- dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) # Structure expect_identical(nrow(full_data), nrow(predictions_tbl)) expect_identical(full_data$date, predictions_tbl$.index) # Out-of-Sample Accuracy Tests - predictions_tbl <- predictions_tbl %>% filter(.key == "prediction") - resid <- testing(splits)$value - predictions_tbl$.value + predictions_tbl <- predictions_tbl %>% dplyr::filter(.key == "prediction") + resid <- rsample::testing(splits)$value - predictions_tbl$.value # - Max Error less than 1500 expect_lte(max(abs(resid)), 320) diff --git a/tests/testthat/test-algo-window_reg.R b/tests/testthat/test-algo-window_reg.R index c6e02274..c7cfc9b1 100644 --- a/tests/testthat/test-algo-window_reg.R +++ b/tests/testthat/test-algo-window_reg.R @@ -1,19 +1,19 @@ context("TEST window_reg() and naive_reg()") # Data - Single Time Series -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") -splits <- initial_time_split(m750, prop = 0.8) +splits <- rsample::initial_time_split(m750, prop = 0.8) # Data - Multiple Time Series (Panel) -full_data_tbl <- m4_monthly %>% - group_by(id) %>% +full_data_tbl <- timetk::m4_monthly %>% + dplyr::group_by(id) %>% future_frame(date, .length_out = 60, .bind_data = TRUE) %>% - ungroup() + dplyr::ungroup() -future_tbl <- full_data_tbl %>% filter(is.na(value)) +future_tbl <- full_data_tbl %>% dplyr::filter(is.na(value)) -data_prepared_tbl <- full_data_tbl %>% filter(!is.na(value)) +data_prepared_tbl <- full_data_tbl %>% dplyr::filter(!is.na(value)) # 1.0 NAIVE ---- @@ -24,34 +24,34 @@ test_that("NAIVE - Single Time Series (No ID)", { skip_on_cran() model_fit <- naive_reg() %>% - set_engine("naive") %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("naive") %>% + fit(value ~ date, data = rsample::training(splits)) calibration_tbl <- 
modeltime_table( model_fit ) %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) forecast_tbl <- calibration_tbl %>% modeltime_forecast( - new_data = testing(splits), + new_data = rsample::testing(splits), actual_data = m750 ) forecast_vec <- forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) future_forecast_tbl <- calibration_tbl %>% modeltime_refit(m750) %>% modeltime_forecast( - h = nrow(testing(splits)), + h = nrow(rsample::testing(splits)), actual_data = m750 ) future_forecast_vec <- future_forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) expect_equal(nrow(forecast_tbl), 368) expect_equal(forecast_vec, rep_len(10810, 62)) @@ -69,7 +69,7 @@ test_that("NAIVE - Multiple Time Series (Panel uses ID)", { skip_on_cran() model_fit_panel <- naive_reg(id = "id") %>% - set_engine("naive") %>% + parsnip::set_engine("naive") %>% fit(value ~ date + id, data = data_prepared_tbl) @@ -83,9 +83,9 @@ test_that("NAIVE - Multiple Time Series (Panel uses ID)", { ) future_vec <- future_forecast_panel_tbl %>% - filter(!is.na(.model_id)) %>% - filter(id == "M1") %>% - pull(.value) + dplyr::filter(!is.na(.model_id)) %>% + dplyr::filter(id == "M1") %>% + dplyr::pull(.value) expect_equal(nrow(future_forecast_panel_tbl), 1814) @@ -99,15 +99,15 @@ test_that("NAIVE - Check New Factors", { skip_on_cran() - wflw_fit_panel <- workflow() %>% - add_model(naive_reg(id = "id") %>% set_engine("naive")) %>% - add_recipe(recipe(value ~ date + id, data = data_prepared_tbl)) %>% + wflw_fit_panel <- workflows::workflow() %>% + workflows::add_model(naive_reg(id = "id") %>% parsnip::set_engine("naive")) %>% + workflows::add_recipe(recipes::recipe(value ~ date + id, data = data_prepared_tbl)) %>% fit(data_prepared_tbl) # FOR SOME REASON PARSNIP MODELS FAIL # Error: Problem occurred during prediction. 
Error in model.frame.default(mod_terms, new_data, na.action = na.action, : factor id has new levels UNSEEN # model_fit_panel <- naive_reg(id = "id") %>% - # set_engine("naive") %>% + # parsnip::set_engine("naive") %>% # fit(value ~ date + id, data = data_prepared_tbl) expect_warning({ @@ -116,17 +116,17 @@ test_that("NAIVE - Check New Factors", { wflw_fit_panel ) %>% modeltime_forecast( - new_data = bind_rows( + new_data = dplyr::bind_rows( future_tbl, future_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), - actual_data = bind_rows( + actual_data = dplyr::bind_rows( data_prepared_tbl, data_prepared_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), keep_data = TRUE ) @@ -135,9 +135,9 @@ test_that("NAIVE - Check New Factors", { future_forecast_vec <- future_forecast_panel_tbl %>% - filter(!is.na(.model_id)) %>% - filter(id == "UNSEEN") %>% - pull(.value) + dplyr::filter(!is.na(.model_id)) %>% + dplyr::filter(id == "UNSEEN") %>% + dplyr::pull(.value) expect_equal(future_forecast_vec, rep_len(NA_real_, 60)) @@ -153,42 +153,42 @@ test_that("SNAIVE - Single Time Series (No ID)", { skip_on_cran() model_fit <- naive_reg() %>% - set_engine("snaive") %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("snaive") %>% + fit(value ~ date, data = rsample::training(splits)) calibration_tbl <- modeltime_table( model_fit ) %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) forecast_tbl <- calibration_tbl %>% modeltime_forecast( - new_data = testing(splits), + new_data = rsample::testing(splits), actual_data = m750 ) forecast_vec <- forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) - last_series <- 
training(splits) %>% - slice_tail(n = 12) %>% - pull(value) + last_series <- rsample::training(splits) %>% + dplyr::slice_tail(n = 12) %>% + dplyr::pull(value) future_forecast_tbl <- calibration_tbl %>% modeltime_refit(m750) %>% modeltime_forecast( - h = nrow(testing(splits)), + h = nrow(rsample::testing(splits)), actual_data = m750 ) future_forecast_vec <- future_forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) future_last_series <- m750 %>% - slice_tail(n = 12) %>% - pull(value) + dplyr::slice_tail(n = 12) %>% + dplyr::pull(value) expect_equal(model_fit$fit$extras$period, 12) @@ -206,7 +206,7 @@ test_that("SNAIVE - Multiple Time Series (Panel ID)", { skip_on_cran() model_fit_panel <- naive_reg(id = "id") %>% - set_engine("snaive") %>% + parsnip::set_engine("snaive") %>% fit(value ~ date + id, data = data_prepared_tbl) future_forecast_panel_tbl <- modeltime_table( @@ -219,15 +219,13 @@ test_that("SNAIVE - Multiple Time Series (Panel ID)", { ) future_vec <- future_forecast_panel_tbl %>% - filter(!is.na(.model_id)) %>% - filter(id == "M1") %>% - pull(.value) + dplyr::filter(id == "M1", !is.na(.model_id)) %>% + dplyr::pull(.value) last_series_vec <- future_forecast_panel_tbl %>% - filter(is.na(.model_id)) %>% - filter(id == "M1") %>% - slice_tail(n = 12) %>% - pull(.value) + dplyr::filter(id == "M1", is.na(.model_id)) %>% + dplyr::slice_tail(n = 12) %>% + dplyr::pull(.value) expect_equal(nrow(future_forecast_panel_tbl), 1814) @@ -242,15 +240,15 @@ test_that("SNAIVE - Check New Factors", { skip_on_cran() - wflw_fit_panel <- workflow() %>% - add_model(naive_reg(id = "id") %>% set_engine("snaive")) %>% - add_recipe(recipe(value ~ date + id, data = data_prepared_tbl)) %>% + wflw_fit_panel <- workflows::workflow() %>% + workflows::add_model(naive_reg(id = "id") %>% parsnip::set_engine("snaive")) %>% + workflows::add_recipe(recipes::recipe(value ~ date + id, data = data_prepared_tbl)) %>% 
fit(data_prepared_tbl) # FOR SOME REASON PARSNIP MODELS FAIL # Error: Problem occurred during prediction. Error in model.frame.default(mod_terms, new_data, na.action = na.action, : factor id has new levels UNSEEN # model_fit_panel <- naive_reg(id = "id") %>% - # set_engine("snaive") %>% + # parsnip::set_engine("snaive") %>% # fit(value ~ date + id, data = data_prepared_tbl) expect_warning({ @@ -259,17 +257,17 @@ test_that("SNAIVE - Check New Factors", { wflw_fit_panel ) %>% modeltime_forecast( - new_data = bind_rows( + new_data = dplyr::bind_rows( future_tbl, future_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), - actual_data = bind_rows( + actual_data = dplyr::bind_rows( data_prepared_tbl, data_prepared_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), keep_data = TRUE ) @@ -277,9 +275,8 @@ test_that("SNAIVE - Check New Factors", { future_forecast_vec <- future_forecast_panel_tbl %>% - filter(!is.na(.model_id)) %>% - filter(id == "UNSEEN") %>% - pull(.value) + dplyr::filter(id == "UNSEEN", !is.na(.model_id)) %>% + dplyr::pull(.value) expect_equal(future_forecast_vec, rep_len(NA_real_, 60)) @@ -298,57 +295,57 @@ test_that("WINDOW - Single Time Series (No ID)", { model_fit_1 <- window_reg( window_size = 24 ) %>% - set_engine("window_function", window_function = ~ mean(.x, na.rm = TRUE),) %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("window_function", window_function = ~ mean(.x, na.rm = TRUE),) %>% + fit(value ~ date, data = rsample::training(splits)) model_fit_2 <- window_reg( window_size = 36 ) %>% - set_engine("window_function", window_function = median, na.rm = TRUE) %>% - fit(value ~ date, data = training(splits)) + parsnip::set_engine("window_function", window_function = median, na.rm = 
TRUE) %>% + fit(value ~ date, data = rsample::training(splits)) model_fit_3 <- window_reg() %>% - set_engine("window_function", + parsnip::set_engine("window_function", window_function = ~ tail(.x, 12), na.rm = TRUE) %>% - fit(value ~ date, data = training(splits)) + fit(value ~ date, data = rsample::training(splits)) calibration_tbl <- modeltime_table( model_fit_1, model_fit_2, model_fit_3 ) %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) forecast_tbl <- calibration_tbl %>% modeltime_forecast( - new_data = testing(splits), + new_data = rsample::testing(splits), actual_data = m750 ) forecast_vec <- forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) - last_series <- training(splits) %>% - slice_tail(n = 12) %>% - pull(value) + last_series <- rsample::training(splits) %>% + dplyr::slice_tail(n = 12) %>% + dplyr::pull(value) future_forecast_tbl <- calibration_tbl %>% modeltime_refit(m750) %>% modeltime_forecast( - h = nrow(testing(splits)), + h = nrow(rsample::testing(splits)), actual_data = m750, keep_data = TRUE ) future_forecast_vec <- future_forecast_tbl %>% - filter(.model_id == 1) %>% - pull(.value) + dplyr::filter(.model_id == 1) %>% + dplyr::pull(.value) future_last_series <- m750 %>% - slice_tail(n = 12) %>% - pull(value) + dplyr::slice_tail(n = 12) %>% + dplyr::pull(value) expect_equal(model_fit_1$fit$extras$period, 24) @@ -370,7 +367,7 @@ test_that("WINDOW - Multiple Time Series (Panel ID)", { id = "id", window_size = 12 ) %>% - set_engine("window_function", window_function = mean) %>% + parsnip::set_engine("window_function", window_function = mean) %>% fit(value ~ date + id, data = data_prepared_tbl) future_forecast_panel_tbl <- modeltime_table( @@ -383,15 +380,13 @@ test_that("WINDOW - Multiple Time Series (Panel ID)", { ) future_vec <- future_forecast_panel_tbl %>% - filter(!is.na(.model_id)) %>% - filter(id == "M1") %>% - pull(.value) + 
dplyr::filter(id == "M1", !is.na(.model_id)) %>% + dplyr::pull(.value) last_series_vec <- future_forecast_panel_tbl %>% - filter(is.na(.model_id)) %>% - filter(id == "M1") %>% - slice_tail(n = 12) %>% - pull(.value) + dplyr::filter(id == "M1", is.na(.model_id)) %>% + dplyr::slice_tail(n = 12) %>% + dplyr::pull(.value) expect_equal(nrow(future_forecast_panel_tbl), 1814) @@ -407,15 +402,15 @@ test_that("SNAIVE - Check New Factors", { skip_on_cran() - wflw_fit_panel <- workflow() %>% - add_model(window_reg(id = "id") %>% set_engine("window_function")) %>% - add_recipe(recipe(value ~ date + id, data = data_prepared_tbl)) %>% + wflw_fit_panel <- workflows::workflow() %>% + workflows::add_model(window_reg(id = "id") %>% parsnip::set_engine("window_function")) %>% + workflows::add_recipe(recipes::recipe(value ~ date + id, data = data_prepared_tbl)) %>% fit(data_prepared_tbl) # FOR SOME REASON PARSNIP MODELS FAIL # Error: Problem occurred during prediction. Error in model.frame.default(mod_terms, new_data, na.action = na.action, : factor id has new levels UNSEEN # model_fit_panel <- naive_reg(id = "id") %>% - # set_engine("snaive") %>% + # parsnip::set_engine("snaive") %>% # fit(value ~ date + id, data = data_prepared_tbl) expect_warning({ @@ -424,17 +419,17 @@ test_that("SNAIVE - Check New Factors", { wflw_fit_panel ) %>% modeltime_forecast( - new_data = bind_rows( + new_data = dplyr::bind_rows( future_tbl, future_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), - actual_data = bind_rows( + actual_data = dplyr::bind_rows( data_prepared_tbl, data_prepared_tbl %>% - filter(id == "M1") %>% - mutate(id = fct_recode(id, UNSEEN = "M1")) + dplyr::filter(id == "M1") %>% + dplyr::mutate(id = forcats::fct_recode(id, UNSEEN = "M1")) ), keep_data = TRUE ) @@ -442,9 +437,8 @@ test_that("SNAIVE - Check New Factors", { future_forecast_vec <- future_forecast_panel_tbl %>%
- filter(!is.na(.model_id)) %>% - filter(id == "UNSEEN") %>% - pull(.value) + dplyr::filter(id == "UNSEEN", !is.na(.model_id)) %>% + dplyr::pull(.value) expect_equal(future_forecast_vec, rep_len(NA_real_, 60)) diff --git a/tests/testthat/test-conf_by_id.R b/tests/testthat/test-conf_by_id.R index 219ef8b0..73ac4195 100644 --- a/tests/testthat/test-conf_by_id.R +++ b/tests/testthat/test-conf_by_id.R @@ -7,7 +7,6 @@ test_that("Confidence and Accuracy by ID", { library(tidymodels) library(timetk) - library(modeltime) library(tidyverse) # Data @@ -17,19 +16,19 @@ test_that("Confidence and Accuracy by ID", { splits <- data %>% time_series_split(assess = "3 months", cumulative = TRUE) - rec_obj <- recipe(value ~ ., training(splits)) %>% + rec_obj <- recipes::recipe(value ~ ., rsample::training(splits)) %>% step_timeseries_signature(date) %>% - step_rm(date) %>% - step_zv(all_predictors()) %>% - step_dummy(all_nominal_predictors(), one_hot = TRUE) + recipes::step_rm(date) %>% + recipes::step_zv(recipes::all_predictors()) %>% + recipes::step_dummy(recipes::all_nominal_predictors(), one_hot = TRUE) - wflw_xgb <- workflow() %>% - add_model( - boost_tree("regression") %>% set_engine("xgboost") + wflw_xgb <- workflows::workflow() %>% + workflows::add_model( + boost_tree("regression") %>% parsnip::set_engine("xgboost") ) %>% - add_recipe(rec_obj) %>% - fit(training(splits)) + workflows::add_recipe(rec_obj) %>% + fit(rsample::training(splits)) model_tbl <- modeltime_table( @@ -40,7 +39,7 @@ test_that("Confidence and Accuracy by ID", { # CALIBRATION BY ID ---- - test_data <- testing(splits) %>% arrange(ID, date) + test_data <- rsample::testing(splits) %>% arrange(ID, date) calib_tbl <- model_tbl %>% modeltime_calibrate(new_data = test_data, id = "ID") @@ -77,7 +76,7 @@ test_that("Confidence and Accuracy by ID", { forecast_tbl <- calib_tbl %>% modeltime_forecast( - new_data = testing(splits), + new_data = rsample::testing(splits), actual_data = NULL, conf_by_id = TRUE ) diff --git 
a/tests/testthat/test-default_accuracy_metric_sets.R b/tests/testthat/test-default_accuracy_metric_sets.R index be7c2319..6ef13ffd 100644 --- a/tests/testthat/test-default_accuracy_metric_sets.R +++ b/tests/testthat/test-default_accuracy_metric_sets.R @@ -1,5 +1,4 @@ -library(testthat) library(tidymodels) library(tibble) library(dplyr) diff --git a/tests/testthat/test-developer-tools-constructor.R b/tests/testthat/test-developer-tools-constructor.R index bf57934a..1ca89eaf 100644 --- a/tests/testthat/test-developer-tools-constructor.R +++ b/tests/testthat/test-developer-tools-constructor.R @@ -12,12 +12,12 @@ test_that("modeltime bridge: Good Structure", { # lm_model <- lm(value ~ as.numeric(date) + hour(date) + wday(date, label = TRUE), - data = taylor_30_min) + data = timetk::taylor_30_min) data = tibble( - date = taylor_30_min$date, # Important - The column name must match the modeled data + date = timetk::taylor_30_min$date, # Important - The column name must match the modeled data # These are standardized names: .value, .fitted, .resid - .actual = taylor_30_min$value, + .actual = timetk::taylor_30_min$value, .fitted = lm_model$fitted.values %>% as.numeric(), .residuals = lm_model$residuals %>% as.numeric() ) diff --git a/tests/testthat/test-developer-tools-xregs.R b/tests/testthat/test-developer-tools-xregs.R index ac4e8476..fdbf8a7c 100644 --- a/tests/testthat/test-developer-tools-xregs.R +++ b/tests/testthat/test-developer-tools-xregs.R @@ -8,9 +8,9 @@ context("TEST DEVELOPER TOOLS - XREG TOOLS") # # - id is removed because of zero variance # # - date is removed # expect_warning({ -# null_recipe <- m4_monthly %>% -# filter(id == "M750") %>% -# select(-value) %>% +# null_recipe <- timetk::m4_monthly %>% +# dplyr::filter(id == "M750") %>% +# dplyr::select(-value) %>% # create_xreg_recipe(prepare = TRUE, one_hot = TRUE) # # }) @@ -26,10 +26,10 @@ test_that("create_xreg_recipe: dummy_encode = FALSE returns factors", { skip_on_cran() # Month - predictors <- 
m4_monthly %>% - filter(id == "M750") %>% - select(-value) %>% - mutate(`month lbl` = month(date, label = TRUE)) + predictors <- timetk::m4_monthly %>% + dplyr::filter(id == "M750") %>% + dplyr::select(-value) %>% + dplyr::mutate(`month lbl` = lubridate::month(date, label = TRUE)) xreg_recipe_spec <- create_xreg_recipe(predictors, prepare = TRUE, dummy_encode = FALSE) @@ -37,7 +37,7 @@ test_that("create_xreg_recipe: dummy_encode = FALSE returns factors", { expect_equal(ncol(juiced), 1) - expect_s3_class(pull(juiced), "factor") + expect_s3_class(dplyr::pull(juiced), "factor") }) @@ -47,10 +47,10 @@ test_that("create_xreg_recipe: dummy_encode = TRUE returns dummies", { skip_on_cran() # Month - predictors <- m4_monthly %>% - filter(id == "M750") %>% - select(-value) %>% - mutate(`month lbl` = month(date, label = TRUE)) + predictors <- timetk::m4_monthly %>% + dplyr::filter(id == "M750") %>% + dplyr::select(-value) %>% + dplyr::mutate(`month lbl` = lubridate::month(date, label = TRUE)) xreg_recipe_spec <- create_xreg_recipe(predictors, prepare = TRUE, dummy_encode = TRUE) diff --git a/tests/testthat/test-extended_accuracy_metric_set.R b/tests/testthat/test-extended_accuracy_metric_set.R index 2893b07e..cca87601 100644 --- a/tests/testthat/test-extended_accuracy_metric_set.R +++ b/tests/testthat/test-extended_accuracy_metric_set.R @@ -1,4 +1,3 @@ -library(testthat) library(tidymodels) library(tibble) library(dplyr) @@ -9,7 +8,7 @@ test_that("extended_forecast_accuracy_metric_set works", { skip_on_cran() set.seed(1) - data <- tibble( + data <- dplyr::tibble( time = tk_make_timeseries("2020", by = "sec", length_out = 10), y = 1:10 + rnorm(10), y_hat = 1:10 + rnorm(10) @@ -19,16 +18,17 @@ test_that("extended_forecast_accuracy_metric_set works", { calc_default_metrics <- extended_forecast_accuracy_metric_set(yardstick::mae) # Apply the metric summarizer to new data + skip_if_not_installed("TSrepr") ret <- calc_default_metrics(data, y, y_hat) expect_equal(nrow(ret), 8) }) 
test_that("summarize_accuracy_metrics works", { - + skip_if_not_installed("TSrepr") skip_on_cran() - predictions_tbl <- tibble( + predictions_tbl <- dplyr::tibble( group = c(rep("model_1", 4), rep("model_2", 4)), truth = c(1, 2, 3, 4, @@ -38,7 +38,7 @@ test_that("summarize_accuracy_metrics works", { ) accuracy_tbl <- predictions_tbl %>% - group_by(group) %>% + dplyr::group_by(group) %>% summarize_accuracy_metrics( truth, estimate, metric_set = extended_forecast_accuracy_metric_set() @@ -48,7 +48,7 @@ test_that("summarize_accuracy_metrics works", { accuracy_tbl <- predictions_tbl %>% - group_by(group) %>% + dplyr::group_by(group) %>% summarize_accuracy_metrics( truth, estimate, metric_set = extended_forecast_accuracy_metric_set( diff --git a/tests/testthat/test-fit_workflowsets.R b/tests/testthat/test-fit_workflowsets.R index c47a7db4..51802ceb 100644 --- a/tests/testthat/test-fit_workflowsets.R +++ b/tests/testthat/test-fit_workflowsets.R @@ -1,8 +1,6 @@ context("WORKFLOWSETS") -library(testthat) library(tidymodels) -library(modeltime) library(workflowsets) library(tidyverse) library(timetk) @@ -16,26 +14,26 @@ test_that("Workflowsets Tests", { # Sequential - workflowset is correct order ---- - data_set <- m4_monthly + data_set <- timetk::m4_monthly # SETUP WORKFLOWSETS - rec1 <- recipe(value ~ date + id, data_set) %>% - step_mutate(date_num = as.numeric(date)) %>% - step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% + rec1 <- recipes::recipe(value ~ date + id, data_set) %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% + recipes::step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% step_dummy(all_nominal(), one_hot = TRUE) - rec2 <- recipe(value ~ date + id, data_set) %>% - step_mutate(date_num = as.numeric(date)) %>% - step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% + rec2 <- recipes::recipe(value ~ date + id, data_set) %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% + 
recipes::step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% step_dummy(all_nominal(), one_hot = TRUE) %>% step_ts_clean(value) mod_spec_prophet <- prophet_reg() %>% - set_engine("prophet") + parsnip::set_engine("prophet") mod_spec_ets <- exp_smoothing() %>% - set_engine("ets") + parsnip::set_engine("ets") wfsets <- workflowsets::workflow_set( preproc = list(rec1 = rec1, rec2 = rec2), @@ -132,7 +130,7 @@ test_that("Workflowsets Tests", { # "Parallel - workflowset is correct order" ---- - + # Note: this call fails if no previous versions of modeltime exist. model_par_tbl <- wfsets %>% modeltime_fit_workflowset( data_set, control = control_fit_workflowset(allow_par = TRUE, cores = 2) diff --git a/tests/testthat/test-helpers-combine-modeltime-tables.R b/tests/testthat/test-helpers-combine-modeltime-tables.R index 502c42b8..e7287838 100644 --- a/tests/testthat/test-helpers-combine-modeltime-tables.R +++ b/tests/testthat/test-helpers-combine-modeltime-tables.R @@ -9,22 +9,22 @@ test_that("combine_modeltime_table(): succeeds with mdl_time_tbl classes", { # - m750 <- m4_monthly %>% - filter(id == "M750") + m750 <- timetk::m4_monthly %>% + dplyr::filter(id == "M750") splits <- time_series_split(m750, assess = "3 years", cumulative = TRUE) model_fit_arima <- arima_reg() %>% - set_engine("auto_arima") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("auto_arima") %>% + fit(value ~ date, rsample::training(splits)) model_fit_prophet <- prophet_reg() %>% - set_engine("prophet") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("prophet") %>% + fit(value ~ date, rsample::training(splits)) model_fit_ets <- exp_smoothing() %>% - set_engine("ets") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("ets") %>% + fit(value ~ date, rsample::training(splits)) # Make 3 separate modeltime tables model_tbl_1 <- modeltime_table(model_fit_arima) @@ -34,7 +34,7 @@ test_that("combine_modeltime_table(): succeeds with mdl_time_tbl classes", 
{ model_tbl_3 <- modeltime_table(model_fit_ets) calib_tbl <- model_tbl_1 %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) # combine_modeltime_table(): succeeds with mdl_time_tbl classes diff --git a/tests/testthat/test-helpers-pull_parsnip_preprocessor.R b/tests/testthat/test-helpers-pull_parsnip_preprocessor.R index 6045689a..32901789 100644 --- a/tests/testthat/test-helpers-pull_parsnip_preprocessor.R +++ b/tests/testthat/test-helpers-pull_parsnip_preprocessor.R @@ -1,9 +1,9 @@ # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.9) +splits <- rsample::initial_time_split(m750, prop = 0.9) # * (MATRIX) ARIMA ---- @@ -16,8 +16,8 @@ test_that("arima - matrix interface", { form <- stats::formula("log(value) ~ date") model_fit_no_boost <- arima_reg() %>% - set_engine(engine = "auto_arima") %>% - fit(form, data = training(splits)) + parsnip::set_engine(engine = "auto_arima") %>% + fit(form, data = rsample::training(splits)) form_extract <- model_fit_no_boost %>% pull_parsnip_preprocessor() @@ -29,14 +29,13 @@ test_that("arima - matrix interface", { # (FORMULA - S3) MARS ---- test_that("MARS - S3 FORMULA", { - + skip_if_not_installed("earth") skip_on_cran() - form <- stats::formula("log(value) ~ as.numeric(date) + month(date, label = TRUE)") - + form <- stats::formula("log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE)") model_fit_mars <- mars(mode = "regression") %>% - set_engine("earth") %>% - fit(form, data = training(splits)) + parsnip::set_engine("earth") %>% + fit(form, data = rsample::training(splits)) form_extract <- model_fit_mars %>% pull_parsnip_preprocessor() @@ -48,14 +47,14 @@ test_that("MARS - S3 FORMULA", { test_that("SVM - S4 FORMULA", { - + skip_if_not_installed("kernlab") skip_on_cran() - form <- stats::formula("log(value) ~ as.numeric(date) + month(date, label = TRUE)") 
+ form <- stats::formula("log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE)") model_fit_svm <- svm_rbf(mode = "regression") %>% - set_engine("kernlab") %>% - fit(form, data = training(splits)) + parsnip::set_engine("kernlab") %>% + fit(form, data = rsample::training(splits)) form_extract <- model_fit_svm %>% pull_parsnip_preprocessor() diff --git a/tests/testthat/test-helpers-update-modeltime-tables.R b/tests/testthat/test-helpers-update-modeltime-tables.R index 0800d125..6a111af8 100644 --- a/tests/testthat/test-helpers-update-modeltime-tables.R +++ b/tests/testthat/test-helpers-update-modeltime-tables.R @@ -11,42 +11,42 @@ test_that("TEST MODELTIME TABLE HELPERS", { # SETUP # Data - m750 <- m4_monthly %>% filter(id == "M750") - splits <- initial_time_split(m750, prop = 0.8) + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") + splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Specs # This model updates model_spec_arima_1 <- arima_reg(seasonal_period = 1) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # This model does not update model_spec_arima_12 <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # PARSNIP INTERFACE ---- model_fit_arima_1 <- model_spec_arima_1 %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) model_fit_arima_12 <- model_spec_arima_12 %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # WORKFLOW INTERFACE ---- - recipe_spec <- recipe(value ~ date, training(splits)) %>% - step_log(value) + recipe_spec <- recipes::recipe(value ~ date, rsample::training(splits)) %>% + recipes::step_log(value) - workflow_fit_arima_1 <- workflow() %>% - add_model(model_spec_arima_1) %>% - add_recipe(recipe_spec) %>% - fit(training(splits)) + workflow_fit_arima_1 <- workflows::workflow() %>% + workflows::add_model(model_spec_arima_1) 
%>% + workflows::add_recipe(recipe_spec) %>% + fit(rsample::training(splits)) - workflow_fit_arima_12 <- workflow() %>% - add_model(model_spec_arima_12) %>% - add_recipe(recipe_spec) %>% - fit(training(splits)) + workflow_fit_arima_12 <- workflows::workflow() %>% + workflows::add_model(model_spec_arima_12) %>% + workflows::add_recipe(recipe_spec) %>% + fit(rsample::training(splits)) # TESTS ---- @@ -102,7 +102,7 @@ test_that("TEST MODELTIME TABLE HELPERS", { # Description Updates, Post Refit refit_tbl <- updated_model_tbl %>% - modeltime_calibrate(training(splits)) %>% + modeltime_calibrate(rsample::training(splits)) %>% modeltime_refit(m750) expected <- c( diff --git a/tests/testthat/test-modeltime_residuals.R b/tests/testthat/test-modeltime_residuals.R index 8765ba0c..bc054ea2 100644 --- a/tests/testthat/test-modeltime_residuals.R +++ b/tests/testthat/test-modeltime_residuals.R @@ -6,7 +6,6 @@ test_that("modeltime_residuals(): Returns correct order", { skip_on_cran() library(tidymodels) - library(modeltime) library(tidyverse) library(timetk) @@ -19,31 +18,31 @@ test_that("modeltime_residuals(): Returns correct order", { splits <- data %>% time_series_split(assess = "3 months", cumulative = TRUE) - rec_obj <- recipe(value ~ ., training(splits)) %>% - step_mutate(ID = droplevels(ID)) %>% + rec_obj <- recipes::recipe(value ~ ., rsample::training(splits)) %>% + recipes::step_mutate(ID = droplevels(ID)) %>% step_timeseries_signature(date) %>% - step_rm(date) %>% - step_zv(all_predictors()) %>% - step_dummy(all_nominal_predictors(), one_hot = TRUE) + recipes::step_rm(date) %>% + recipes::step_zv(recipes::all_predictors()) %>% + recipes::step_dummy(recipes::all_nominal_predictors(), one_hot = TRUE) # Workflow - wflw_xgb <- workflow() %>% - add_model( - boost_tree("regression") %>% set_engine("xgboost") + wflw_xgb <- workflows::workflow() %>% + workflows::add_model( + boost_tree("regression") %>% parsnip::set_engine("xgboost") ) %>% - add_recipe(rec_obj) %>% - 
fit(training(splits)) + workflows::add_recipe(rec_obj) %>% + fit(rsample::training(splits)) - - wflw_glmnet <- workflow() %>% - add_model( + skip_if_not_installed("glmnet") + wflw_glmnet <- workflows::workflow() %>% + workflows::add_model( linear_reg(penalty = 1) %>% - set_engine("glmnet") + parsnip::set_engine("glmnet") ) %>% - add_recipe(rec_obj) %>% - fit(training(splits)) + workflows::add_recipe(rec_obj) %>% + fit(rsample::training(splits)) model_tbl <- modeltime_table( @@ -57,10 +56,10 @@ test_that("modeltime_residuals(): Returns correct order", { # Order is not changed for the training set calib_tbl_train <- model_tbl %>% modeltime_calibrate( - training(splits) + rsample::training(splits) ) - expect_equal(training(splits)$value, calib_tbl_train$.calibration_data[[1]]$.actual) + expect_equal(rsample::training(splits)$value, calib_tbl_train$.calibration_data[[1]]$.actual) # Usage of the full data set diff --git a/tests/testthat/test-modeltime_table-forecast-accuracy-refitting.R b/tests/testthat/test-modeltime_table-forecast-accuracy-refitting.R index c00a575e..a6f5aa8e 100644 --- a/tests/testthat/test-modeltime_table-forecast-accuracy-refitting.R +++ b/tests/testthat/test-modeltime_table-forecast-accuracy-refitting.R @@ -5,10 +5,10 @@ context("TEST MODELTIME WORKFLOW VS MODELS") # - Test Multiple Modeltime Objects # Data -m750 <- m4_monthly %>% filter(id == "M750") +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # Split Data 80/20 -splits <- initial_time_split(m750, prop = 0.9) +splits <- rsample::initial_time_split(m750, prop = 0.9) # MODELTIME MODELS ---- @@ -20,8 +20,8 @@ test_that("Auto ARIMA (Parsnip)", { skip_on_cran() model_fit_no_boost <- arima_reg() %>% - set_engine(engine = "auto_arima") %>% - fit(log(value) ~ date, data = training(splits)) + parsnip::set_engine(engine = "auto_arima") %>% + fit(log(value) ~ date, data = rsample::training(splits)) # ** Model Table ---- @@ -34,7 +34,7 @@ test_that("Auto ARIMA (Parsnip)", { # ** Calibrate 
---- calibrated_tbl <- model_table %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) expect_s3_class(calibrated_tbl, "mdl_time_tbl") @@ -50,9 +50,9 @@ test_that("Auto ARIMA (Parsnip)", { }) forecast_tbl <- calibrated_tbl %>% - modeltime_forecast(testing(splits)) + modeltime_forecast(rsample::testing(splits)) - expect_equal(nrow(forecast_tbl), nrow(testing(splits))) + expect_equal(nrow(forecast_tbl), nrow(rsample::testing(splits))) # ** Accuracy ---- accuracy_tbl <- calibrated_tbl %>% @@ -85,17 +85,17 @@ test_that("Auto ARIMA (Workflow)", { # - wflw_fit_arima <- workflow() %>% - add_model( + wflw_fit_arima <- workflows::workflow() %>% + workflows::add_model( spec = arima_reg() %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") ) %>% - add_recipe( - recipe = recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% - step_log(value) + workflows::add_recipe( + recipe = recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_log(value) ) %>% - fit(training(splits)) + fit(rsample::training(splits)) # ** Model Table ---- model_table <- modeltime_table(wflw_fit_arima) @@ -107,7 +107,7 @@ test_that("Auto ARIMA (Workflow)", { # ** Calibrate ---- calibrated_tbl <- model_table %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) expect_s3_class(calibrated_tbl, "mdl_time_tbl") @@ -117,9 +117,9 @@ test_that("Auto ARIMA (Workflow)", { # ** Forecast ---- forecast_tbl <- calibrated_tbl %>% - modeltime_forecast(testing(splits)) + modeltime_forecast(rsample::testing(splits)) - expect_equal(nrow(forecast_tbl), nrow(testing(splits))) + expect_equal(nrow(forecast_tbl), nrow(rsample::testing(splits))) # ** Accuracy ---- accuracy_tbl <- calibrated_tbl %>% @@ -150,21 +150,21 @@ test_that("Models for Mega Test", { # * Auto ARIMA (Parsnip) ---- model_fit_no_boost <- arima_reg() %>% - 
set_engine(engine = "auto_arima") %>% - fit(log(value) ~ date, data = training(splits)) + parsnip::set_engine(engine = "auto_arima") %>% + fit(log(value) ~ date, data = rsample::training(splits)) # * Auto ARIMA (Workflow) ----- - wflw_fit_arima <- workflow() %>% - add_model( + wflw_fit_arima <- workflows::workflow() %>% + workflows::add_model( spec = arima_reg() %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") ) %>% - add_recipe( - recipe = recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% - step_log(value) + workflows::add_recipe( + recipe = recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_log(value) ) %>% - fit(training(splits)) + fit(rsample::training(splits)) # * ARIMA Boosted (Parsnip) ---- @@ -176,17 +176,17 @@ test_that("Models for Mega Test", { seasonal_differences = 1, seasonal_ma = 1 ) %>% - set_engine(engine = "arima_xgboost") %>% - fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine(engine = "arima_xgboost") %>% + fit(log(value) ~ date + as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) # * ETS (Parsnip) ---- model_fit_ets <- exp_smoothing() %>% - set_engine("ets") %>% - fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("ets") %>% + fit(log(value) ~ date + as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) @@ -197,15 +197,15 @@ test_that("Models for Mega Test", { error = "multiplicative", trend = "additive", season = "multiplicative") %>% - set_engine("ets") + parsnip::set_engine("ets") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value) - wflw_fit_ets <- 
workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_ets <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) @@ -216,33 +216,33 @@ test_that("Models for Mega Test", { # * LM (Parsnip Model) ---- model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% - fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("lm") %>% + fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) # * LM workflow ----- model_spec <- linear_reg() %>% - set_engine("lm") + parsnip::set_engine("lm") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% - step_log(value) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_log(value) - wflw_fit_lm <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_lm <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) # * MARS (Parsnip Model) ---- - + skip_if_not_installed("earth") model_fit_mars <- mars(mode = "regression") %>% - set_engine("earth") %>% - fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("earth") %>% + fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) @@ -250,19 +250,19 @@ test_that("Models for Mega Test", { # * MARS (Workflow) ----- model_spec <- mars(mode = "regression") %>% - set_engine("earth") + parsnip::set_engine("earth") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month", ordinal = FALSE) %>% - step_mutate(date_num 
= as.numeric(date)) %>% - step_normalize(date_num) %>% - step_rm(date) %>% - step_log(value) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month", ordinal = FALSE) %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% + recipes::step_normalize(date_num) %>% + recipes::step_rm(date) %>% + recipes::step_log(value) - wflw_fit_mars <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_mars <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) @@ -270,29 +270,30 @@ test_that("Models for Mega Test", { # * SVM (Parsnip Model) ---- model_fit_svm <- svm_rbf(mode = "regression") %>% - set_engine("kernlab") %>% - fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("kernlab") %>% + fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) # * SVM (Workflow) ----- + skip_if_not_installed("kernlab") model_spec <- svm_rbf(mode = "regression") %>% - set_engine("kernlab") + parsnip::set_engine("kernlab") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% step_rm(date) %>% # SVM requires dummy variables step_dummy(all_nominal()) %>% - step_log(value) + recipes::step_log(value) - wflw_fit_svm <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_svm <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) @@ -304,42 +305,42 @@ test_that("Models for Mega Test", { # model_fit_glmnet <- linear_reg( # penalty = 0.000388 # ) 
%>% - # set_engine("glmnet") %>% - # fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - # data = training(splits)) + # parsnip::set_engine("glmnet") %>% + # fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + # data = rsample::training(splits)) # # model_fit_glmnet %>% - # modeltime_calibrate(testing(splits)) %>% + # modeltime_calibrate(rsample::testing(splits)) %>% # modeltime_accuracy() # * GLMNET (workflow) ---- # model_spec <- linear_reg(penalty = 0.000388) %>% - # set_engine("glmnet") + # parsnip::set_engine("glmnet") # - # recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - # step_date(date, features = "month") %>% - # step_mutate(date_num = as.numeric(date)) %>% + # recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + # recipes::step_date(date, features = "month") %>% + # recipes::step_mutate(date_num = as.numeric(date)) %>% # step_rm(date) %>% # step_dummy(all_nominal()) %>% - # step_log(value) + # recipes::step_log(value) # - # wflw_fit_glmnet <- workflow() %>% - # add_recipe(recipe_spec) %>% - # add_model(model_spec) %>% - # fit(training(splits)) + # wflw_fit_glmnet <- workflows::workflow() %>% + # workflows::add_recipe(recipe_spec) %>% + # workflows::add_model(model_spec) %>% + # fit(rsample::training(splits)) # # wflw_fit_glmnet %>% - # modeltime_calibrate(testing(splits)) %>% + # modeltime_calibrate(rsample::testing(splits)) %>% # modeltime_accuracy() # * randomForest (parsnip) ---- model_fit_randomForest <- rand_forest(mode = "regression") %>% - set_engine("randomForest") %>% - fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("randomForest") %>% + fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) @@ -347,47 +348,47 @@ test_that("Models for Mega Test", { # * randomForest (workflow) ---- model_spec <- rand_forest("regression") %>% - 
set_engine("randomForest") + parsnip::set_engine("randomForest") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) %>% + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% step_rm(date) %>% step_dummy(all_nominal()) %>% - step_log(value) + recipes::step_log(value) - wflw_fit_randomForest <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_randomForest <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) # * XGBoost (parsnip) ---- model_fit_xgboost <- boost_tree(mode = "regression") %>% - set_engine("xgboost", objective = "reg:squarederror") %>% - fit(log(value) ~ as.numeric(date) + month(date, label = TRUE), - data = training(splits)) + parsnip::set_engine("xgboost", objective = "reg:squarederror") %>% + fit(log(value) ~ as.numeric(date) + lubridate::month(date, label = TRUE), + data = rsample::training(splits)) # * XGBoost (workflow) ---- model_spec <- boost_tree("regression") %>% - set_engine("xgboost", objective = "reg:squarederror") + parsnip::set_engine("xgboost", objective = "reg:squarederror") - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_date(date, features = "month") %>% - step_mutate(date_num = as.numeric(date)) %>% + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_date(date, features = "month") %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% step_rm(date) %>% step_dummy(all_nominal()) %>% - step_log(value) + recipes::step_log(value) - wflw_fit_xgboost <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - fit(training(splits)) + wflw_fit_xgboost <- 
workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + fit(rsample::training(splits)) @@ -432,7 +433,7 @@ test_that("Models for Mega Test", { expect_error(modeltime_accuracy(1)) accuracy_tbl <- model_table %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_accuracy() # Structure @@ -446,10 +447,10 @@ test_that("Models for Mega Test", { expect_error(modeltime_forecast(1)) forecast_tbl <- model_table %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_forecast( - new_data = testing(splits), - actual_data = bind_rows(training(splits), testing(splits)) + new_data = rsample::testing(splits), + actual_data = dplyr::bind_rows(rsample::training(splits), rsample::testing(splits)) ) # forecast_tbl %>% @@ -461,7 +462,7 @@ test_that("Models for Mega Test", { # Correct number of forecasts produced expect_equal( nrow(forecast_tbl), - nrow(model_table) * nrow(testing(splits)) + nrow(bind_rows(training(splits), testing(splits))) + nrow(model_table) * nrow(rsample::testing(splits)) + nrow(dplyr::bind_rows(rsample::training(splits), rsample::testing(splits))) ) @@ -470,7 +471,7 @@ test_that("Models for Mega Test", { model_table_refit <- model_table %>% # filter(.model_id %in% c(10)) %>% # filter(.model_id %in% c(1,3,4,6,8,10,12,14)) %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_refit(data = m750) # Refit Structure @@ -492,8 +493,8 @@ test_that("Models for Mega Test", { # Forecast Structure expect_s3_class(forecast_tbl, "tbl_df") - actual_tbl <- forecast_tbl %>% filter(.model_desc == "ACTUAL") - future_predictions_tbl <- forecast_tbl %>% filter(.model_desc != "ACTUAL") + actual_tbl <- forecast_tbl %>% dplyr::filter(.model_desc == "ACTUAL") + future_predictions_tbl <- forecast_tbl %>% dplyr::filter(.model_desc != "ACTUAL") 
expect_true(all(tail(actual_tbl$.index, 1) < future_predictions_tbl$.index)) diff --git a/tests/testthat/test-modeltime_table-no-calib-refit.R b/tests/testthat/test-modeltime_table-no-calib-refit.R index 4462b8c8..1909ed00 100644 --- a/tests/testthat/test-modeltime_table-no-calib-refit.R +++ b/tests/testthat/test-modeltime_table-no-calib-refit.R @@ -3,7 +3,6 @@ context("TEST FORECASTING WITH NO CALIBRATION") # SIMPLE PREDICTION ---- -# library(modeltime) # library(tidymodels) # library(tidyverse) # library(timetk) @@ -17,19 +16,19 @@ test_that("No Calibration", { skip_on_cran() # SETUP ---- - m750 <- m4_monthly %>% - filter(id == "M750") + m750 <- timetk::m4_monthly %>% + dplyr::filter(id == "M750") model_fit_arima <- arima_reg() %>% - set_engine("auto_arima") %>% + parsnip::set_engine("auto_arima") %>% fit(value ~ date, m750) model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% - fit(value ~ splines::ns(date, df = 5) + month(date, label = TRUE), m750) + parsnip::set_engine("lm") %>% + fit(value ~ splines::ns(date, df = 5) + lubridate::month(date, label = TRUE), m750) model_fit_prophet <- prophet_reg() %>% - set_engine("prophet") %>% + parsnip::set_engine("prophet") %>% fit(value ~ date, m750) # Non-Calibration 1: h = 3 years, actual_data = m750 ---- diff --git a/tests/testthat/test-nested-modeltime.R b/tests/testthat/test-nested-modeltime.R index 8c3331a3..4fbd934e 100644 --- a/tests/testthat/test-nested-modeltime.R +++ b/tests/testthat/test-nested-modeltime.R @@ -11,9 +11,7 @@ test_that("MODELTIME NESTED (ITERATIVE) FORECASTING", { # SETUP - library(testthat) library(tidymodels) - library(modeltime) library(tidyverse) library(timetk) @@ -21,17 +19,16 @@ test_that("MODELTIME NESTED (ITERATIVE) FORECASTING", { # DATA PREP FUNCTIONS ---- data_prep_tbl <- walmart_sales_weekly %>% - select(id, Date, Weekly_Sales) %>% - set_names(c("id", "date", "value")) + dplyr::select(id, date = Date, value = Weekly_Sales) tib_1 <- data_prep_tbl %>% - filter(id %in% c("1_1", 
"1_3")) + dplyr::filter(id %in% c("1_1", "1_3")) tib_2 <- data_prep_tbl %>% - filter(id %in% c("1_8")) %>% - slice_tail( n= 10) + dplyr::filter(id %in% c("1_8")) %>% + dplyr::slice_tail(n = 10) - data_start_tbl <- bind_rows(tib_1, tib_2) + data_start_tbl <- dplyr::bind_rows(tib_1, tib_2) @@ -62,34 +59,34 @@ test_that("MODELTIME NESTED (ITERATIVE) FORECASTING", { # * XGBoost ---- - rec_xgb <- recipe(value ~ ., extract_nested_train_split(nested_data_tbl)) %>% + rec_xgb <- recipes::recipe(value ~ ., extract_nested_train_split(nested_data_tbl)) %>% step_timeseries_signature(date) %>% - step_rm(date) %>% - step_zv(all_predictors()) %>% - step_dummy(all_nominal_predictors(), one_hot = TRUE) + recipes::step_rm(date) %>% + recipes::step_zv(recipes::all_predictors()) %>% + recipes::step_dummy(recipes::all_nominal_predictors(), one_hot = TRUE) - wflw_xgb <- workflow() %>% - add_model(boost_tree("regression") %>% set_engine("xgboost")) %>% - add_recipe(rec_xgb) + wflw_xgb <- workflows::workflow() %>% + workflows::add_model(boost_tree("regression") %>% parsnip::set_engine("xgboost")) %>% + workflows::add_recipe(rec_xgb) # * Bad Model ---- # - Xgboost can't handle dates - recipe_bad <- recipe(value ~ ., extract_nested_train_split(nested_data_tbl)) + recipe_bad <- recipes::recipe(value ~ ., extract_nested_train_split(nested_data_tbl)) - wflw_bad <- workflow() %>% - add_model(boost_tree("regression") %>% set_engine("xgboost")) %>% - add_recipe(recipe_bad) + wflw_bad <- workflows::workflow() %>% + workflows::add_model(boost_tree("regression") %>% parsnip::set_engine("xgboost")) %>% + workflows::add_recipe(recipe_bad) # * Prophet ---- - # rec_prophet <- recipe(value ~ date, extract_nested_train_split(nested_data_tbl)) + # rec_prophet <- recipes::recipe(value ~ date, extract_nested_train_split(nested_data_tbl)) # - # wflw_prophet <- workflow() %>% - # add_model( + # wflw_prophet <- workflows::workflow() %>% + # workflows::add_model( # prophet_reg("regression", seasonality_yearly = 
TRUE) %>% - # set_engine("prophet") + # parsnip::set_engine("prophet") # ) %>% - # add_recipe(rec_prophet) + # workflows::add_recipe(rec_prophet) # "modeltime_nested_fit: Good + Bad Model" @@ -259,7 +256,7 @@ test_that("MODELTIME NESTED (ITERATIVE) FORECASTING", { expect_equal(ncol(fcast_tbl), 0) # fcast_tbl %>% - # group_by(id) %>% + # dplyr::group_by(id) %>% # plot_modeltime_forecast() diff --git a/tests/testthat/test-panel-data.R b/tests/testthat/test-panel-data.R index 51acde6d..e4d3aeb6 100644 --- a/tests/testthat/test-panel-data.R +++ b/tests/testthat/test-panel-data.R @@ -6,46 +6,46 @@ context("PANEL DATA") test_that("Panel Data - Forecast Jumbled", { skip_on_cran() - + skip_if_not_installed("kernlab") # - m4_monthly_jumbled <- m4_monthly %>% + m4_monthly_jumbled <- timetk::m4_monthly %>% arrange(desc(date)) data_set <- m4_monthly_jumbled - recipe_spec <- recipe(value ~ date + id, data_set) %>% - step_mutate(date_num = as.numeric(date)) %>% - step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% - step_dummy(all_nominal(), one_hot = TRUE) + recipe_spec <- recipes::recipe(value ~ date + id, data_set) %>% + recipes::step_mutate(date_num = as.numeric(date)) %>% + recipes::step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% + recipes::step_dummy(all_nominal(), one_hot = TRUE) set.seed(123) - wflw_fit_prophet <- workflow() %>% - add_model( + wflw_fit_prophet <- workflows::workflow() %>% + workflows::add_model( prophet_boost( seasonality_yearly = F, seasonality_weekly = F, seasonality_daily = F ) %>% - set_engine( + parsnip::set_engine( "prophet_xgboost" # , # colsample_bytree = 1 ) ) %>% - add_recipe(recipe_spec) %>% + workflows::add_recipe(recipe_spec) %>% fit(data_set) set.seed(123) - wflw_fit_svm <- workflow() %>% - add_model(svm_rbf(mode = "regression") %>% set_engine("kernlab")) %>% - add_recipe(recipe_spec %>% step_rm(date)) %>% + wflw_fit_svm <- workflows::workflow() %>% + workflows::add_model(svm_rbf(mode = "regression") %>% 
parsnip::set_engine("kernlab")) %>% + workflows::add_recipe(recipe_spec %>% step_rm(date)) %>% fit(data_set) # set.seed(123) - # wflw_fit_xgb <- workflow() %>% - # add_model(boost_tree() %>% set_engine("xgboost")) %>% - # add_recipe(recipe_spec %>% step_rm(date)) %>% + # wflw_fit_xgb <- workflows::workflow() %>% + # workflows::add_model(boost_tree() %>% parsnip::set_engine("xgboost")) %>% + # workflows::add_recipe(recipe_spec %>% step_rm(date)) %>% # fit(data_set) # Panel Data - Forecast Jumbled @@ -90,7 +90,7 @@ test_that("Panel Data - Forecast Jumbled", { # * Test Model ---- svm_tbl <- forecast_tbl %>% - filter(.model_id == 2) + dplyr::filter(.model_id == 2) expect_equal(nrow(svm_tbl), nrow(data_set)) expect_equal(svm_tbl$.index, svm_tbl$date) diff --git a/tests/testthat/test-recursive-chunk-uneven.R b/tests/testthat/test-recursive-chunk-uneven.R index 2baa3a57..75206c3a 100644 --- a/tests/testthat/test-recursive-chunk-uneven.R +++ b/tests/testthat/test-recursive-chunk-uneven.R @@ -21,7 +21,7 @@ test_that("Chunked Recursive Tests - Uneven ", { # recursive 1 - single / recipe / parsnip ---- # Lag Recipe - recipe_lag <- recipe(value ~ date, m750_extended) %>% + recipe_lag <- recipes::recipe(value ~ date, m750_extended) %>% step_lag(value, lag = c(3,6,9,12)) # Data Transformation @@ -36,11 +36,11 @@ test_that("Chunked Recursive Tests - Uneven ", { # * Recursive Modeling ---- model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ date, data = train_data) model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( transform = recipe_lag, @@ -61,7 +61,7 @@ test_that("Chunked Recursive Tests - Uneven ", { keep_data = TRUE ) - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -88,7 +88,7 
@@ test_that("Chunked Recursive Tests - Uneven ", { actual_data = retrain_tbl ) - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -123,14 +123,14 @@ test_that("Chunked Recursive Tests - Uneven ", { filter(is.na(value)) # * Recursive Modeling ---- - wflw_fit_lm <- workflow() %>% - add_recipe(recipe(value ~ date, train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ date, train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( transform = lag_transformer, @@ -153,7 +153,7 @@ test_that("Chunked Recursive Tests - Uneven ", { # forecast_tbl %>% plot_modeltime_forecast() - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -184,7 +184,7 @@ test_that("Chunked Recursive Tests - Uneven ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -199,7 +199,7 @@ test_that("Chunked Recursive Tests - Uneven ", { # recursive 3 - panel / function / parsnip + workflow # Jumble the data to make sure it forecasts properly - m4_monthly_updated <- 
m4_monthly %>% + m4_monthly_updated <- timetk::m4_monthly %>% arrange(desc(id), date) %>% mutate(id = as_factor(as.character(id))) @@ -232,7 +232,7 @@ test_that("Chunked Recursive Tests - Uneven ", { # * Recursive Modeling ---- model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( id = "id", @@ -241,9 +241,9 @@ test_that("Chunked Recursive Tests - Uneven ", { chunk_size = 3 ) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( id = "id", @@ -264,13 +264,13 @@ test_that("Chunked Recursive Tests - Uneven ", { ) %>% modeltime_forecast( new_data = future_data, - actual_data = m4_monthly, + actual_data = timetk::m4_monthly, keep_data = TRUE ) # forecast_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds_1), @@ -314,8 +314,8 @@ test_that("Chunked Recursive Tests - Uneven ", { # forecast_refit_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_refit_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), @@ -346,7 
+346,7 @@ test_that("Chunked Recursive Tests - Single ", { # recursive 1 - single / recipe / parsnip ---- # Lag Recipe - recipe_lag <- recipe(value ~ date, m750_extended) %>% + recipe_lag <- recipes::recipe(value ~ date, m750_extended) %>% step_lag(value, lag = c(3,6,9,12)) # Data Transformation @@ -361,11 +361,11 @@ test_that("Chunked Recursive Tests - Single ", { # * Recursive Modeling ---- model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ date, data = train_data) model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( transform = recipe_lag, @@ -386,7 +386,7 @@ test_that("Chunked Recursive Tests - Single ", { keep_data = TRUE ) - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -413,7 +413,7 @@ test_that("Chunked Recursive Tests - Single ", { actual_data = retrain_tbl ) - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -448,14 +448,14 @@ test_that("Chunked Recursive Tests - Single ", { filter(is.na(value)) # * Recursive Modeling ---- - wflw_fit_lm <- workflow() %>% - add_recipe(recipe(value ~ date, train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ date, train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., 
train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( transform = lag_transformer, @@ -478,7 +478,7 @@ test_that("Chunked Recursive Tests - Single ", { # forecast_tbl %>% plot_modeltime_forecast() - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -509,7 +509,7 @@ test_that("Chunked Recursive Tests - Single ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -524,7 +524,7 @@ test_that("Chunked Recursive Tests - Single ", { # recursive 3 - panel / function / parsnip + workflow # Jumble the data to make sure it forecasts properly - m4_monthly_updated <- m4_monthly %>% + m4_monthly_updated <- timetk::m4_monthly %>% arrange(desc(id), date) %>% mutate(id = as_factor(as.character(id))) @@ -557,7 +557,7 @@ test_that("Chunked Recursive Tests - Single ", { # * Recursive Modeling ---- model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( id = "id", @@ -566,9 +566,9 @@ test_that("Chunked Recursive Tests - Single ", { chunk_size = 3 ) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( id = "id", @@ -589,13 +589,13 @@ test_that("Chunked Recursive Tests - Single ", { ) %>% modeltime_forecast( new_data = future_data, - actual_data = m4_monthly, 
+ actual_data = timetk::m4_monthly, keep_data = TRUE ) # forecast_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds_1), @@ -639,8 +639,8 @@ test_that("Chunked Recursive Tests - Single ", { # forecast_refit_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_refit_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), diff --git a/tests/testthat/test-recursive-chunk.R b/tests/testthat/test-recursive-chunk.R index 8ae080d6..7d742515 100644 --- a/tests/testthat/test-recursive-chunk.R +++ b/tests/testthat/test-recursive-chunk.R @@ -21,7 +21,7 @@ test_that("Chunked Recursive Tests ", { # recursive 1 - single / recipe / parsnip ---- # Lag Recipe - recipe_lag <- recipe(value ~ date, m750_extended) %>% + recipe_lag <- recipes::recipe(value ~ date, m750_extended) %>% step_lag(value, lag = c(3,6,9,12)) # Data Transformation @@ -36,11 +36,11 @@ test_that("Chunked Recursive Tests ", { # * Recursive Modeling ---- model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ date, data = train_data) model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( transform = recipe_lag, @@ -64,7 +64,7 @@ test_that("Chunked Recursive Tests ", { # Visualize # forecast_tbl %>% plot_modeltime_forecast() - 
preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -93,7 +93,7 @@ test_that("Chunked Recursive Tests ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -128,14 +128,14 @@ test_that("Chunked Recursive Tests ", { filter(is.na(value)) # * Recursive Modeling ---- - wflw_fit_lm <- workflow() %>% - add_recipe(recipe(value ~ date, train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ date, train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( transform = lag_transformer, @@ -158,7 +158,7 @@ test_that("Chunked Recursive Tests ", { # forecast_tbl %>% plot_modeltime_forecast() - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -189,7 +189,7 @@ test_that("Chunked Recursive Tests ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), 
length(preds) @@ -204,7 +204,7 @@ test_that("Chunked Recursive Tests ", { # recursive 3 - panel / function / parsnip + workflow # Jumble the data to make sure it forecasts properly - m4_monthly_updated <- m4_monthly %>% + m4_monthly_updated <- timetk::m4_monthly %>% arrange(desc(id), date) %>% mutate(id = as_factor(as.character(id))) @@ -237,7 +237,7 @@ test_that("Chunked Recursive Tests ", { # * Recursive Modeling ---- model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( id = "id", @@ -246,9 +246,9 @@ test_that("Chunked Recursive Tests ", { chunk_size = 3 ) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( id = "id", @@ -269,13 +269,13 @@ test_that("Chunked Recursive Tests ", { ) %>% modeltime_forecast( new_data = future_data, - actual_data = m4_monthly, + actual_data = timetk::m4_monthly, keep_data = TRUE ) # forecast_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds_1), @@ -319,8 +319,8 @@ test_that("Chunked Recursive Tests ", { # forecast_refit_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_refit_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 
1) %>% dplyr::pull(.value) + preds_2 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), diff --git a/tests/testthat/test-recursive.R b/tests/testthat/test-recursive.R index 1d2608f6..e48608bb 100644 --- a/tests/testthat/test-recursive.R +++ b/tests/testthat/test-recursive.R @@ -21,7 +21,7 @@ test_that("Recursive Tests ", { # recursive 1 - single / recipe / parsnip ---- # Lag Recipe - recipe_lag <- recipe(value ~ date, m750_extended) %>% + recipe_lag <- recipes::recipe(value ~ date, m750_extended) %>% step_lag(value, lag = 1:FORECAST_HORIZON) # Data Transformation @@ -36,11 +36,11 @@ test_that("Recursive Tests ", { # * Recursive Modeling ---- model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ date, data = train_data) model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( transform = recipe_lag, @@ -63,7 +63,7 @@ test_that("Recursive Tests ", { # Visualize # forecast_tbl %>% plot_modeltime_forecast() - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -92,7 +92,7 @@ test_that("Recursive Tests ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -127,14 +127,14 @@ test_that("Recursive Tests ", { filter(is.na(value)) # * Recursive Modeling ---- - wflw_fit_lm <- workflow() %>% - add_recipe(recipe(value ~ date, train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ date, train_data)) %>% + 
workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( transform = lag_transformer, @@ -156,7 +156,7 @@ test_that("Recursive Tests ", { # forecast_tbl %>% plot_modeltime_forecast() - preds <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds) @@ -187,7 +187,7 @@ test_that("Recursive Tests ", { # forecast_refit_tbl %>% plot_modeltime_forecast() - preds <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) + preds <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), length(preds) @@ -202,7 +202,7 @@ test_that("Recursive Tests ", { # recursive 3 - panel / function / parsnip + workflow # Jumble the data to make sure it forecasts properly - m4_monthly_updated <- m4_monthly %>% + m4_monthly_updated <- timetk::m4_monthly %>% arrange(desc(id), date) %>% mutate(id = as_factor(as.character(id))) @@ -235,7 +235,7 @@ test_that("Recursive Tests ", { # * Recursive Modeling ---- model_fit_lm_recursive <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ ., data = train_data) %>% recursive( id = "id", @@ -243,9 +243,9 @@ test_that("Recursive Tests ", { train_tail = panel_tail(train_data, id, FORECAST_HORIZON) ) - wflw_fit_lm_recursive <- workflow() %>% - add_recipe(recipe(value ~ ., train_data)) %>% - add_model(linear_reg() %>% set_engine("lm")) %>% + wflw_fit_lm_recursive <- workflows::workflow() %>% + 
workflows::add_recipe(recipes::recipe(value ~ ., train_data)) %>% + workflows::add_model(linear_reg() %>% parsnip::set_engine("lm")) %>% fit(train_data) %>% recursive( id = "id", @@ -265,13 +265,13 @@ test_that("Recursive Tests ", { ) %>% modeltime_forecast( new_data = future_data, - actual_data = m4_monthly, + actual_data = timetk::m4_monthly, keep_data = TRUE ) # forecast_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_data$value), length(preds_1), @@ -316,8 +316,8 @@ test_that("Recursive Tests ", { # forecast_refit_tbl %>% group_by(id) %>% plot_modeltime_forecast() - preds_1 <- forecast_refit_tbl %>% filter(.model_id == 1) %>% pull(.value) - preds_2 <- forecast_refit_tbl %>% filter(.model_id == 2) %>% pull(.value) + preds_1 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 1) %>% dplyr::pull(.value) + preds_2 <- forecast_refit_tbl %>% dplyr::filter(.model_id == 2) %>% dplyr::pull(.value) expect_equal( length(future_tbl$value), diff --git a/tests/testthat/test-refit-parallel.R b/tests/testthat/test-refit-parallel.R index 1e63117b..4551e97d 100644 --- a/tests/testthat/test-refit-parallel.R +++ b/tests/testthat/test-refit-parallel.R @@ -3,33 +3,33 @@ test_that("refit works in parallel", { skip_on_cran() - - m4_monthly_jumbled <- m4_monthly %>% + m4_monthly_jumbled <- timetk::m4_monthly %>% arrange(desc(date)) data_set <- m4_monthly_jumbled - recipe_spec <- recipe(value ~ date + id, data_set) %>% - step_mutate(date_num = as.numeric(date)) %>% - step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% - step_dummy(all_nominal(), one_hot = TRUE) + recipe_spec <- recipes::recipe(value ~ date + id, data_set) %>% + 
recipes::step_mutate(date_num = as.numeric(date)) %>% + recipes::step_mutate(month_lbl = lubridate::month(date, label = TRUE)) %>% + recipes::step_dummy(all_nominal(), one_hot = TRUE) set.seed(123) - wflw_fit_prophet <- workflow() %>% - add_model( + wflw_fit_prophet <- workflows::workflow() %>% + workflows::add_model( prophet_boost( seasonality_yearly = F, seasonality_weekly = F, seasonality_daily = F ) %>% - set_engine("prophet_xgboost")) %>% - add_recipe(recipe_spec) %>% + parsnip::set_engine("prophet_xgboost")) %>% + workflows::add_recipe(recipe_spec) %>% fit(data_set) + skip_if_not_installed("kernlab") set.seed(123) - wflw_fit_svm <- workflow() %>% - add_model(svm_rbf("regression") %>% set_engine("kernlab")) %>% - add_recipe(recipe_spec %>% update_role(date, new_role = "ID")) %>% + wflw_fit_svm <- workflows::workflow() %>% + workflows::add_model(svm_rbf("regression") %>% parsnip::set_engine("kernlab")) %>% + workflows::add_recipe(recipe_spec %>% update_role(date, new_role = "ID")) %>% fit(data_set) diff --git a/tests/testthat/test-results-accuracy-tables.R b/tests/testthat/test-results-accuracy-tables.R index d7aca897..d05060f2 100644 --- a/tests/testthat/test-results-accuracy-tables.R +++ b/tests/testthat/test-results-accuracy-tables.R @@ -4,8 +4,8 @@ context("TEST MODELTIME ACCURACY & TABLES") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") -splits <- initial_time_split(m750, prop = 0.8) +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") +splits <- rsample::initial_time_split(m750, prop = 0.8) # ACCURACY ---- @@ -18,18 +18,18 @@ test_that("Test Modeltime Accuracy", { # Model Spec model_fit_arima <- arima_reg() %>% - set_engine("auto_arima") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("auto_arima") %>% + fit(value ~ date, rsample::training(splits)) model_fit_prophet <- prophet_reg() %>% - set_engine("prophet") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("prophet") %>% + fit(value ~ date, 
rsample::training(splits)) model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ splines::ns(date, df = 5) - + month(date, label = TRUE), - training(splits)) + + lubridate::month(date, label = TRUE), + rsample::training(splits)) # Model Table model_tbl <- modeltime_table( @@ -39,18 +39,18 @@ test_that("Test Modeltime Accuracy", { ) calibration_tbl <- model_tbl %>% - modeltime_calibrate(testing(splits)) + modeltime_calibrate(rsample::testing(splits)) # Test Modeltime Accuracy acc_tbl_1 <- calibration_tbl %>% modeltime_accuracy() - acc_tbl_2 <- model_tbl %>% modeltime_accuracy(testing(splits)) + acc_tbl_2 <- model_tbl %>% modeltime_accuracy(rsample::testing(splits)) - acc_tbl_3 <- calibration_tbl %>% modeltime_accuracy(training(splits)) + acc_tbl_3 <- calibration_tbl %>% modeltime_accuracy(rsample::training(splits)) - acc_tbl_4 <- model_tbl %>% modeltime_accuracy(training(splits)) + acc_tbl_4 <- model_tbl %>% modeltime_accuracy(rsample::training(splits)) # Structure nms_expected <- c(".model_id", ".model_desc", ".type", @@ -154,22 +154,22 @@ test_that("accuracy table, GT - Workflow Interface", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) accuracy_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_accuracy() # * Reactable table ---- diff --git 
a/tests/testthat/test-results-forecast-plots.R b/tests/testthat/test-results-forecast-plots.R index d34dd20a..48c2dddc 100644 --- a/tests/testthat/test-results-forecast-plots.R +++ b/tests/testthat/test-results-forecast-plots.R @@ -4,8 +4,8 @@ context("TEST MODELTIME PLOTS") # SETUP ---- # Data -m750 <- m4_monthly %>% filter(id == "M750") -splits <- initial_time_split(m750, prop = 0.8) +m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") +splits <- rsample::initial_time_split(m750, prop = 0.8) @@ -17,16 +17,16 @@ test_that("modeltime plotting", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # PARSNIP INTERFACE ---- model_fit <- model_spec %>% - fit(log(value) ~ date, data = training(splits)) + fit(log(value) ~ date, data = rsample::training(splits)) # * Forecast ---- forecast_tbl <- model_fit %>% - modeltime_calibrate(new_data = testing(splits)) %>% + modeltime_calibrate(new_data = rsample::testing(splits)) %>% modeltime_forecast( actual_data = m750, conf_interval = 0.95) @@ -35,14 +35,14 @@ test_that("modeltime plotting", { # * ggplot2 visualization ---- g <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.interactive = FALSE) # * plotly visualization ---- suppressWarnings({ # Needed until plotly is resolved: https://github.com/ropensci/plotly/issues/1783 p <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.interactive = TRUE) }) @@ -62,12 +62,12 @@ test_that("modeltime plotting", { # # PLOTS WITHOUT CONF INTERVALS ----- g <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.interactive = FALSE, .conf_interval_show = FALSE) p <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + 
dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.interactive = TRUE, .conf_interval_show = FALSE) # Structure @@ -90,32 +90,32 @@ test_that("modeltime plot - workflow, Test Static ggplot", { # Model Spec model_spec <- arima_reg(seasonal_period = 12) %>% - set_engine("auto_arima") + parsnip::set_engine("auto_arima") # Recipe spec - recipe_spec <- recipe(value ~ date, data = training(splits)) %>% - step_log(value, skip = FALSE) + recipe_spec <- recipes::recipe(value ~ date, data = rsample::training(splits)) %>% + recipes::step_log(value, skip = FALSE) # Workflow - wflw <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) + wflw <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) wflw_fit <- wflw %>% - fit(training(splits)) + fit(rsample::training(splits)) forecast_tbl <- wflw_fit %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_forecast(actual_data = m750, conf_interval = 0.8) # * ggplot2 visualization ---- g <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.conf_interval_show = TRUE, .interactive = FALSE) # * plotly visualization ---- p <- forecast_tbl %>% - mutate_at(vars(.value:.conf_hi), exp) %>% + dplyr::mutate_at(dplyr::vars(.value:.conf_hi), exp) %>% plot_modeltime_forecast(.conf_interval_show = TRUE, .interactive = TRUE) diff --git a/tests/testthat/test-results-residuals-tests.R b/tests/testthat/test-results-residuals-tests.R index 2b57a51c..30ffebc1 100644 --- a/tests/testthat/test-results-residuals-tests.R +++ b/tests/testthat/test-results-residuals-tests.R @@ -9,23 +9,23 @@ test_that("Test Modeltime Residuals Tests", { skip_on_cran() # Data - m750 <- m4_monthly %>% filter(id == "M750") - splits <- initial_time_split(m750, prop = 0.8) + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") + 
splits <- rsample::initial_time_split(m750, prop = 0.8) # Model Spec model_fit_arima <- arima_reg() %>% - set_engine("auto_arima") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("auto_arima") %>% + fit(value ~ date, rsample::training(splits)) model_fit_prophet <- prophet_reg() %>% - set_engine("prophet") %>% - fit(value ~ date, training(splits)) + parsnip::set_engine("prophet") %>% + fit(value ~ date, rsample::training(splits)) model_fit_lm <- linear_reg() %>% - set_engine("lm") %>% + parsnip::set_engine("lm") %>% fit(value ~ splines::ns(date, df = 5) - + month(date, label = TRUE), - training(splits)) + + lubridate::month(date, label = TRUE), + rsample::training(splits)) # Model Table model_tbl <- modeltime_table( @@ -35,26 +35,26 @@ test_that("Test Modeltime Residuals Tests", { ) residuals_tbl <- model_tbl %>% - modeltime_calibrate(testing(splits)) %>% + modeltime_calibrate(rsample::testing(splits)) %>% modeltime_residuals() res_tbl_1 <- residuals_tbl %>% modeltime_residuals_test() - res_tbl_2 <- model_tbl %>% modeltime_residuals_test(training(splits)) + res_tbl_2 <- model_tbl %>% modeltime_residuals_test(rsample::training(splits)) - res_tbl_3 <- model_tbl %>% modeltime_residuals_test(testing(splits)) + res_tbl_3 <- model_tbl %>% modeltime_residuals_test(rsample::testing(splits)) res_tbl_4 <- model_fit_arima %>% - modeltime_calibrate(new_data = training(splits)) %>% - modeltime_residuals_test(new_data = training(splits)) + modeltime_calibrate(new_data = rsample::training(splits)) %>% + modeltime_residuals_test(new_data = rsample::training(splits)) res_tbl_5 <- model_fit_arima %>% - modeltime_calibrate(new_data = testing(splits)) %>% - modeltime_residuals_test(new_data = testing(splits)) + modeltime_calibrate(new_data = rsample::testing(splits)) %>% + modeltime_residuals_test(new_data = rsample::testing(splits)) - res_tbl_6 <- residuals_tbl %>% modeltime_residuals_test(training(splits)) + res_tbl_6 <- residuals_tbl %>% 
modeltime_residuals_test(rsample::training(splits)) - res_tbl_7 <- residuals_tbl %>% modeltime_residuals_test(testing(splits)) + res_tbl_7 <- residuals_tbl %>% modeltime_residuals_test(rsample::testing(splits)) # Structure nms_expected <- c(".model_id", ".model_desc", "shapiro_wilk", "box_pierce", "ljung_box", "durbin_watson") @@ -88,7 +88,7 @@ test_that("Test Modeltime Residuals Tests", { expect_error({ # Missing new_data or calibration data - model_fit_arima %>% modeltime_residuals_test(new_data = training(splits)) + model_fit_arima %>% modeltime_residuals_test(new_data = rsample::training(splits)) }) expect_error({ diff --git a/tests/testthat/test-tune_workflows.R b/tests/testthat/test-tune_workflows.R index 2649ee45..dc1a4ade 100644 --- a/tests/testthat/test-tune_workflows.R +++ b/tests/testthat/test-tune_workflows.R @@ -9,7 +9,7 @@ test_that("Tuning, arima_boost", { # - m750 <- m4_monthly %>% filter(id == "M750") + m750 <- timetk::m4_monthly %>% dplyr::filter(id == "M750") # RESAMPLE SPEC ---- resample_spec <- time_series_cv(data = m750, @@ -20,9 +20,9 @@ test_that("Tuning, arima_boost", { slice_limit = 2) # Recipe - recipe_spec <- recipe(value ~ date, data = m750) %>% - step_mutate(as.numeric(date)) %>% - step_date(date, features = "month") + recipe_spec <- recipes::recipe(value ~ date, data = m750) %>% + recipes::step_mutate(as.numeric(date)) %>% + recipes::step_date(date, features = "month") # Model model_spec <- arima_boost( @@ -37,13 +37,13 @@ test_that("Tuning, arima_boost", { seasonal_ma = 0, # XGBoost Tuning Params - min_n = tune() + min_n = tune::tune() ) %>% - set_engine("arima_xgboost") + parsnip::set_engine("arima_xgboost") # Grid set.seed(3) - grid_spec <- grid_latin_hypercube( + grid_spec <- dials::grid_latin_hypercube( parameters(min_n()), size = 3 ) @@ -51,14 +51,15 @@ test_that("Tuning, arima_boost", { parallel_start(2) # Tune - tune_results_boosted <- workflow() %>% - add_recipe(recipe_spec) %>% - add_model(model_spec) %>% - tune_grid( + # 
This fails if no previous versions of modeltime exist. + tune_results_boosted <- workflows::workflow() %>% + workflows::add_recipe(recipe_spec) %>% + workflows::add_model(model_spec) %>% + tune::tune_grid( resamples = resample_spec, grid = grid_spec, metrics = default_forecast_accuracy_metric_set(), - control = control_grid( + control = tune::control_grid( verbose = TRUE, allow_par = TRUE, ) @@ -71,8 +72,8 @@ test_that("Tuning, arima_boost", { expect_equal(ncol(tune_results_boosted), 4) tune_results_boosted_metrics <- tune_results_boosted %>% - select(.metrics) %>% - unnest(.metrics) + dplyr::select(.metrics) %>% + tidyr::unnest(.metrics) expect_equal(nrow(tune_results_boosted_metrics), 36)