diff --git a/pkgdown.yml b/pkgdown.yml
index e6433fc0..412b40df 100644
--- a/pkgdown.yml
+++ b/pkgdown.yml
@@ -21,7 +21,7 @@ articles:
   articles/story-seven-test-types: story-seven-test-types.html
   articles/story-spending-time-example: story-spending-time-example.html
   articles/story-update-boundary: story-update-boundary.html
-last_built: 2024-10-17T20:43Z
+last_built: 2024-10-24T17:56Z
 urls:
   reference: https://merck.github.io/gsDesign2/reference
   article: https://merck.github.io/gsDesign2/articles
diff --git a/reference/gs_create_arm.html b/reference/gs_create_arm.html
index 6dd0fe07..6e2ab178 100644
--- a/reference/gs_create_arm.html
+++ b/reference/gs_create_arm.html
@@ -48,11 +48,11 @@

Arguments

enroll_rate
-Enrollment rates.
+Enrollment rates from define_enroll_rate().

fail_rate
-Failure and dropout rates.
+Failure and dropout rates from define_fail_rate().

ratio
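
The updated argument descriptions above point to define_enroll_rate() and define_fail_rate(). As an illustration only (not part of the diff), a minimal R sketch of such inputs, following the usage shown in the package vignettes included later in this changeset:

library(gsDesign2)

# Piecewise constant enrollment: ramp-up over three 2-month periods, then steady state
enroll_rate <- define_enroll_rate(
  duration = c(2, 2, 2, 6),
  rate = (1:4) / 4
)

# Piecewise exponential failure and dropout with a delayed effect (HR 1, then 0.6)
fail_rate <- define_fail_rate(
  duration = c(4, Inf),
  fail_rate = log(2) / 12,
  hr = c(1, 0.6),
  dropout_rate = 0.001
)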
diff --git a/reference/gs_design_ahr.html b/reference/gs_design_ahr.html
index 4337edf8..41616a53 100644
--- a/reference/gs_design_ahr.html
+++ b/reference/gs_design_ahr.html
@@ -354,7 +354,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -383,7 +383,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -520,7 +520,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -549,7 +549,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -775,7 +775,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -804,7 +804,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -941,7 +941,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -970,7 +970,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1196,7 +1196,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1225,7 +1225,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1362,7 +1362,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1391,7 +1391,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1625,7 +1625,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1654,7 +1654,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1791,7 +1791,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1820,7 +1820,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -2055,7 +2055,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -2084,7 +2084,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -2221,7 +2221,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -2250,7 +2250,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -2482,7 +2482,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -2511,7 +2511,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -2648,7 +2648,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -2677,7 +2677,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -2920,7 +2920,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -2949,7 +2949,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -3092,7 +3092,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -3121,7 +3121,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -3369,7 +3369,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -3398,7 +3398,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -3541,7 +3541,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -3557,7 +3557,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x559389887a20> +#> <bytecode: 0x564e708482e8> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -3804,7 +3804,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -3833,7 +3833,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -3856,7 +3856,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -3973,7 +3973,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -3989,7 +3989,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar diff --git a/reference/gs_design_rd.html b/reference/gs_design_rd.html index 5f326ad5..1ccede81 100644 --- a/reference/gs_design_rd.html +++ b/reference/gs_design_rd.html @@ -411,7 +411,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -440,7 +440,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -466,7 +466,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar diff --git a/reference/gs_design_wlr.html b/reference/gs_design_wlr.html index e99ae0b1..5bb844aa 100644 --- a/reference/gs_design_wlr.html +++ b/reference/gs_design_wlr.html @@ -242,7 +242,7 @@

Examples#> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } -#> <environment: 0x55938321bdb0> +#> <environment: 0x564e6e0b7d10> #> #> $input$approx #> [1] "asymptotic" @@ -392,7 +392,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -421,7 +421,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -558,7 +558,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -587,7 +587,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -687,7 +687,7 @@

Examples#> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } -#> <environment: 0x55938321bdb0> +#> <environment: 0x564e6e0b7d10> #> #> $input$approx #> [1] "asymptotic" @@ -837,7 +837,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -866,7 +866,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1003,7 +1003,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1032,7 +1032,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1132,7 +1132,7 @@

Examples#> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } -#> <environment: 0x55938321bdb0> +#> <environment: 0x564e6e0b7d10> #> #> $input$approx #> [1] "asymptotic" @@ -1282,7 +1282,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1311,7 +1311,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1448,7 +1448,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1477,7 +1477,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend diff --git a/reference/gs_info_ahr.html b/reference/gs_info_ahr.html index 69d64b1d..b228dd37 100644 --- a/reference/gs_info_ahr.html +++ b/reference/gs_info_ahr.html @@ -59,11 +59,11 @@

Arguments

enroll_rate
-Enrollment rates.
+Enrollment rates from define_enroll_rate().

fail_rate
-Failure and dropout rates.
+Failure and dropout rates from define_fail_rate().

ratio
@@ -85,11 +85,11 @@

Arguments

Value

-A data frame with columns Analysis, Time, AHR, Events, theta, info, info0.
-info, and info0 contain statistical information under H1, H0, respectively.
-For analysis k, Time[k] is the maximum of analysis_time[k] and the
+A data frame with columns analysis, time, ahr, event, theta, info, info0.
+The columns info and info0 contain statistical information under H1, H0, respectively.
+For analysis k, time[k] is the maximum of analysis_time[k] and the
 expected time required to accrue the targeted event[k].
-AHR is the expected average hazard ratio at each analysis.
+ahr is the expected average hazard ratio at each analysis.
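
As a hedged illustration of the renamed lower-case columns (not part of the diff), a small R check mirroring Example 2 in the Examples hunk below, which supplies only targeted analysis times:

library(gsDesign2)

# One row per analysis; columns are now lower case:
# analysis, time, event, ahr, theta, info, info0
x <- gs_info_ahr(analysis_time = c(18, 27, 36))
names(x)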

Details

@@ -118,8 +118,8 @@

Examples#> 2 2 19.16437 40.00000 0.7442008 0.2954444 9.789940 10.00000 #> 3 3 24.54264 50.00000 0.7128241 0.3385206 12.227632 12.50000 # } -# Example 2 ---- +# Example 2 ---- # Only put in targeted analysis times gs_info_ahr(analysis_time = c(18, 27, 36)) #> analysis time event ahr theta info info0 diff --git a/reference/gs_power_ahr.html b/reference/gs_power_ahr.html index 6d3c86b4..4c4d6dec 100644 --- a/reference/gs_power_ahr.html +++ b/reference/gs_power_ahr.html @@ -334,7 +334,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -363,7 +363,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -500,7 +500,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -529,7 +529,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -765,7 +765,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -794,7 +794,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -931,7 +931,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -960,7 +960,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1196,7 +1196,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1225,7 +1225,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1362,7 +1362,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1391,7 +1391,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1631,7 +1631,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1660,7 +1660,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1797,7 +1797,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1826,7 +1826,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend diff --git a/reference/gs_power_wlr.html b/reference/gs_power_wlr.html index 5d89c5e3..fe8a5867 100644 --- a/reference/gs_power_wlr.html +++ b/reference/gs_power_wlr.html @@ -248,7 +248,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -267,7 +267,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -291,7 +291,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale @@ -398,7 +398,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -417,7 +417,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -441,7 +441,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale @@ -548,7 +548,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -567,7 +567,7 @@

Examples#> return(par[k]) #> } #> } -#> <bytecode: 0x559383f82ef8> +#> <bytecode: 0x564e7092e590> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -591,7 +591,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale @@ -811,7 +811,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -840,7 +840,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -980,7 +980,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1009,7 +1009,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1034,7 +1034,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale @@ -1256,7 +1256,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1285,7 +1285,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1425,7 +1425,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1454,7 +1454,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1479,7 +1479,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale @@ -1701,7 +1701,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -1730,7 +1730,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -1870,7 +1870,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -1899,7 +1899,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend @@ -1924,7 +1924,7 @@

Examples#> arm1) #> (1 - esurv)^rho * esurv^gamma #> } -#> <bytecode: 0x5593841664d8> +#> <bytecode: 0x564e70ad5d90> #> <environment: namespace:gsDesign2> #> #> $input$info_scale diff --git a/reference/gs_spending_bound.html b/reference/gs_spending_bound.html index 59f6edd0..b7e98da2 100644 --- a/reference/gs_spending_bound.html +++ b/reference/gs_spending_bound.html @@ -320,7 +320,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$upar @@ -349,7 +349,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$upar$total_spend @@ -492,7 +492,7 @@

Examples#> } #> } #> } -#> <bytecode: 0x559383f85e48> +#> <bytecode: 0x564e709314e0> #> <environment: namespace:gsDesign2> #> #> $input$lpar @@ -521,7 +521,7 @@

Examples#> class(x) <- "spendfn" #> x #> } -#> <bytecode: 0x5593817d4158> +#> <bytecode: 0x564e6e12d888> #> <environment: namespace:gsDesign> #> #> $input$lpar$total_spend diff --git a/search.json b/search.json index 69a64439..f2bb6ac1 100644 --- a/search.json +++ b/search.json @@ -1 +1 @@ -[{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Quick start for NPH sample size and power","text":"provide simple examples use gsDesign2 package deriving fixed group sequential designs non-proportional hazards. piecewise model enrollment, failure rates, dropout rates changing hazard ratio time allow great flexibility design assumptions. Users encouraged suggest features immediate long-term interest add. Topics included : Packages required used. Specifying enrollment rates. Specifying failure dropout rates possibly changing hazard ratio time. Deriving fixed design interim analysis. Simple boundary specification group sequential design. Deriving group sequential design non-proportional hazards. Displaying design properties. Design properties alternate assumptions. Differences gsDesign. Future enhancement priorities. items discussed briefly enable quick start early adopters also suggesting ultimate possibilities software enables. Finally, final section provides current enhancement priorities, potential topic-related enhancements discussed throughout document.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"packages-used","dir":"Articles","previous_headings":"","what":"Packages used","title":"Quick start for NPH sample size and power","text":"gsDesign package used check results proportional hazards well source deriving bounds using spending functions. computations compute expected event accumulation average hazard ratio time; key inputs group sequential distribution parameters. implement group sequential distribution theory non-proportional hazards derive wide variety boundary types group sequential designs. simtrial package used verify design properties using simulation.","code":"library(gsDesign) library(gsDesign2) library(knitr) library(dplyr) library(gt) library(ggplot2)"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"enrollment-rates","dir":"Articles","previous_headings":"","what":"Enrollment rates","title":"Quick start for NPH sample size and power","text":"Piecewise constant enrollment rates input tabular format. assume enrollment ramp-25\\%, 50\\%, 75\\% final enrollment rate 2 months followed steady state 100\\% enrollment another 6 months. rates increased later power design appropriately. However, fixed enrollment rate periods remain unchanged.","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 2, 6), rate = (1:4) / 4 ) enroll_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"failure-and-dropout-rates","dir":"Articles","previous_headings":"","what":"Failure and dropout rates","title":"Quick start for NPH sample size and power","text":"Constant failure dropout rates specified study period stratum; consider single stratum . hazard ratio provided treatment/control hazard rate period stratum. dropout rate period assumed treatment group; restriction eliminated future version, needed. Generally, take advantage identity exponential distribution median m, corresponding failure rate \\lambda \\lambda = \\log(2) / m. consider control group exponential time--event 12 month median. 
assume hazard ratio 1 4 months, followed hazard ratio 0.6 thereafter. Finally, assume low 0.001 exponential dropout rate per month treatment groups.","code":"median_surv <- 12 fail_rate <- define_fail_rate( duration = c(4, Inf), fail_rate = log(2) / median_surv, hr = c(1, .6), dropout_rate = .001 ) fail_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"fixed-design","dir":"Articles","previous_headings":"","what":"Fixed design","title":"Quick start for NPH sample size and power","text":"enrollment, failure dropout rate assumptions now derive sample size trial targeted complete 36 months interim analysis, 90\\% power 2.5\\% Type error. quick summary targeted sample size obtained . Note normally round N even number Events next integer. enrollment rates period increased proportionately size trial desired properties; duration enrollment rate changed.","code":"alpha <- .025 beta <- .1 # 1 - targeted power d <- fixed_design_ahr( enroll_rate = enroll_rate, # Relative enrollment rates fail_rate = fail_rate, # Failure rates from above alpha = alpha, # Type I error power = 1 - beta, # Type II error = 1 - power study_duration = 36 # Planned trial duration ) d %>% summary() %>% as_gt() d$enroll_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"group-sequential-design","dir":"Articles","previous_headings":"","what":"Group sequential design","title":"Quick start for NPH sample size and power","text":"go detail group sequential designs . brief, however, sequence tests Z_1, Z_2,\\ldots, Z_K follow multivariate normal distribution performed test new treatment better control (Jennison Turnbull (1999)). assume Z_k > 0 favorable experimental treatment. Generally Type error set tests controlled null hypothesis treatment difference sequence bounds b_1, b_2,\\ldots,b_K chosen Type error \\alpha > 0 \\alpha = 1 - P_0(\\cap_{k=1}^K Z_k < b_k) P_0() refers probability null hypothesis. referred non-binding bound since assumed trial stopped early futility Z_k small.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"simple-efficacy-bound-definition","dir":"Articles","previous_headings":"Group sequential design","what":"Simple efficacy bound definition","title":"Quick start for NPH sample size and power","text":"Lan DeMets (1983) developed spending function method deriving group sequential bounds. involves use non-decreasing spending function f(t) t \\geq 0 f(0)=0 f(t)=\\alpha t \\geq 1. Suppose K>0 analyses performed proportion t_1< t_2 <\\ldots t_K=1 planned statistical information (e.g., proportion planned events time--event endpoint trial proportion observations binomial normal endpoint). Bounds first k analyses 1\\leq k\\leq K recursively defined spending function multivariate normal distribution satisfy f(t_k) = 1 - P_0(\\cap_{j=1}^k Z_j < b_j). quick start, illustrate type efficacy bound. Perhaps common spending function approach Lan DeMets (1983) approximation O’Brien-Fleming bound f(t) = 2-2\\Phi\\left(\\frac{\\Phi^{-1}(1-\\alpha/2)}{t^{1/2}}\\right). Suppose K=3 t_1=0.5, t_2 = 0.75, t_3 = 1. can use assumptions group sequential design efficacy bound using Lan-DeMets O’Brien-Fleming spending function \\alpha = 0.025 Bounds 3 analyses follows. Note expected sample size time data cutoff analysis also N. filter upper bound lower bounds Z = -Inf shown. 
gsDesign replicate bounds (replicate sample size).","code":"design1s <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(16, 26, 36), # Calendar time of planned analyses upper = gs_spending_bound, # Spending function bound for efficacy upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # Specify spending function and total Type I error lower = gs_b, lpar = rep(-Inf, 3), # No futility bound info_scale = \"h0_h1_info\" ) design1s %>% summary() %>% as_gt( title = \"1-sided group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound\" ) x <- gsDesign(k = 3, test.type = 1, timing = design1s$analysis$info_frac, sfu = sfLDOF) cat( \"gsDesign\\n Upper bound: \", x$upper$bound, \"\\n Cumulative boundary crossing probability (H0): \", cumsum(x$upper$prob[, 1]), \"\\n Timing (IF): \", x$timing, \"\\ngs_design_ahr\\n Upper bound: \", design1s$bound$z, \"\\n Cumulative boundary crossing probability (H0): \", design1s$bound$probability0, \"\\n Timinng (IF): \", design1s$analysis$info_frac, \"\\n\" ) #> gsDesign #> Upper bound: 3.013804 2.264946 2.027236 #> Cumulative boundary crossing probability (H0): 0.00128997 0.01217731 0.025 #> Timing (IF): 0.4850799 0.7993622 1 #> gs_design_ahr #> Upper bound: 3.003506 2.256138 2.028823 #> Cumulative boundary crossing probability (H0): 0.001334442 0.01246455 0.025 #> Timinng (IF): 0.4850799 0.7993622 1"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"two-sided-testing","dir":"Articles","previous_headings":"Group sequential design","what":"Two-sided testing","title":"Quick start for NPH sample size and power","text":"consider symmetric asymmetric 2-sided designs.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"symmetric-2-sided-bounds","dir":"Articles","previous_headings":"Group sequential design > Two-sided testing","what":"Symmetric 2-sided bounds","title":"Quick start for NPH sample size and power","text":"first 2-sided design symmetric design. Design bounds confirmed : bounds can plotted easily:","code":"design2ss <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(16, 26, 36), # Calendar analysis times upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), h1_spending = FALSE # This specifies futility testing with spending under NULL ) design2ss %>% summary() %>% as_gt( title = \"2-sided symmetric group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound\" ) ggplot( data = design2ss$analysis %>% left_join(design2ss$bound, by = \"analysis\"), aes(x = event, y = z, group = bound) ) + geom_line(aes(linetype = bound)) + geom_point() + ggtitle(\"2-sided symmetric bounds with O'Brien-Fleming-like spending\")"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"asymmetric-2-sided-bounds","dir":"Articles","previous_headings":"Group sequential design > Two-sided testing","what":"Asymmetric 2-sided bounds","title":"Quick start for NPH sample size and power","text":"Asymmetric 2-sided designs common symmetric since objectives two bounds tend different. often caution analyze early efficacy use conservative bound; principles used example designs far. 
Stopping lack benefit experimental treatment control overt indication unfavorable trend generally might examined early bounds less stringent. add early futility analysis nominal 1-sided p-value 0.05 wrong direction (Z=\\Phi^{-1}(0.05) 30% 50\\% events accrued. might considered disaster check. point time, may perceived need futility analysis. efficacy, add infinite bound first interim analysis. now slightly larger sample size account possibility early futility stop. Bounds now:","code":"design2sa <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(12, 16, 26, 36), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # Same efficacy bound as before test_lower = c(FALSE, TRUE, TRUE, TRUE), # Only test efficacy after IA1 lower = gs_b, lpar = c(rep(qnorm(.05), 2), -Inf, -Inf) # Fixed lower bound at first 2 analyses ) design2sa %>% summary() %>% as_gt( title = \"2-sided asymmetric group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound for efficacy, futility disaster check at IA1, IA2 only\" )"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Average hazard ratio and sample size under non-proportional hazards","text":"document demonstrates applications average hazard ratio concept design fixed designs without interim analysis. Throughout consider 2-arm trial experimental control group time--event endpoint. Testing differences treatment groups performed using stratified logrank test. setting, gsDesign2::ahr() routine provides average hazard ratio can used sample size using function gsDesign::nSurv(). approach assumes piecewise constant enrollment rates piecewise exponential failure rates option including multiple strata. approach allows flexibility approximate wide variety scenarios. evaluate approximations used via simulation using simtrial package; specifically provide simulation routine changes specified user easily incorporated. consider non-proportional hazards single stratum multiple strata different underlying proportional hazards assumptions. two things note regarding differences simtrial::simfix() gsDesign2::ahr(): simtrial::simfix() less flexible requires strata enrolled relative rates throughout trial whereas gsDesign2::ahr() allows, example, enrollment start stop different times different strata. document, use restrictive parameterization simtrial::simfix() can confirm asymptotic sample size approximation based gsDesign2::ahr() simulation. simtrial::simfix() provides flexibility test statistics used gsDesign2::ahr() documented pMaxCombo vignette demonstrating use Fleming-Harrington weighted logrank tests combinations tests.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"document-organization","dir":"Articles","previous_headings":"Introduction","what":"Document organization","title":"Average hazard ratio and sample size under non-proportional hazards","text":"vignette organized follows: single stratum design assumes delayed treatment benefit. stratified example assumes different proportional hazards 3 strata. Description design scenario. Deriving average hazard ratio. Deriving sample size based average hazard ratio. Computing plotting average hazard ratio function time. Simulation verify sample size approximation provides targeted power. 
simulation done data cutoff performed 5 different ways: Based targeted trial duration Based targeted minimum follow-duration Based targeted event count Based maximum targeted event count targeted trial duration Based maximum targeted event count targeted minimum follow-method based waiting achieve targeted event count targeted minimum follow-appears practical provide targeted power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"initial-setup","dir":"Articles","previous_headings":"Introduction > Document organization","what":"Initial setup","title":"Average hazard ratio and sample size under non-proportional hazards","text":"begin setting two parameters used throughout simulations used verify accuracy power approximations; either customized simulation. First, set number simulations performed. can increase improve accuracy simulation estimates power. Simulations using simtrial::simfix() routine use blocked randomization. set change individual simulations. Based balanced randomization block set randomization ratio experimental control 1. load packages needed . gsDesign used implementation Schoenfeld (1981) approximation compute number events required power trial proportional hazards assumption. dplyr tibble work tabular data ‘data wrangling’ approach coding. simtrial enable simulations. survival enable Cox proportional hazards estimation (average) hazard ratio simulation compare approximation provided gsDesign2::ahr() routine computes expected average hazard ratio trial (Kalbfleisch Prentice (1981), Schemper, Wakounig, Heinze (2009)). Hidden underneath gsDesign2::eEvents_df() routine provides expected event counts period stratum hazard ratio differs. basic calculation used gsDesign2::ahr() routine.","code":"nsim <- 2000 block <- rep(c(\"Control\", \"Experimental\"), 2) ratio <- 1 library(gsDesign) library(gsDesign2) library(ggplot2) library(dplyr) library(tibble) library(survival) library(gt)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"design-scenario","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Design scenario","title":"Average hazard ratio and sample size under non-proportional hazards","text":"set first scenario design parameters. Enrollment ramps course first 4 months follow-steady state enrollment thereafter. adjusted proportionately power trial later. control group piecewise exponential distribution median 9 first 3 months 18 thereafter. hazard ratio experimental group versus control 1 first 3 months followed 0.55 thereafter. Since single stratum, set strata default:","code":"# Note: this is done differently for multiple strata; see below! enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), dropout_rate = .001, hr = c(1, .55) ) total_duration <- 30 strata <- tibble::tibble(stratum = \"All\", p = 1)"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"computing-average-hazard-ratio","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Computing average hazard ratio","title":"Average hazard ratio and sample size under non-proportional hazards","text":"compute average hazard ratio using gsDesign2::ahr() (average hazard ratio) routine. modify enrollment rates proportionately sample size computed. result given enrollment rates adjusted next step. 
However, since adjusted proportionately relative enrollment timing changing, average hazard ratio change. Approximations statistical information null (info0) alternate (info) hypotheses provided . Recall parameterization terms \\log(HR), , thus information intended approximate 1 variance Cox regression coefficient treatment effect; checked simulation later. result can explained number events observed first 3 months treatment treatment group. Now can replicate geometric average hazard ratio (AHR) computed using ahr() routine . compute logarithm HR computed weighted average weighting expected number events hazard ratio. Exponentiating resulting weighted average gives geometric mean hazard ratio, label AHR.","code":"avehr <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) avehr %>% gt() xx <- pw_info( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) xx %>% gt() xx %>% summarize(AHR = exp(sum(event * log(hr) / sum(event)))) %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"deriving-the-design","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Deriving the design","title":"Average hazard ratio and sample size under non-proportional hazards","text":"average hazard ratio, use call gsDesign::nEvents() uses Schoenfeld (1981) approximation derive targeted number events. need average hazard ratio , randomization ratio (experimental/control), Type error Type II error (1 - power). also compute proportionately increase enrollment rates achieve targeted number events; round number events required next higher integer. also compute sample size, rounding nearest even integer.","code":"target_event <- gsDesign::nEvents( hr = avehr$ahr, # average hazard ratio computed above ratio = 1, # randomization ratio alpha = .025, # 1-sided Type I error beta = .1 # Type II error (1-power) ) target_event <- ceiling(target_event) target_event #> [1] 309 # Update enroll_rate to obtain targeted events enroll_rate$rate <- ceiling(target_event) / avehr$event * enroll_rate$rate avehr <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) avehr %>% gt() # round up sample size in both treatment groups sample_size <- ceiling(sum(enroll_rate$rate * enroll_rate$duration) / 2) * 2 sample_size #> [1] 576"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"average-hazard-ratio-and-expected-event-accumulation-over-time","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Average hazard ratio and expected event accumulation over time","title":"Average hazard ratio and sample size under non-proportional hazards","text":"examine average hazard ratio function trial duration modified enrollment required power trial. also plot expected event accrual time; although graphs go 40 months, recall targeted trial duration 30 months. 
key design consideration selecting trial duration based things like degree ahr improvement time versus urgency completing trial quickly possible, noting required sample size decrease longer follow-.","code":"avehrtbl <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = 1:(total_duration + 10) ) ggplot(avehrtbl, aes(x = time, y = ahr)) + geom_line() + ylab(\"Average HR\") + ggtitle(\"Average HR as a function of study duration\") ggplot(avehrtbl, aes(x = time, y = event)) + geom_line() + ylab(\"Expected events\") + ggtitle(\"Expected event accumulation as a function of study duration\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"simulation-to-verify-power","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Simulation to verify power","title":"Average hazard ratio and sample size under non-proportional hazards","text":"use function simtrial::simfix() simplify setting executing simulation evaluate sample size derivation . Arguments simtrial::simfix() slightly different set-used gsDesign2::ahr() function used . Thus, reformatting input parameters involved. One difference gsDesign2::ahr() parameterization simtrial::simfix() block provided specify fixed block randomization opposed ratio gsDesign2::ahr(). following summarizes outcomes data cutoff chosen. Regardless cutoff chosen, see power approximates targeted 90% quite well. statistical information computed simulation computed one simulation variance Cox regression coefficient treatment (.e., log hazard ratio). column HR exponentiated mean Cox regression coefficients (geometric mean HR). see HR estimate matches simulations quite well. column info estimated statistical information alternate hypothesis, info0 estimate null hypothesis. value info0 1/4 expected events calculated . case, information approximation alternate hypothesis appears slightly small, meaning asymptotic approximation used overpower trial. Nonetheless, approximation power appear quite good noted .","code":"# Do simulations # Cut at targeted study duration results1 <- simtrial::simfix( nsim = nsim, block = block, sampleSize = sample_size, strata = strata, enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration, target_event = ceiling(target_event), timingType = 1:5 ) # Loading the data saved previously results1 <- readRDS(\"fixtures/results1.rds\") results1$Positive <- results1$Z <= qnorm(.025) results1 %>% group_by(cut) %>% summarise( Simulations = n(), Power = mean(Positive), sdDur = sd(Duration), Duration = mean(Duration), sdEvents = sd(Events), Events = mean(Events), HR = exp(mean(lnhr)), sdlnhr = sd(lnhr), info = 1 / sdlnhr^2 ) %>% gt() %>% fmt_number(column = 2:9, decimals = 3) avehr %>% gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"design-scenario-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Design scenario","title":"Average hazard ratio and sample size under non-proportional hazards","text":"set design scenario parameter. limited simultaneous enrollment strata since simtrial::simfix() routine uses simtrial::simPWSurv() limited scenario. specify three strata: High risk: 1/3 population median time--event 6 months treatment effect hazard ratio 1.2. Moderate risk: 1/2 population median time--event 9 months hazard ratio 0.2. 
Low risk: 1/6 population essentially cured arms (median 100, HR = 1).","code":"strata <- tibble::tibble(stratum = c(\"High\", \"Moderate\", \"Low\"), p = c(1 / 3, 1 / 2, 1 / 6)) enroll_rate <- define_enroll_rate( stratum = c(array(\"High\", 4), array(\"Moderate\", 4), array(\"Low\", 4)), duration = rep(c(2, 2, 2, 18), 3), rate = c((1:4) / 3, (1:4) / 2, (1:4) / 6) ) fail_rate <- define_fail_rate( stratum = c(\"High\", \"Moderate\", \"Low\"), duration = 100, fail_rate = log(2) / c(6, 9, 100), dropout_rate = .001, hr = c(1.2, 1 / 3, 1) ) total_duration <- 36"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"computing-average-hazard-ratio-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Computing average hazard ratio","title":"Average hazard ratio and sample size under non-proportional hazards","text":"Now transform enrollment rates account stratified population. examine expected events stratum. Getting average log(HR) weighted Events exponentiating, get overall AHR just derived.","code":"ahr2 <- ahr(enroll_rate, fail_rate, total_duration) ahr2 %>% gt() xx <- pw_info(enroll_rate, fail_rate, total_duration) xx %>% gt() xx %>% ungroup() %>% summarise(lnhr = sum(event * log(hr)) / sum(event), AHR = exp(lnhr)) %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"deriving-the-design-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Deriving the design","title":"Average hazard ratio and sample size under non-proportional hazards","text":"derive sample size . plan sample size based average hazard ratio overall population use across strata. First, derive targeted events: Next, adapt enrollment rates proportionately trial powered targeted failure rates follow-duration. targeted sample size, rounding even integer, :","code":"target_event <- gsDesign::nEvents( hr = ahr2$ahr, ratio = 1, alpha = .025, beta = .1 ) target_event <- ceiling(target_event) target_event #> [1] 216 enroll_rate <- enroll_rate %>% mutate(rate = target_event / ahr2$event * rate) ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration ) %>% gt() sample_size <- ceiling(sum(enroll_rate$rate * enroll_rate$duration) / 2) * 2 sample_size #> [1] 340"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"average-hr-and-expected-event-accumulation-over-time","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Average HR and expected event accumulation over time","title":"Average hazard ratio and sample size under non-proportional hazards","text":"Plotting average hazard ratio function study duration, see improves considerably course study. also plot expected event accumulation. 
, plot 10 months planned study duration 36 months allow evaluation event accumulation versus treatment effect different trial durations.","code":"avehrtbl <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = 1:(total_duration + 10) ) ggplot(avehrtbl, aes(x = time, y = ahr)) + geom_line() + ylab(\"Average HR\") + ggtitle(\"Average HR as a function of study duration\") ggplot(avehrtbl, aes(x = time, y = event)) + geom_line() + ylab(\"Expected events\") + ggtitle(\"Expected event accumulation as a function of study duration\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"simulation-to-verify-power-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Simulation to verify power","title":"Average hazard ratio and sample size under non-proportional hazards","text":"change enrollment rates stratum produced gsDesign::nSurv() overall enrollment rates needed simtrial::simfix(). Now simulate summarize results. , see expected statistical information simulation greater expected Schoenfeld approximation expected events divided 4. Finally, compare simulation results asymptotic approximation . achieved power simulation just targeted 90%; noting simulation standard error 0.006, asymptotic approximation quite good. Using final cutoff requires targeted events minimum follow-seems reasonable convention preserved targeted design power.","code":"er <- enroll_rate %>% group_by(stratum) %>% mutate(period = seq_len(n())) %>% group_by(period) %>% summarise(rate = sum(rate), duration = last(duration)) er %>% gt() results2 <- simtrial::simfix( nsim = nsim, block = block, sampleSize = sample_size, strata = strata, enroll_rate = er, fail_rate = fail_rate, total_duration = as.numeric(total_duration), target_event = as.numeric(target_event), timingType = 1:5 ) results2 <- readRDS(\"fixtures/results2.rds\") results2$Positive <- (pnorm(results2$Z) <= .025) results2 %>% group_by(cut) %>% summarize( Simulations = n(), Power = mean(Positive), sdDur = sd(Duration), Duration = mean(Duration), sdEvents = sd(Events), Events = mean(Events), HR = exp(mean(lnhr)), sdlnhr = sd(lnhr), info = 1 / sdlnhr^2 ) %>% gt() %>% fmt_number(column = 2:9, decimals = 3) ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration ) %>% gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Approximating an arbitrary survival distribution","text":"demonstrate approximate arbitrary continuous survival distributions piecewise exponential approximations. enables sample size computations arbitrary survival models using software designed piecewise exponential distribution. Three functions particular demonstrated: s2pwe() translates arbitrary survival distribution piecewise exponential. ppwe() computes cumulative survival distribution upper tail distribution form generated s2pwe(). p_pm() provides cumulative survival distribution Poisson mixture distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"lognormal-approximation","dir":"Articles","previous_headings":"","what":"Lognormal approximation","title":"Approximating an arbitrary survival distribution","text":"demonstrate s2pwe() approximating lognormal distribution piecewise exponential failure rates. 
Note resulting log_normal_rate used, final piecewise exponential duration extended. , arbitrarily approximated 6 piecewise exponential rates duration 1 unit time (say, month) followed final rate extends infinity. compare resulting approximation actual lognormal survival using ppwe() compute survival probabilities P\\{T>t\\}. better approximation, use larger number points. plot log scale y-axis since piecewise exponential survival ppwe() piecewise linear scale. note beginning rate period approximation actual survival distribution approximation match exactly indicated circles graph. considered lognormal distribution due flexibility allows hazard rates time; see, example, Wikipedia.","code":"log_normal_rate <- s2pwe( times = c(1:6, 9), survival = plnorm(c(1:6, 9), meanlog = 0, sdlog = 2, lower.tail = FALSE) ) log_normal_rate ## # A tibble: 7 × 2 ## duration rate ## ## 1 1 0.693 ## 2 1 0.316 ## 3 1 0.224 ## 4 1 0.177 ## 5 1 0.148 ## 6 1 0.128 ## 7 3 0.103 # Use a large number of points to plot lognormal survival times <- seq(0, 12, .025) plot(times, plnorm(times, meanlog = 0, sdlog = 2, lower.tail = FALSE), log = \"y\", type = \"l\", main = \"Lognormal Distribution vs. Piecewise Approximation\", yaxt = \"n\", ylab = \"log(Survival)\", col = 1 ) # Now plot the pieceise approximation using the 7-point approximation from above lines( times, ppwe(x = times, duration = log_normal_rate$duration, rate = log_normal_rate$rate), col = 2 ) # Finally, add point markers at the points used in the approximation points(x = c(0:6), plnorm(c(0:6), meanlog = 0, sdlog = 2, lower.tail = FALSE), col = 1) text(x = c(5, 5), y = c(.5, .4), labels = c(\"Log-normal\", \"Piecewise Approximation (7 pts)\"), col = 1:2, pos = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"poisson-mixture-model","dir":"Articles","previous_headings":"","what":"Poisson mixture model","title":"Approximating an arbitrary survival distribution","text":"consider Poisson mixture model incorporate cure model sample size planning. form survival function S(t)=\\exp(-\\theta F_0(t)) t \\geq 0 F_0(t) continuous cumulative distribution function non-negative random variable F_0(0)=0 F_0(t)\\uparrow 1 t\\uparrow \\infty. note t\\uparrow \\infty, S(t)\\downarrow \\exp(-\\theta)=c refer c cure rate. function p_pm() assumes F_0(t)=1-\\exp(-\\lambda t) exponential cumulative distribution function resulting survival distribution t \\geq 0: S(t; \\theta, \\lambda) = \\exp(-\\theta(1-\\exp(-\\lambda t))). Note set default lower.tail=FALSE survival function computation default: plot \\lambda = \\log(2) / 10 make F_0(t) exponential distribution median 10. set \\theta = -\\log(.4) obtain cure rate 0.4. overlay piecewise exponential approximation. note two different \\theta values provide proportional hazards model ratio cumulative hazard function H(t; \\theta, \\lambda) = \\theta\\exp(-\\lambda t) constant: \\frac{\\log(S(t; \\theta_1, \\lambda))}{\\log(S(t; \\theta_2, \\lambda))} = \\theta_1/\\theta_2. given \\theta value can compute \\lambda provide survival rate c_1 > \\exp(-\\theta) arbitrary time t_1>0 setting: \\lambda = -\\log\\left(\\frac{\\theta - \\log(c_1)}{\\theta}\\right)/t_1. 
compute \\theta \\lambda values cure rate 0.4 survival rate 0.6 30 months: confirm survival time 30:","code":"p_pm <- function(x, theta, lambda, lower_tail = FALSE) { exp(-theta * (1 - exp(-lambda * x))) } lambda <- log(2) / 10 theta <- -log(.4) times <- 0:40 plot(times, p_pm(times, theta, lambda), type = \"l\", ylab = \"Survival\", xlab = \"Time\", log = \"y\") # Now compute piecewise exponential approximation x <- seq(8, 40, 8) pm_rate <- s2pwe( times = x, survival = p_pm(x, theta = theta, lambda = lambda) ) # Now plot the piecewise approximation using the 7-point approximation from above lines( c(0, x), ppwe(x = c(0, x), duration = pm_rate$duration, rate = pm_rate$rate), col = 2 ) points(c(0, x), p_pm(c(0, x), theta, lambda)) theta <- -log(0.4) lambda <- -log((theta + log(.6)) / theta) / 30 p_pm(30, theta, lambda) ## [1] 0.6"},{"path":"https://merck.github.io/gsDesign2/articles/story-canonical-h0-h1.html","id":"null-hypothesis","dir":"Articles","previous_headings":"","what":"Null hypothesis","title":"Canonical joint distribution of Z-score and B-values under null and alternative hypothesis","text":"distribution \\{B_k\\}_{k = 1, \\ldots, K} following structure: B_1, B_2, \\ldots, B_K multivariate normal distribution. E(B_k \\;|\\; H_0) = 0 k = 1, \\ldots, K. \\text{Var}(B_k \\;|\\; H_0) = t_k. \\text{Cov}(B_i, B_j \\;|\\; H_0) = t_i 1 \\leq \\leq j \\leq K. derivation last 2 statement \\begin{eqnarray} \\text{Var}(B_k\\;|\\; H_0) & = & \\frac{ \\text{Var}(\\sum_{=1}^{d_k} \\Delta_i | H_0) }{ \\text{Var}(\\sum_{=1}^{d_K} \\Delta_i | H_0) } = t_k\\\\ \\text{Cov}(B_i, B_j \\;|\\; H_0) & = & \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s\\;|\\; H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s\\;|\\; H_0 \\right) = t_i \\end{eqnarray} Accordingly, \\{Z_k\\}_{k = 1, \\ldots, K} canonical joint distribution following properties: Z_1, Z_2, \\ldots, Z_K multivariate normal distribution. E(Z_k \\;|\\; H_0) = 0. \\text{Var}(Z_k \\;|\\; H_0) = 1. \\text{Cov}(Z_i, Z_j \\;|\\; H_0) = \\sqrt{t_i/t_j} 1 \\leq \\leq j \\leq K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-canonical-h0-h1.html","id":"alternative-hypothesis","dir":"Articles","previous_headings":"","what":"Alternative hypothesis","title":"Canonical joint distribution of Z-score and B-values under null and alternative hypothesis","text":"alternative hypothesis, 2 B-values (B_i, B_j \\leq j), distribution \\{B_k\\}_{k = 1, \\ldots, K} following structure: B_1, B_2, \\ldots, B_K multivariate normal distribution. E(B_k \\;|\\; H_1) = \\theta_k t_k \\sqrt{\\mathcal I_{k, H_0}} k = 1, \\ldots, K. \\text{Var}(B_k \\;|\\; H_1) = t_k \\mathcal I_{k, H_0} / \\mathcal I_{k, H_1}. \\text{Cov}(B_i, B_j \\;|\\; H_1) = t_i \\; \\mathcal I_{, H_0}/\\mathcal I_{, H_1} 1 \\leq \\leq j \\leq K. last statement derived \\begin{eqnarray} \\text{Cov}(B_i, B_j \\;|\\; H_1) & = & \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s | H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_1 \\right) \\\\ & = & \\underbrace{ \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s | H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_0 \\right) }_{t_i} \\underbrace{ \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_1 \\right) }_{1/\\mathcal I_{, H_1}} \\bigg/ \\underbrace{ \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_0 \\right) }_{1/\\mathcal I_{, H_0}} \\\\ & = & t_i\\; \\mathcal I_{, H_0}/\\mathcal I_{, H_1}. 
\\end{eqnarray} Accordingly, Z_k has the canonical joint distribution with the following properties: Z_1, Z_2, \\ldots, Z_K have a multivariate normal distribution. E(Z_k \\;|\\; H_1) = \\theta_k \\sqrt{\\mathcal I_{k, H_0}}, where \\theta_k is the treatment effect at the k-th analysis. \\text{Var}(Z_k \\;|\\; H_1) = \\mathcal I_{k, H_0} / \\mathcal I_{k, H_1}. \\text{Cov}(Z_i, Z_j \\;|\\; H_1) = \\sqrt{\\frac{t_i}{t_j}} \\frac{\\mathcal I_{i, H_0}}{\\mathcal I_{i, H_1}} for 1 \\leq i \\leq j \\leq K. The last statement is derived as \\begin{eqnarray} \\text{Cov}(Z_i, Z_j \\;|\\; H_1) & = & \\text{Cov}(B_i/\\sqrt{t_i}, B_j/\\sqrt{t_j}) \\\\ & = & \\frac{1}{\\sqrt{t_i t_j}} \\text{Cov}(B_i, B_j) \\\\ & = & \\frac{1}{\\sqrt{t_i t_j}} \\text{Var}(B_i) \\\\ & = & \\sqrt{\\frac{t_i}{t_j}} \\frac{\\mathcal I_{i, H_0}}{\\mathcal I_{i, H_1}} \\end{eqnarray} If the local alternative assumption holds, then \\text{Cov}(Z_i, Z_j) \\approx \\sqrt{\\frac{t_i}{t_j}}, which is the format of the canonical joint distribution introduced in Chapter 3 of Proschan, Lan, Wittes (2006).","code":""},{"path":[]},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-compare-power-delay-effect.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Power for delayed effect scenarios","text":"We consider a delayed effect scenario where the control group time-to-event distribution is exponential with a median of 15 months. The experimental group hazard ratio vs. control is 1 for 6 months and 0.6 thereafter. Enrollment is at a constant rate for 12 months. Total study duration is from 20 to 48 months. The exponential dropout rate is 0.001 per month. For these scenarios, we investigate the power, sample size and events for 6 tests: fh_05: The Fleming-Harrington \\rho=0, \\gamma=0.5 test, used to obtain power of 85% given 1-sided Type I error of 0.025. fh_00: The regular logrank test (\\rho=0, \\gamma=0) with fixed study duration in \\{20, 24, 28, \\ldots, 60\\}. mc2_test: The MaxCombo test including 2 WLR tests, i.e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1)\\}. mc3_test: The MaxCombo test including 3 WLR tests, i.e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0.5, \\tau = -1)\\}. mc4_test: The MaxCombo test including 4 WLR tests, i.e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0, \\tau = -1)\\}. mb_6: The Magirr-Burman \\rho=-1, \\gamma=0, \\tau = 6 test with fixed study duration in \\{20, 24, 28, \\ldots, 60\\}. We compute power for the logrank test. 
general summary Fleming-Harrington test meaningful power gain relative logrank regardless study durations evaluated.","code":"enroll_rate <- define_enroll_rate(duration = 12, rate = 1) fail_rate <- define_fail_rate( duration = c(6, 100), fail_rate = log(2) / 15, hr = c(1, .6), dropout_rate = 0.001 ) enroll_rate %>% gt() %>% tab_header(title = \"Enrollment Table of Scenario 1\") fail_rate %>% gt() %>% tab_header(title = \"Failure Table of Scenario 1\") tab <- NULL for (trial_duration in seq(24, 60, 4)) { # Fleming-Harrington rho=0, gamma=0.5 test fh_05 <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.15, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration ) |> to_integer() # Regular logrank test fh_00 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) # MaxCombo test 1 mc2_test <- data.frame( rho = 0, gamma = c(0, .5), tau = -1, test = 1:2, analysis = 1, analysis_time = trial_duration ) mc_2 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc2_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test 2 mc3_test <- data.frame( rho = c(0, 0, .5), gamma = c(0, .5, .5), tau = -1, test = 1:3, analysis = 1, analysis_time = trial_duration ) mc_3 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc3_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc4_test <- data.frame( rho = c(0, 0, .5, .5), gamma = c(0, .5, .5, 0), tau = -1, test = 1:4, analysis = 1, analysis_time = trial_duration ) mc_4 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc4_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # Magirr-Burman rho=-1, gamma=0, tau = 6 test mb_6 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = -1, gamma = 0, tau = 15) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) tab_new <- tibble( `Study duration` = trial_duration, N = fh_05$analysis$n[1], Events = fh_05$analysi$event[1], `Events/N` = Events / N, # We use the AHR from regular WLR as the AHR of different MaxCombo test AHR = as.numeric(fh_00$analysis$ahr[1]), `FH(0, 0.5) power` = fh_05$bound$probability[1], `FH(0, 0) power` = fh_00$bound$probability[1], `MC2 power` = mc_2$bound$probability[1], `MC4 power` = mc_4$bound$probability[1], `MC3 power` = mc_3$bound$probability[1], `MB6 power` = mb_6$bound$probability[1] ) tab <- rbind(tab, tab_new) } tab %>% gt() %>% fmt_number(columns = c(2, 3), decimals = 1) %>% fmt_number(columns = 4, decimals = 2) %>% fmt_number(columns = 5, decimals = 4) %>% fmt_number(columns = 6:11, decimals = 
2)"},{"path":"https://merck.github.io/gsDesign2/articles/story-compare-power-delay-effect.html","id":"an-alternative-scenario","dir":"Articles","previous_headings":"","what":"An Alternative Scenario","title":"Power for delayed effect scenarios","text":"Now consider alternate scenario placebo group starts median, piecewise change median 30 16 months hazard ratio 0.85 late period.","code":"enroll_rate <- define_enroll_rate(duration = 12, rate = 1) fail_rate <- define_fail_rate( duration = c(6, 10, 100), # In Scenario 1: fail_rate = log(2) / 15, fail_rate = log(2) / c(15, 15, 30), dropout_rate = 0.001, # In Scenario 1: hr = c(1, .6) hr = c(1, .6, .85) ) enroll_rate %>% gt() %>% tab_header(title = \"Enrollment Table of Scenario 2\") fail_rate %>% gt() %>% tab_header(title = \"Failure Table of Scenario 2\") tab <- NULL for (trial_duration in seq(20, 60, 4)) { # Fleming-Harrington rho=0, gamma=0.5 test fh_05 <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.15, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, analysis_time = trial_duration ) |> to_integer() # Regular logrank test fh_00 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) }, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, analysis_time = trial_duration, event = .1 ) # MaxCombo test mc2_test <- data.frame( rho = 0, gamma = c(0, .5), tau = -1, test = 1:2, analysis = 1, analysis_time = trial_duration ) mc_2 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc2_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc3_test <- data.frame( rho = c(0, 0, .5), gamma = c(0, .5, .5), tau = -1, test = 1:3, analysis = 1, analysis_time = trial_duration ) mc_3 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc3_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc4_test <- data.frame( rho = c(0, 0, .5, .5), gamma = c(0, .5, .5, 0), tau = -1, test = 1:4, analysis = 1, analysis_time = trial_duration ) mc_4 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc4_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # Magirr-Burman rho=-1, gamma=0, tau = 6 test mb_6 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = -1, gamma = 0, tau = 15) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) tab_new <- tibble( `Study duration` = trial_duration, N = fh_05$analysis$n[1], Events = fh_05$analysi$event[1], `Events/N` = Events / N, # We use the AHR from regular WLR as the AHR of different MaxCombo test AHR = as.numeric(fh_00$analysis$ahr[1]), `FH(0, 0.5) power` = fh_05$bound$probability[1], `FH(0, 0) power` = fh_00$bound$probability[1], `MC2 power` = mc_2$bound$probability[1], `MC4 power` = mc_4$bound$probability[1], `MC3 power` 
= mc_3$bound$probability[1], `MB6 power` = mb_6$bound$probability[1] ) tab <- rbind(tab, tab_new) } tab %>% gt() %>% fmt_number(columns = c(2, 3), decimals = 1) %>% fmt_number(columns = 4, decimals = 2) %>% fmt_number(columns = 5, decimals = 4) %>% fmt_number(columns = 6:11, decimals = 2)"},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Computing expected events by interval at risk","text":"document derives algorithm computing expected events observed model piecewise constant enrollment, failure dropout rates similar Lachin Foulkes (1986). Specifically, design enable computation average hazard ratio use elsewhere approximate sample size fixed group sequential designs non-proportional hazards assumption (Kalbfleisch Prentice (1981), Schemper, Wakounig, Heinze (2009)). expected events calculation outlined implemented function expected_event().","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"general-formulation-and-notation","dir":"Articles","previous_headings":"","what":"General formulation and notation","title":"Computing expected events by interval at risk","text":"notation, study time scale denoted \\omega study start first opening enrollment \\omega=0. use variable t indicate patient time t=0 representing time patient enrolled. assume patient time enrollment event independent identically distributed subjects enrolled. also assume patient time censoring independent identically distributed subjects enrolled. individual, let X>0 denote patient time event Y>0 denote patient time loss--follow-. also let U denote (independent) study time entry patient. assume triplet X, Y, U independent. consider single treatment group stratum assume subjects enroll according Poisson process entry rate g(\\omega)\\geq 0 0 \\leq \\omega. expected number subjects enrolled study time \\omega simply \\begin{equation} G(\\omega)=\\int_0^\\omega g(u)du. \\end{equation} Analysis time--event data done using time enrollment patient event, drops , censored prior event time data cutoff; consider data cutoff fixed time \\Omega. key counts consider : \\bar{N}(t) : number patients events study least duration 00, Y_m>0 random variables independent study entry time U. let X_m Y_m define X Y, respectively, interval (t_{m-1},t_m], m=1,2,\\ldots,M, follows: \\begin{align} X&=\\sum_{m=1}^M \\min(X_m,t_m-t_{m-1}) \\prod_{j=1}^{m-1}\\{X_j>t_j-t_{j-1}\\}\\label{eq:Xdef}\\\\ Y&=\\sum_{m=1}^M \\min(Y_m,t_m-t_{m-1})\\prod_{j=1}^{m-1}\\{Y_j>t_j-t_{j-1}\\}\\label{eq:Ydef}. \\end{align} assume X_m Y_m independent exponentially distributed failure rates \\lambda_m \\eta_m, respectively, m=1,2,\\ldots,M. now assume subjects enroll constant rate J intervals defined 0=\\omega_0<\\omega_1<\\ldots<\\omega_J<\\infty. denote enrollment rates \\begin{equation}g(\\omega)=\\gamma_j\\geq 0\\label{eq:gj}\\end{equation} \\omega interval (\\omega_{j-1},\\omega_j], j=0,1,2,\\ldots,J. assume \\gamma_1>0, j>1 assume \\gamma_j \\geq 0. 
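As a small illustration of the expected enrollment G(\\omega) under piecewise constant enrollment rates, the following sketch (illustrative only, using the rates \\gamma_j = 3, 2, 0 over durations 1, 1, 5 from the example below) evaluates the cumulative expected enrollment at the interval end points:

duration <- c(1, 1, 5)            # interval lengths omega_j - omega_{j-1}
gamma <- c(3, 2, 0)               # piecewise constant enrollment rates
omega <- cumsum(duration)         # interval end points 1, 2, 7
G <- cumsum(gamma * duration)     # G_j = G_{j-1} + gamma_j * (omega_j - omega_{j-1})
data.frame(omega = omega, G = G)  # expected enrollment 3, 5, 5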
Letting G_0=0 recursively define j=1,\\ldots,J \\begin{equation}G_j=G(\\omega_j)=G_{j-1}+\\gamma_j(\\omega_j-\\omega_{j-1})\\label{eq:Gj}\\end{equation} thus \\omega\\[\\omega_{j-1},\\omega_j] expected enrollment study time \\omega \\begin{equation}G(\\omega)=G_{j-1}+\\gamma_j(\\omega-\\omega_{j-1}).\\label{eq:ENpw}\\end{equation}","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"an-example-under-the-piecewise-model","dir":"Articles","previous_headings":"The piecewise model","what":"An example under the piecewise model","title":"Computing expected events by interval at risk","text":"consider example piecewise model assuming J=3, \\omega_j=1,2,7 \\gamma_j=3,2,0 j=1,2,3. assume M=2 t_m=4,\\infty, failure rates \\lambda_m=.03,.06, dropout rates \\eta_m=0.001,.002. plot following plot enrollment rate axis right failure dropout rate axis left. plot \\omega reverse order related integration equation E\\{\\bar{n}(t_1,t_2)\\} . also plotted vertical dot-dashed line point either enrollment rate failure (dropout) rate changes.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"organizing-calculations-under-the-piecewise-model","dir":"Articles","previous_headings":"The piecewise model","what":"Organizing calculations under the piecewise model","title":"Computing expected events by interval at risk","text":"now proceed define algorithms computing expected events observed interval model piecewise constant enrollment, failure rates, dropout rates. assume study duration \\Omega=t_M. assume without loss generality sequence t_m, m=1,2,\\ldots, M constant failure rate \\lambda_m dropout rate \\eta_m interval (t_{m-1},t_m] well constant enrollment rate \\gamma_m interval (t_M-t_m,t_M-t_{m-1}]. Deriving intervals relatively straightforward exercise shown example . example, example , change points vertical lines drawn following scenario calculation purposes. define m=1,\\ldots,M intermediate probability calculations use calculating \\bar n(t_{m-1},t_m) follows: \\begin{align} q_m&=P\\{\\min(X_m,Y_m)>t_m-t_{m-1}\\}=\\exp^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})} \\label{eq:qm}\\\\ Q_m&=P\\{\\min(X,Y)>t_m\\}=\\prod_{j=1}^m q_j\\label{eq:Qm}\\\\ d_m&=P\\{t_{m-1}t_{m-1}\\}\\cdot P\\{0<\\min (X_m,Y_m)\\leq t_m-t_{m-1},X_m\\leq Y_m\\}\\\\ &=P\\{\\min(X,Y)>t_{m-1}\\}\\cdot P\\{0<\\min (X_m,Y_m)\\leq t_m-t_{m-1}\\}\\cdot P\\{X_m\\leq Y_m|0<\\min (X_m,Y_m)\\leq t_m-t_{m-1}\\}\\\\ &=Q_{m-1}(1-e^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})}) \\frac{\\lambda_m}{\\lambda_m+\\eta_m}\\\\ \\bar n_m&=E\\{\\bar n(t_{m-1},t_m)\\} \\end{align} Note \\lambda_m+\\eta_m=0, d_m=0. 
, \\begin{align} \\bar n_m&=G(t_M-t_m)P\\{t_{m-1}t_{m-1}\\} \\int_0^{t_m-t_{m-1}}g_{M+1-m}P\\{X_m\\leq v, X_m\\leq Y_m\\}dv\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\int_0^{t_m-t_{m-1}}\\left(1-\\exp^{-(\\lambda_m+\\eta_m)v}\\right)dv\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\left(t_m-t_{m-1}-\\frac{1-\\exp^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})}}{\\lambda_m+\\eta_m}\\right)\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\left(t_m-t_{m-1}-\\frac{1-q_m}{\\lambda_m+\\eta_m}\\right) \\end{align} now add q_m, Q_m, d_m calculations enable computation \\bar n_m, expected events time interval.","code":"name_tem <- names(x) names(x) <- c(\"m\", \"tm\", \"lambda\", \"eta\", \"j\", \"omega\", \"gamma\") y <- x %>% mutate( tdel = tm - lag(tm, default = 0), q = exp(-(lambda + eta) * tdel), Q = lag(cumprod(q), default = 1), d = Q * (1 - q) * lambda / (lambda + eta), G = c(5, 5, 3, 0), nbar = G * d + (lambda * Q * gamma) / (lambda + eta) * (tdel - (1 - q) / (lambda + eta)) ) yy <- y names(yy) <- c( \"$m$\", \"$t_m$\", \"$\\\\lambda_m$\", \"$\\\\eta_m$\", \"$j$\", \"$\\\\omega_j=t_M-t_{m-1}$\", \"$\\\\gamma_j$\", \"$t_m-t_{m-1}$\", \"$q_m$\", \"$Q_{m-1}$\", \"$d_m$\", \"$G_{j-1}$\", \"$\\\\bar{n}_m$\" ) yy <- yy %>% select(c(1:7, 12, 8:11, 13)) yy %>% kable(digits = 4) %>% kable_styling(c(\"striped\", \"bordered\")) %>% add_header_above(c( \"Failure and dropout rates\" = 4, \"Enrollment\" = 4, \"Events by time period\" = 5 ))"},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"verifying-calculations","dir":"Articles","previous_headings":"The piecewise model","what":"Verifying calculations","title":"Computing expected events by interval at risk","text":"check total number events using gsDesign function eEvents(). First, sum \\bar{n}_m values sum(y$nbar) get 1.083773 compare : Next, examine periods defined fail_rate: Now group rows y intervals. Finally, approximate specific numbers using simulation. First, simulate large dataset confirm simulation targeted enrollment pattern. 
Now confirm expected events follow-interval given targeted enrollment.","code":"event <- gsDesign::eEvents( lambda = y$lambda, eta = y$eta, gamma = y$gamma[rev(seq_along(y$gamma))], S = y$tdel[seq_len(length(y$tdel) - 1)], R = y$tdel[rev(seq_along(y$tdel))], T = max(y$tm) )$d event #> [1] 1.083773 expected_event( enroll_rate = define_enroll_rate(duration = c(1, 1), rate = c(3, 2)), fail_rate = define_fail_rate(duration = c(4, 3), fail_rate = c(.03, .06), dropout_rate = c(.001, .002)), total_duration = 7, simple = FALSE ) #> t fail_rate event #> 1 0 0.03 0.5642911 #> 2 4 0.06 0.5194821 y %>% mutate(t = c(0, 4, 4, 4)) %>% group_by(t) %>% summarise( fail_rate = first(lambda), Events = sum(nbar) ) #> # A tibble: 2 × 3 #> t fail_rate Events #> #> 1 0 0.03 0.564 #> 2 4 0.06 0.519 nsim <- 1e6 xx <- simtrial::simPWSurv( n = nsim, block = (rep(\"xx\", 4)), enroll_rate = define_enroll_rate(rate = c(3, 2) * nsim / 5, duration = c(1, 1)), fail_rate = tibble( stratum = \"All\", period = 1:2, Treatment = \"xx\", rate = c(.03, .06), duration = c(4, Inf) ), dropout_rate = tibble( stratum = \"All\", period = 1:2, Treatment = \"xx\", rate = c(.001, .002), duration = c(4, Inf) ) ) saveRDS(xx, file = \"fixtures/compute_expected_events.rds\", compress = \"xz\") xx <- readRDS(\"fixtures/compute_expected_events.rds\") ecat <- 1 + (xx$enrollTime > 1) + (xx$enrollTime > 2) cat(\"Enrollment pattern: \", table(ecat) / nsim) #> Enrollment pattern: 0.599697 0.399995 0.000308 #' This function is borrowed from Merck/simtrial. #' We copy it here to make gsDesign2 self-contained. #' #' Cut a Dataset for Analysis at a Specified Date #' #' @param x a time-to-event dataset, e.g., generated by \\code{simPWSurv} #' @param cut_date date relative to start of randomization (\\code{cte} from input dataset) #' at which dataset is to be cut off for analysis #' @return A dataset ready for survival analysis #' @examples #' # Use default enrollment and event rates and cut at calendar time 5 after start #' # of randomization #' library(dplyr) #' simPWSurv(n = 20) %>% cut_data(5) cut_data <- function(x, cut_date) { x %>% filter(enrollTime <= cut_date) %>% mutate( tte = pmin(cte, cut_date) - enrollTime, event = fail * (cte <= cut_date) ) %>% select(tte, event, Stratum, Treatment) } yy <- xx %>% cut_data(7) %>% filter(event == 1) %>% mutate(tcat = 4 + (tte > 4) + (tte > 5) + (tte > 6)) cat(\"Event by interval: \", table(yy$tcat) / nsim * 5, \"\\n\") #> Event by interval: 0.56421 0.2591 0.19403 0.067865 cat(\"Total events: \", sum(yy$event) / nsim * 5) #> Total events: 1.085205"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Computing bounds under non-constant treatment effect","text":"consider group sequential designs possibly non-constant treatment effects time. can useful situations assumed non-proportional hazards model laid vignettes/articles/story-npe-background.Rmd. general, assume K \\geq 1 analyses statistical information \\mathcal{}_k information fraction t_k=\\mathcal{}_k/\\mathcal{}_k analysis k, 1\\leq k\\leq K. denote null hypothesis H_{0}: \\theta(t)=0 alternate hypothesis H_1: \\theta(t)=\\theta_1(t) t> 0 t represents information fraction study. study planned stop information fraction t=1, define \\theta(t) t>0 since trial can overrun planned statistical information final analysis. 
, use shorthand notation \\theta represent \\theta(), \\theta=0 represent \\theta(t)\\equiv 0 t \\theta_1 represent \\theta_i(t_k), effect size analysis k, 1\\leq k\\leq K. purposes, H_0 represent treatment difference, represent non-inferiority hypothesis. Recall assume K analyses bounds -\\infty \\leq a_k< b_k<\\leq \\infty 1\\leq k < K -\\infty \\leq a_K\\leq b_K<\\infty. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{k-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. total probability crossing upper bound prior crossing lower bound denoted \\alpha(\\theta)\\equiv\\sum_{k=1}^K\\alpha_k(\\theta). non-binding bounds, define probability \\alpha_{k}^{+}(\\theta)=P_{\\theta}\\{\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{k-1} \\{Z_{j}< b_{j}\\}\\} ignores lower bounds computing upper boundary crossing probabilities. non-binding Type error probability ever crossing upper bound \\theta=0. value \\alpha^+_{k}(0) commonly referred amount Type error spent analysis k, 1\\leq k\\leq K. total upper boundary crossing probability trial denoted one-sided scenario \\alpha^+(\\theta) \\equiv\\sum_{k=1}^{K}\\alpha^+_{k}(\\theta). primarily interested \\alpha(\\theta) compute power \\theta > 0. Type error, may interested \\alpha(0) binding lower bounds, often consider non-binding Type error calculations, \\alpha^{+}(0). denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). Efficacy bounds b_k, 1\\leq k\\leq K, group sequential design derived control Type level \\alpha=\\alpha(0). Lower bounds a_k, 1\\leq k\\leq K may used control boundary crossing probabilities either null hypothesis (2-sided testing), alternate hypothesis hypothesis (futility testing). Thus, may consider 3 values \\theta(t): null hypothesis \\theta_0(t)=0 computing efficacy bounds, value \\theta_1(t) computing lower bounds, value \\theta_a(t) computing sample size power. refer information 3 assumptions \\mathcal{}^{(0)}(t), \\mathcal{}^{(1)}(t), \\mathcal{}^{()}(t), respectively. Often assume \\mathcal{}(t)=\\mathcal{}^{(0)}(t)=\\mathcal{}^{(1)}(t)=\\mathcal{}^{()}(t). note information may differ different values \\theta(t). fixed designs, Lachin (2009) computes sample size based different variances null alternate hypothesis.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"spending-bounds","dir":"Articles","previous_headings":"","what":"Spending bounds","title":"Computing bounds under non-constant treatment effect","text":"consider different boundary types gsDesign package simplify two types according whether lower bounds binding non-binding. concept implicitly derive Z-value bounds a_k, b_k, k=1,\\cdots,K based probabilities specified following table. include test.type argument gsDesign::gsDesign() function reference. Boundary crossing probabilities used set Z-value boundaries can reduced just two types distinguishing whether lower bounds binding non-binding: Reduced options boundary crossing probabilities used set Z-value boundaries second table used \\theta=0 derive upper bound control Type error cases. chosen arbitrary \\theta 0 test.type, \\theta_a \\beta-spending arbitrary \\theta_1 otherwise. note one-sided design let \\beta_k(\\theta)=0 a_k=-\\infty, k=1,\\cdots,K. test.type=3, 4 let \\theta=\\theta_a, test.type=5, 6 \\theta \\geq 0 arbitrary. 
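For example, the cumulative \\alpha-spending at a set of information fractions can be evaluated directly with a gsDesign spending function; this minimal sketch (not from the vignette) uses the Lan-DeMets O'Brien-Fleming approximation sfLDOF():

library(gsDesign)
t <- c(0.25, 0.5, 0.75, 1)                        # information fractions
sfLDOF(alpha = 0.025, t = t, param = NULL)$spend  # cumulative alpha spent, reaching 0.025 at t = 1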
note asymmetric \\alpha-spending bounds can derived using test.type > 2 \\theta=0.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"two-sided-testing-and-design","dir":"Articles","previous_headings":"","what":"Two-sided testing and design","title":"Computing bounds under non-constant treatment effect","text":"denote alternative H_{}: \\theta(t)=\\theta_a(t); always assume H_a power calculations; using \\beta-spending also use H_a controlling lower boundary a_k crossing probabilities letting \\theta=\\theta_a lower bound spending. value \\theta(t)>0 reflect positive benefit. restrict alternate hypothesis \\theta_a(t)>0 t. value \\theta(t) referred (standardized) treatment effect information fraction t. assume interest stopping early good evidence reject one hypothesis favor . a_k= -\\infty analysis k 1\\leq k\\leq K alternate hypothesis rejected analysis k; .e., futility bound analysis k. k=1,2,\\ldots,K, trial stopped analysis k reject H_0 a_j0 \\epsilon= 0.001 yields b_k=3.09. original proposal use b_K=\\Phi^{-1}(1-\\alpha) final analysis, fully control one-sided Type error level \\alpha suggest computing final bound b_K using algorithm \\alpha(0)=\\alpha. Bounds computed spending \\alpha_k(0) analysis k can computed using equation (9) b_1. k=2,\\ldots,K algorithm previous section used. noted Jennison Turnbull (1999), b_1,\\ldots,b_K determined null hypothesis depend t_k \\alpha_k(0) dependence \\mathcal{}_k, k=1,\\ldots,K. computing bounds based \\beta_k(\\theta), k=1,\\ldots,K, \\theta(t_k)\\neq 0 additional dependency a_k depending t_k b_k, k=1,\\ldots,K, also final total information \\mathcal{}_K. Thus, spending bound something null hypothesis needs recomputed time \\mathcal{}_K changes, whereas needs computed \\theta(t_k)=0, k=1,\\ldots,K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"bounds-based-on-boundary-families","dir":"Articles","previous_headings":"Two-sided testing and design","what":"Bounds based on boundary families","title":"Computing bounds under non-constant treatment effect","text":"Assume constants b_1^*,\\ldots,b_K^* total targeted one-sided Type error \\alpha. wish find C_u function t_1,\\ldots t_K b_k=C_ub_k^* \\alpha(0)=\\alpha. Thus, problem solve C_u. a_k, k=1,2,\\ldots,K fixed simple root finding problem. Since one normally normally uses non-binding efficacy bounds, normally case a_k=-\\infty, k=1,\\ldots,K problem. Now assume constants a_k^* wish find C_l a_k=C_la_k^*+\\theta(t_k)\\sqrt{\\mathcal{}_k} k=1,\\ldots,K \\beta(\\theta)=\\beta. use constant upper bounds previous paragraph, finding C_l simple root-finding problem. 2-sided symmetric bounds a_k=-b_k, k=1,\\ldots,K, need solve C_u use simple root finding. point, solve type bound asymmetric upper lower bounds.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"sample-size","dir":"Articles","previous_headings":"","what":"Sample size","title":"Computing bounds under non-constant treatment effect","text":"sample size, assume t_k, \\theta(t_k) 1,\\ldots,K fixed. assume \\beta(\\theta) decreasing \\mathcal{} decreasing. automatically case \\theta(t_k)>0, k=1,\\ldots,K many cases. 
Thus, information required done search \\mathcal{I_K} yields \\alpha(\\theta) yields targeted power.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Design using average hazard ratio","text":"consider fixed group sequential design non-proportional hazards testing logrank test. focus primarily average hazard ratio approach, expanding asymptotic approach Mukhopadhyay et al. (2020) group sequential design complex enrollment assumptions. theoretical background provided vignettes package. provide basic examples along lines Lin et al. (2020) illustration design considerations following assumptions: Proportional hazards Short delayed effect Longer delayed effect Crossing survival Illustrations include Expected average hazard ratio (AHR) time. Expected event accumulation time. impact planned study duration required number events. Power across scenarios trial designed assumption short delayed effect. Timing interim analyses. \\alpha-spending considerations. focus results rather code, hidden code can revealed examples.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"packages-used","dir":"Articles","previous_headings":"","what":"Packages used","title":"Design using average hazard ratio","text":"primary packages needed gsDesign2. packages used supportive.","code":"library(gsDesign) library(gsDesign2) library(ggplot2) library(dplyr) library(gt) library(tidyr) library(tibble)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"scenarios","dir":"Articles","previous_headings":"","what":"Scenarios","title":"Design using average hazard ratio","text":"Expected enrollment duration 18 months piecewise constant enrollment rates escalating every 2 months month 6 enrollment assumed reached steady state. later assume similar ramp-period 24 months expected enrollment duration. consider following failure rate assumptions: Control group exponential failure rate median 14 months. Constant hazard ratio 0.7 (experimental/control). Control group exponential failure rate median 10 months. Hazard ratio 1 6 months followed hazard ratio 0.6. Control group exponential failure rate median 10 months. Hazard ratio 1 6 months followed hazard ratio 0.6. Control group exponential failure rate median 10 months. Hazard ratio 1.5 4 months followed hazard ratio 0.5. survival curves 4 scenarios shown : average hazard ratio 4 scenarios shown . note Shorter delayed effect scenario, average hazard ratio approaches PH scenario study duration 36 months. number events 4 scenarios shown . 3 NPH scenarios events accumulate faster PH scenario due lower control median /delayed effect. , see slight variations control failure rates potential delayed effect can substantially accelerate accumulation events. event-based cutoff analysis slight variations can lead earlier analyses anticipated average hazard ratio expected longer follow-never achieved. 
examine implications .","code":"# Set the enrollment table of totally 24 month enroll24 <- define_enroll_rate( duration = c(rep(2, 3), 18), # 6 month ramp-up of enrollment, 24 months enrollment time target rate = 1:4 # ratio of the enrollment rate ) # Adjust enrollment rates to enroll 100 subjects enroll24$rate <- enroll24$rate * 100 / sum(enroll24$duration * enroll24$rate) # Set the enrollment table for 18 month expected enrollment enroll18 <- define_enroll_rate( duration = c(rep(2, 3), 12), # 6 month ramp-up of enrollment, 18 months enrollment time target rate = 1:4 # ratio of the enrollment rate ) # Adjust enrollment rates to enroll 100 subjects enroll18$rate <- enroll18$rate * 100 / sum(enroll18$duration * enroll18$rate) # Put these in a single tibble by scenario # We will use 18 month enrollment for delayed effect and crossing hazards scenarios enroll_rate <- rbind( enroll18 %>% mutate(Scenario = \"PH\"), enroll18 %>% mutate(Scenario = \"Shorter delayed effect\"), enroll18 %>% mutate(Scenario = \"Longer delayed effect\"), enroll18 %>% mutate(Scenario = \"Crossing\") ) month <- c(0, 4, 6, 44) duration <- month - c(0, month[1:3]) control_rate <- log(2) / c(rep(16, 4), rep(14, 4), rep(14, 4)) s <- tibble( Scenario = c(rep(\"PH\", 4), rep(\"Delayed effect\", 4), rep(\"Crossing\", 4)), Treatment = rep(\"Control\", 12), Month = rep(month, 3), duration = rep(duration, 3), rate = control_rate, hr = c(rep(.7, 4), c(1, 1, 1, .575), c(1.5, 1.5, .5, .5)) ) s <- rbind( s, s %>% mutate(Treatment = \"Experimental\", rate = rate * hr) ) %>% group_by(Scenario, Treatment) %>% mutate(Survival = exp(-cumsum(duration * rate))) ggplot(s, aes(x = Month, y = Survival, col = Scenario, lty = Treatment)) + geom_line() + scale_y_log10(breaks = (1:10) / 10, lim = c(.1, 1)) + scale_x_continuous(breaks = seq(0, 42, 6)) # get 4 scenarios control_median <- c(14, 12, 12, 12) month <- c(0, 4, 6, 44) duration <- month - c(0, month[1:3]) # HR by time period for each scenario hr <- c( rep(.7, 4), # constant hazard ratio of 0.7 1, 1, .6, .6, # hazard ratio of 1 for 4 months followed by a hazard ratio of 0.6. 1, 1, 1, .6, # hr = 1 for 6 months followed by hr = .6 1.5, 1.5, .5, .5 ) # hazard ratio of 1.5 for 4 months followed by a hazard ratio of 0.5. 
# Put parameters together in a tibble s <- tibble( Scenario = c(rep(\"PH\", 4), rep(\"Shorter delayed effect\", 4), rep(\"Longer delayed effect\", 4), rep(\"Crossing\", 4)), Treatment = rep(\"Control\", 16), Month = rep(month, 4), # Periods for constant HR duration = rep(duration, 4), rate = log(2) / c( rep(control_median[1], 4), rep(control_median[2], 4), rep(control_median[3], 4), rep(control_median[4], 4) ), hr = hr ) # calculate the survival at each change point for each scenario s <- rbind( s, s %>% mutate(Treatment = \"Experimental\", rate = rate * hr) ) %>% group_by(Scenario, Treatment) %>% mutate(Survival = exp(-cumsum(duration * rate))) # plot the survival curve ggplot(s, aes(x = Month, y = Survival, col = Scenario, lty = Treatment, shape = Treatment)) + geom_line() + annotate(\"text\", x = 18, y = .1, label = \"Control for scenarios other than PH have same survival\") + scale_y_log10(breaks = (1:10) / 10, lim = c(.07, 1)) + scale_x_continuous(breaks = seq(0, 42, 6)) + ggtitle(\"Survival over time for 4 scenarios studied\") # Durations to be used in common for all failure rate scenarios dur <- month[2:4] - month[1:3] # Set the failure table # We use exponential failure, proportional hazards fail_rate <- rbind( tibble( Scenario = \"PH\", stratum = \"All\", duration = dur, fail_rate = log(2) / 14, hr = hr[1], dropout_rate = .001 ), tibble( Scenario = \"Shorter delayed effect\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[6:8], dropout_rate = .001 ), tibble( Scenario = \"Longer delayed effect\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[10:12], dropout_rate = .001 ), tibble( Scenario = \"Crossing\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[14:16], dropout_rate = .001 ) ) hr <- do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), total_duration = c(.001, seq(4, 44, 4)) ) %>% mutate(Scenario = x) } ) ) ggplot(hr, aes(x = time, y = ahr, col = Scenario)) + geom_line() + scale_x_continuous(breaks = seq(0, 42, 6)) + ggtitle(\"Average hazard ratio (AHR) by study duration\", subtitle = \"Under the 4 scenarios examined\" ) ggplot(hr, aes(x = time, y = event, col = Scenario)) + geom_line() + scale_x_continuous(breaks = seq(0, 42, 6)) + ylab(\"Expected events per 100 enrolled\") + ggtitle(\"Expected event accumulation under the 4 scenarios studied\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"fixed-design-using-ahr-and-logrank","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Fixed Design using AHR and Logrank","title":"Design using average hazard ratio","text":"power fixed design 90% 2.5% one-sided Type error different scenarios consideration. now assume 18 month enrollment pattern scenarios. PH Shorter delayed effect scenarios need similar AHR, number events sample size 36 month study. two scenarios crossing survival curves large effect delay require substantially larger sample sizes due achieving similar AHR month 36. Assuming shorter delayed effect primary scenario wish protect power, long trial optimize tradeoffs sample size, AHR events required? inform tradeoff looking sizing trial different assumed trial durations failure rates assumed relative enrollment rates. 
counts events required perhaps interesting 24 month trial requires almost twice events powered 90% compared trial 42 months duration. study, consider 36 month trial duration reasonable tradeoff time, sample size power presumed delayed effect 4 months followed hazard ratio 0.6 thereafter.","code":"ss_ahr_fixed <- do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { xx <- gs_design_ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), analysis_time = 36, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, alpha = .025, beta = .1 ) ans <- xx$analysis %>% select(time, n, event, ahr) %>% mutate(Scenario = x) return(ans) } ) ) ss_ahr_fixed %>% gt() %>% fmt_number(columns = 1:3, decimals = 0) %>% fmt_number(columns = 4, decimals = 3) %>% tab_header( title = \"Sample Size and Events Required by Scenario\", subtitle = \"36 Month Trial duration, 2.5% One-sided Type 1 Error, 90% Power\" ) do.call( rbind, lapply( c(24, 30, 36, 42), function(x) { ans <- gs_design_ahr( enroll_rate = enroll_rate %>% filter(Scenario == \"Shorter delayed effect\"), fail_rate = fail_rate %>% filter(Scenario == \"Shorter delayed effect\"), analysis_time = x, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, alpha = .025, beta = .1 )$analysis %>% select(time, n, event, ahr) %>% mutate(Scenario = \"Shorter delayed effect\") return(ans) } ) ) %>% gt() %>% fmt_number(columns = 1:3, decimals = 0) %>% fmt_number(columns = 4, decimals = 3) %>% tab_header( title = \"Sample Size and Events Required by Trial Duration\", subtitle = \"Delayed Effect of 4 Months, HR = 0.6 Thereafter; 90% Power\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"alternate-hypothesis-mapping","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Alternate Hypothesis Mapping","title":"Design using average hazard ratio","text":"different scenarios interest, can examine expected number events time periods interest. Recall alternate hypothesis assumes treatment effect (HR=1) 4 months HR = 0.6 thereafter. scenarios, wish base futility bound assumption plus number events first 4 months 4 months, can compute average hazard ratio alternate hazard ratio scenario 20 months follows. can see interim futility spending bound based alternate hypothesis can depend fairly heavily enrollment control failure rate. Note also time interim analysis, alternate hypothesis AHR can computed fashion based observed events time period. 
Note can quite different scenario HR; e.g., PH, assume HR=0.7 throughout, futility bound comparison, compute blinded AHR decreases analysis alternate hypothesis.","code":"events_by_time_period <- NULL for (g in c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\")) { events_by_time_period <- rbind( events_by_time_period, pw_info( enroll_rate = enroll_rate %>% filter(Scenario == g), fail_rate = fail_rate %>% filter(Scenario == g), total_duration = c(12, 20, 28, 36) ) %>% mutate(Scenario = g) ) } # Time periods for each scenario were 0-4, 4-6, and 6+ # Thus H1 has HR as follows hr1 <- tibble(t = c(0, 4, 6), hr1 = c(1, .6, .6)) ahr_by_analysis <- events_by_time_period %>% full_join(hr1) %>% group_by(Scenario, time) %>% summarize(AHR1 = exp(sum(event * log(hr1)) / sum(event))) ahr_by_analysis %>% pivot_wider(names_from = Scenario, values_from = AHR1) %>% gt() %>% fmt_number(columns = 2:5, decimals = 3)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"group-sequential-design","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Group Sequential Design","title":"Design using average hazard ratio","text":"assume design delayed effect model delay long long-term average hazard ratio benefit strong. proportional hazards scenario, look power alternate scenarios. plan 36 month group sequential design Shorter delayed effect scenario. Interim analyses planned 12, 20, 28 months.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"ahr-method","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios > Group Sequential Design","what":"AHR method","title":"Design using average hazard ratio","text":"scenario, now wish compute adjusted expected futility bounds power implied.","code":"analysis_time <- c(12, 20, 28, 36) upar <- list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL, theta = 0) lpar <- list(sf = gsDesign::sfHSD, total_spend = .1, param = -2, timing = NULL, theta = NULL) nph_asymmetric <- gs_design_ahr( enroll_rate = enroll_rate |> filter(Scenario == \"Shorter delayed effect\"), fail_rate = fail_rate |> filter(Scenario == \"Shorter delayed effect\"), ratio = 1, alpha = .025, beta = 0.1, # Information fraction not required (but available!) analysis_time = analysis_time, # Function to enable spending bound upper = gs_spending_bound, lower = gs_spending_bound, # Spending function and parameters used upar = upar, lpar = lpar ) summary(nph_asymmetric) %>% as_gt() do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { ahr1 <- (ahr_by_analysis %>% filter(Scenario == x))$AHR1 lparx <- lpar lparx$theta1 <- -log(ahr1) yy <- gs_power_ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), event = NULL, analysis_time = c(12, 20, 28, 36), upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lparx )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = \"event\", decimals = 1) %>% fmt_number(columns = 5:10, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"weighted-logrank-method","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios > Group Sequential Design","what":"Weighted Logrank Method","title":"Design using average hazard ratio","text":"investigate two types weighting scheme weight logrank method. 
fixed design first weighting scheme four scenario summarized follows. fixed design second weighting scheme four scenario summarized follows.","code":"do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { gs_design_wlr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5, tau = 4) }, alpha = .025, beta = .1, upar = qnorm(.975), lpar = -Inf, analysis_time = 44 )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) # Ignore tau or (tau can be -1) do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { gs_design_wlr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, alpha = .025, beta = .1, upar = qnorm(.975), lpar = -Inf, analysis_time = 44 )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Trial design with spending under NPH","text":"vignette covers implement designs trials spending assuming non-proportional hazards. primarily concerned practical issues implementation rather design strategies, ignore design strategy.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"scenario-for-consideration","dir":"Articles","previous_headings":"","what":"Scenario for consideration","title":"Trial design with spending under NPH","text":"set enrollment, failure dropout rates along assumptions enrollment duration times analyses. assume 4 analysis (3 interim analyses + 1 final analysis) conducted 18, 24, 30, 36 months trial enrollment opened. assume single stratum enrollment targeted last 12 months. first 2 months, second 2 months, third 2 months remaining months, relative enrollment rates 8:12:16:24. rates updated constant multiple time design note . assume hazard ratio (HR) 0.9 first 3 months 0.6 thereafter. 
also assume control time--event follows piecewise exponential distribution median 8 month first 3 months 14 months thereafter.","code":"n_analysis <- 4 analysis_time <- c(18, 24, 30, 36) enroll_rate <- define_enroll_rate( duration = c(2, 2, 2, 6), rate = c(8, 12, 16, 24) ) enroll_rate |> gt::gt() |> gt::tab_header(title = \"Planned Relative Enrollment Rates\") fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(8, 14), hr = c(.9, .6), dropout_rate = .001 ) fail_rate |> gt::gt() |> gt::tab_header(title = \"Table of Failure Rate Assumptions\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"fixed-design-with-no-interim-analysis","dir":"Articles","previous_headings":"","what":"Fixed design with no interim analysis","title":"Trial design with spending under NPH","text":"can derive power enrollment rates failure rates follows: now compute sample size translate continuous sample size integer sample size.","code":"fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, power = NULL, ratio = 1, study_duration = 36, event = NULL ) |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 216 151. 36 1.96 0.025 0.656 fixed_design <- fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, power = .9, ratio = 1, study_duration = 36, event = NULL ) |> to_integer() fixed_design$analysis #> # A tibble: 1 × 7 #> design n event time bound alpha power #> #> 1 ahr 410 287 36.0 1.96 0.025 0.901"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"group-sequential-design","dir":"Articles","previous_headings":"Fixed design with no interim analysis","what":"Group sequential design","title":"Trial design with spending under NPH","text":"now consider group sequential design bounds derived using spending functions. target interim analysis 24 months final analysis 36 months. Spending efficacy futility based proportion events expected analysis divided total expected events final analysis.","code":"gs <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, info_frac = NULL, analysis_time = c(24, 36), upper = gs_spending_bound, lower = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1, param = NULL, timing = NULL), h1_spending = TRUE ) |> to_integer() gs |> summary() |> gt::gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"continuous-outcomes","dir":"Articles","previous_headings":"","what":"Continuous outcomes","title":"Statistical information under null and alternative hypothesis","text":"Imagine trial continuous outcome. Let X_{0, } \\sim N(\\mu_0, \\sigma^2) subjects = 1, \\ldots, n_0 control arm X_{1,} \\sim N(\\mu_1, \\sigma^2) patient (= 1, \\ldots, n_1) experimental arm. superiority design, tested hypothesis H_0: \\; \\mu_0 = \\mu_1 \\;\\;\\; \\text{vs.} \\;\\;\\; H_1:\\; \\mu_1 > \\mu_0. Suppose k-th analysis, n_{0k} subjects control arm, n_{1k} subjects experimental arm. \\delta_k difference group means, .e., \\delta_k = \\frac{\\sum_{=1}^{n_{1k}} X_{,1}}{n_{1k}} - \\frac{\\sum_{=1}^{n_{0k}} X_{,0}}{n_{0k}}. can estimated \\widehat\\delta_k = \\frac{\\sum_{=1}^{n_{1k}} x_{,1}}{n_{1k}} - \\frac{\\sum_{=1}^{n_{0k}} x_{,0}}{n_{0k}}, x_{,j} observation X_{,j} subject arm j. 
The statistical information \\mathcal I_k satisfies \\mathcal I_k^{-1} = \\text{Var}(\\delta_k | H_0) = \\sigma^2 (1 / n_{1k} + 1 / n_{0k}) under both H_0 and H_1, and it can be estimated by \\widehat{\\mathcal I}_k^{-1} = \\widehat\\sigma^2 (1 / n_{1k} + 1 / n_{0k}).","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"binary-outcomes","dir":"Articles","previous_headings":"","what":"Binary outcomes","title":"Statistical information under null and alternative hypothesis","text":"Imagine a trial with a binary outcome. Let X_{0, i} \\sim B(p_0) for patient i = 1, \\ldots, n_0 of the control arm and X_{1,i} \\sim B(p_1) for patient i = 1, \\ldots, n_1 of the experimental arm, where p_0 and p_1 are the failure rate probabilities. Suppose at the k-th analysis, there are n_{0k} subjects in the control arm and n_{1k} subjects in the experimental arm. For a superiority design, the null and alternative hypotheses are H_0: \\; p_0 = p_1 = p \\;\\;\\; \\text{vs.} \\;\\;\\; H_1:\\; p_0 > p_1. The natural-scale treatment effect is \\delta_k = \\frac{\\sum_{i=1}^{n_{1k}} X_{i,1}}{n_{1k}} - \\frac{\\sum_{i=1}^{n_{0k}} X_{i,0}}{n_{0k}}, which can be estimated by \\widehat\\delta_k = \\frac{\\sum_{i=1}^{n_{1k}} x_{i,1}}{n_{1k}} - \\frac{\\sum_{i=1}^{n_{0k}} x_{i,0}}{n_{0k}}, where x_{i,j} is the observation of X_{i,j} for subject i in arm j. The statistical information is \\mathcal I_k^{-1} = \\text{Var}(\\delta_k) = \\left\\{ \\begin{array}{ll} p(1-p)/n_{1k} + p(1-p)/n_{0k} & \\text{under } H_0\\\\ p_1(1-p_1)/n_{1k} + p_0(1-p_0)/n_{0k} & \\text{under } H_1\\\\ \\end{array} \\right.. Its estimation is \\widehat{\\mathcal I}_k^{-1} = \\left\\{ \\begin{array}{ll} \\bar p(1 - \\bar p) / n_{1k} + \\bar p(1 - \\bar p) / n_{0k} & \\text{under } H_0\\\\ \\widehat p_1(1-\\widehat p_1) / n_{1k} + \\widehat p_0(1 - \\widehat p_0)/n_{0k} & \\text{under } H_1\\\\ \\end{array} \\right., where \\bar p = \\frac{\\sum_{i=1}^{n_{1k}}x_{i1} + \\sum_{i=1}^{n_{0k}}x_{i0}}{n_{1k} + n_{0k}} and \\widehat p_j = \\frac{\\sum_{i=1}^{n_{jk}}x_{ij}}{n_{jk}} for j = 0, 1.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"survival-outcome","dir":"Articles","previous_headings":"","what":"Survival outcome","title":"Statistical information under null and alternative hypothesis","text":"In many clinical trials, the outcome is time to an event. For simplicity, we assume the event is death so a person can have only one event; the ideas apply to events that can recur, in which case we restrict attention to the first event of patients. We use logrank statistics to compare the treatment and control arms. We assume N_k is the total number of deaths at analysis k. The numerator of the logrank statistic at analysis k is (Proschan, Lan, Wittes 2006) \\sum_{i=1}^{N_k} D_i, where D_i = O_i - E_i with O_i an indicator of whether the ith death occurred on a treatment patient, and E_i = m_{1i} / (m_{0i} + m_{1i}) is the null expectation of O_i given the respective numbers, m_{0i} and m_{1i}, of control and treatment patients at risk just prior to the ith death. Conditioned on m_{0i} and m_{1i}, O_i has a Bernoulli distribution with parameter E_i. The null conditional mean and variance of D_i are 0 and V_i = E_i(1 - E_i), respectively. Unconditionally, the D_i are uncorrelated, mean 0 random variables with variance E(V_i) under the null hypothesis. Thus, conditioned on N_k, \\begin{array}{ccl} \\mathcal I_k^{-1} & = & \\text{Var}(\\delta_k) = \\sum_{i=1}^{N_k} \\text{Var}(D_i) = \\sum_{i=1}^{N_k} E(V_i) = E \\left( \\sum_{i=1}^{N_k} V_i \\right) = E \\left( \\sum_{i=1}^{N_k} E_i(1 - E_i) \\right) \\\\ & = & \\left\\{ \\begin{array}{ll} E\\left(\\sum_{i=1}^{N_k} \\frac{r}{1+r} \\frac{1}{1+r} \\right) & \\text{under } H_0\\\\ E\\left(\\sum_{i=1}^{N_k} \\frac{m_{1i}}{(m_{0i} + m_{1i})} \\frac{m_{0i}}{(m_{0i} + m_{1i})}\\right) & \\text{under } H_1 \\end{array} \\right., \\end{array} where r is the randomization ratio. 
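Under H_0 each summand above equals r/(1+r)^2, so with d = N_k events the information is approximately d r/(1+r)^2 (the familiar d/4 with 1:1 randomization). A minimal sketch of this approximation (illustrative only, not package code; info_h0() is a hypothetical helper):

info_h0 <- function(d, r = 1) d * r / (1 + r)^2  # logrank information under H0
info_h0(d = 300, r = 1)  # 75, i.e., d / 4 with 1:1 randomization
info_h0(d = 300, r = 2)  # about 66.7 with 2:1 randomization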
estimation \\begin{array}{ccl} \\widehat{\\mathcal }_k^{-1} & = & \\left\\{ \\begin{array}{ll} \\sum_{=1}^{N_k} \\frac{r}{1+r} \\frac{1}{1+r} & \\text{} H_0\\\\ \\sum_{=1}^{N_k} \\frac{m_{1i}}{(m_{0i} + m_{1i})} \\frac{m_{0i}}{(m_{0i} + m_{1i})} & \\text{} H_1 \\end{array} \\right.. \\end{array}","code":""},{"path":[]},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"binary-outcome","dir":"Articles","previous_headings":"Unstratified design","what":"Binary outcome","title":"Integer designs","text":"Note original design, sample size 1243.307021, 1989.2912336, 2486.614042, integer design, sample size updated 1242, 1988, 2488. 2 interim analysis, floor closet multiplier 2, since randomization ratio 1. final analysis, ceiling sample size 2486.614042 2488 also make sure integer sample size multiplier 2. Please also note , since sample size rounded, power new design also changes little bit, , 0.9 0.9001172.","code":"x <- gs_design_rd( p_c = tibble(stratum = \"All\", rate = 0.2), p_e = tibble(stratum = \"All\", rate = 0.15), info_frac = c(0.5, 0.8, 1), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, timing = c(0.5, 0.8, 1), total_spend = 0.025, param = NULL), lpar = rep(-Inf, 3) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 3), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on binary endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"survival-outcome","dir":"Articles","previous_headings":"Unstratified design","what":"Survival outcome","title":"Integer designs","text":"Notice integer design, () number events, (ii) sample size, (iii) power, (iv) information fraction different.","code":"x <- gs_design_ahr( analysis_time = c(12, 24, 36), upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, timing = 1:3 / 3, total_spend = 0.025, param = NULL), lpar = rep(-Inf, 3) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 3), Events = c(x$analysis$event, xi$analysis$event), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on survival endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"stratified-design","dir":"Articles","previous_headings":"","what":"Stratified design","title":"Integer designs","text":"Note original design, sample size 3426.1318268, 4894.4740382, integer design, sample 
size updated 3426, 4896. 2 interim analysis, floor closet multiplier 2, since randomization ratio 1. final analysis, ceiling sample size 4894.4740382 4896 also make sure integer sample size multiplier 2.","code":"x <- gs_design_rd( p_c = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(0.2, 0.25) ), p_e = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(0.15, 0.22) ), info_frac = c(0.7, 1), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), prevalence = c(0.4, 0.6) ), weight = \"ss\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = c(0.7, 1)), lpar = rep(-Inf, 2) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 2), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on binary endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Non-proportional effect size in group sequential design","text":"acronym NPES short non-proportional effect size. motivated primarily use designing time--event trial non-proportional hazards (NPH), simplified generalized concept . model likely useful rank-based survival tests beyond logrank test considered initially Tsiatis (1982). also useful situations treatment effect may vary time trial reason. generalize framework Chapter 2 Proschan, Lan, Wittes (2006) incorporate possibility treatment effect changing course trial systematic way. vignettes addresses distribution theory initial technical issues around computing boundary crossing probabilities bounds satisfying targeted boundary crossing probabilities applied generalize computational algorithms provided Chapter 19 Jennison Turnbull (1999) used compute boundary crossing probabilities well boundaries group sequential designs. Additional specifics around boundary computation, power sample size provided separate vignette.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"the-continuous-model-and-e-process","dir":"Articles","previous_headings":"The probability model","what":"The continuous model and E-process","title":"Non-proportional effect size in group sequential design","text":"consider simple example motivate distribution theory quite general applies across many situations. instance, Proschan, Lan, Wittes (2006) immediately suggest paired observations, time--event binary outcomes endpoints theory applicable. assume given integer N>0 X_{} independent, =1,2,\\ldots. integer K\\leq N assume perform analysis K times 00 reflect positive benefit. k=1,2,\\ldots,K-1, interim cutoffs -\\infty \\leq a_k< b_k\\leq \\infty set; final cutoffs -\\infty \\leq a_K\\leq b_K <\\infty also set. infinite efficacy bound analysis means bound crossed analysis. 
Thus, 3K parameters define group sequential design: a_k, b_k, \\mathcal{}_k, k=1,2,\\ldots,K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"notation-for-boundary-crossing-probabilities","dir":"Articles","previous_headings":"Test bounds and crossing probabilities","what":"Notation for boundary crossing probabilities","title":"Non-proportional effect size in group sequential design","text":"now apply distributional assumptions compute boundary crossing probabilities. use shorthand notation section \\theta represent \\theta() \\theta=0 represent \\theta(t)\\equiv 0 t. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. Next, consider analogous notation lower bound. k=1,2,\\ldots,K denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). symmetric testing analysis k a_k= - b_k, \\beta_k(0)=\\alpha_k(0), k=1,2,\\ldots,K. total lower boundary crossing probability trial denoted \\beta(\\theta)\\equiv\\sum_{k=1}^{K}\\beta_{k}(\\theta). Note can also set a_k= -\\infty analyses lower bound desired, k=1,2,\\ldots,K. k-\\infty b_k<\\infty.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Numerical integration non-proportional effect size in group sequential design","text":"provided asymptotic distribution theory notation group sequential boundaries vignettes/articles/story-npe-background.Rmd. vignettes generalize computational algorithms provided Chapter 19 Jennison Turnbull (1999) used compute boundary crossing probabilities well derive boundaries group sequential designs.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"asymptotic-normal-and-boundary-crossing-probabilities","dir":"Articles","previous_headings":"","what":"Asymptotic normal and boundary crossing probabilities","title":"Numerical integration non-proportional effect size in group sequential design","text":"assume Z_1,\\cdots,Z_K multivariate normal distribution variance 1\\leq k\\leq K \\text{Var}(Z_k) = 1 expected value E(Z_{k})= \\sqrt{\\mathcal{}_k}\\theta(t_{k})= \\sqrt{n_k}E(\\bar X_k) .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"notation-for-boundary-crossing-probabilities","dir":"Articles","previous_headings":"","what":"Notation for boundary crossing probabilities","title":"Numerical integration non-proportional effect size in group sequential design","text":"use shorthand notation section \\theta represent \\theta() \\theta=0 represent \\theta(t)\\equiv 0 t. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. Next, consider analogous notation lower bound. k=1,2,\\ldots,K denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). symmetric testing analysis k a_k= - b_k, \\beta_k(0)=\\alpha_k(0), k=1,2,\\ldots,K. 
total lower boundary crossing probability trial denoted \\beta(\\theta)\\equiv\\sum_{k=1}^{K}\\beta_{k}(\\theta). Note can also set a_k= -\\infty analyses lower bound desired, k=1,2,\\ldots,K; thus, use \\alpha^+(\\theta) notation . k-\\infty b_k<\\infty.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"recursive-algorithms-for-numerical-integration","dir":"Articles","previous_headings":"","what":"Recursive algorithms for numerical integration","title":"Numerical integration non-proportional effect size in group sequential design","text":"now provide small update algorithm Chapter 19 Jennison Turnbull (1999) numerical integration required compute boundary crossing probabilities previous section also identifying group sequential boundaries satisfying desired characteristics. key calculations conditional power identity equation (1) allows building recursive numerical integration identities enable simple, efficient numerical integration. define g_1(z;\\theta) = \\frac{d}{dz}P(Z_1\\leq z) = \\phi\\left(z - \\sqrt{\\mathcal{}_1}\\theta(t_1)\\right)\\tag{2} k=2,3,\\ldots K recursively define subdensity function \\begin{align} g_k(z; \\theta) &= \\frac{d}{dz}P_\\theta(\\{Z_k\\leq z\\}\\cap_{j=1}^{k-1}\\{a_j\\leq Z_j0 \\pi_k(b^{(+1)};\\theta)-\\alpha_k(\\theta) suitably small. simple starting value k b^{(0)} = \\Phi^{-1}(1- \\alpha_k(\\theta)) + \\sqrt{\\mathcal{}_k}\\theta(t_k).\\tag{9} Normally, b_k calculated \\theta(t_k)=0 k=1,2,\\ldots,K simplifies . However, a_k computed analogously often use non-zero \\theta enable -called \\beta-spending.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"numerical-integration","dir":"Articles","previous_headings":"","what":"Numerical integration","title":"Numerical integration non-proportional effect size in group sequential design","text":"numerical integration required compute boundary probabilities derive boundaries defined section 19.3 Jennison Turnbull (1999). single change replacement non-proportional effect size assumption equation (3) replacing equivalent equation (4) used constant effect size Jennison Turnbull (1999).","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"demonstrating-calculations","dir":"Articles","previous_headings":"Numerical integration","what":"Demonstrating calculations","title":"Numerical integration non-proportional effect size in group sequential design","text":"walk perform basic calculations . basic scenario one interim analysis addition final analysis. target Type error \\alpha=0.025 Type II error \\beta = 0.1, latter corresponding target 90% power. assume power spending function \\rho=2 bounds. , information fraction t, cumulative spending \\alpha \\times t^2 upper bound \\beta \\times t^2 lower bound. Statistical information 1 first analysis 4 final analysis, leading information fraction t_1= 1/4, t_2=1 interim final, respectively. assume \\theta_1 = .5, \\theta_3=1.5. Set overall study parameters Calculate interim bounds Set numerical integration grid next (final) analysis set table numerical integration continuation region can subsequently use compute boundary crossing probabilities bounds second interim analysis. begin null hypothesis. 
columns resulting table - z - Z-values grid; recall interim test statistic normally distributed variance 1 - w - weights numerical integration - h - weights w times normal density can used numerical integration; demonstrate use probability crossing bound null hypothesis computed follows: now set numerical integration grid alternate hypothesis compute continuation probability. Compute initial iteration analysis 2 bounds initial estimate second analysis bounds computed way actual first analysis bounds. Compute actual boundary crossing probabilities initial approximations get actual boundary crossing probabilities second analysis, update numerical integration grids. null hypothesis, need update interval b2_0. get first order Taylor’s series approximation update bound, need derivative probability respect Z-value cutoff. given subdensity computed grid. , grid contains numerical integration weight w weight times subdensity h. Thus, get subdensity bound, estimated derivative boundary crossing probability, compute: see Taylor’s series update gotten us substantially closer targeted boundary probability. now update lower bound analogous fashion. Confirm gs_power_npe()","code":"# Information for both null and alternative info <- c(1, 4) # information fraction timing <- info / max(info) # Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # Cumulative alpha-spending at IA, Final alphaspend <- alpha * timing^2 # Cumulative beta-spending at IA, Final betaspend <- beta * timing^2 # Average treatment effect at analyses theta <- c(1, 3) / 2 # Upper bound under null hypothesis b1 <- qnorm(alphaspend[1], lower.tail = FALSE) # Lower bound under alternate hypothesis a1 <- qnorm(betaspend[1], mean = sqrt(info[1]) * theta[1]) # Compare probability of crossing vs target for bounds: cat( \"Upper bound =\", b1, \"Target spend =\", alphaspend[1], \"Actual spend =\", pnorm(b1, lower.tail = FALSE) ) #> Upper bound = 2.955167 Target spend = 0.0015625 Actual spend = 0.0015625 # Lower bound under alternate hypothesis a1 <- qnorm(betaspend[1], mean = sqrt(info[1]) * theta[1]) # Compare probability of crossing vs target for bounds: cat( \"Lower bound =\", a1, \"Target spend =\", betaspend[1], \"Actual spend =\", pnorm(a1, mean = sqrt(info[1]) * theta[1]) ) #> Lower bound = -1.997705 Target spend = 0.00625 Actual spend = 0.00625 # Set up grid over continuation region # Null hypothesis grid1_0 <- gsDesign2:::h1(theta = 0, info = info[1], a = a1, b = b1) grid1_0 %>% head() #> $z #> [1] -1.99770547 -1.95718607 -1.91666667 -1.87500000 -1.83333333 -1.79166667 #> [7] -1.75000000 -1.70833333 -1.66666667 -1.62500000 -1.58333333 -1.54166667 #> [13] -1.50000000 -1.45833333 -1.41666667 -1.37500000 -1.33333333 -1.29166667 #> [19] -1.25000000 -1.20833333 -1.16666667 -1.12500000 -1.08333333 -1.04166667 #> [25] -1.00000000 -0.95833333 -0.91666667 -0.87500000 -0.83333333 -0.79166667 #> [31] -0.75000000 -0.70833333 -0.66666667 -0.62500000 -0.58333333 -0.54166667 #> [37] -0.50000000 -0.45833333 -0.41666667 -0.37500000 -0.33333333 -0.29166667 #> [43] -0.25000000 -0.20833333 -0.16666667 -0.12500000 -0.08333333 -0.04166667 #> [49] 0.00000000 0.04166667 0.08333333 0.12500000 0.16666667 0.20833333 #> [55] 0.25000000 0.29166667 0.33333333 0.37500000 0.41666667 0.45833333 #> [61] 0.50000000 0.54166667 0.58333333 0.62500000 0.66666667 0.70833333 #> [67] 0.75000000 0.79166667 0.83333333 0.87500000 0.91666667 0.95833333 #> [73] 1.00000000 1.04166667 1.08333333 1.12500000 1.16666667 1.20833333 #> [79] 1.25000000 1.29166667 1.33333333 
1.37500000 1.41666667 1.45833333 #> [85] 1.50000000 1.54166667 1.58333333 1.62500000 1.66666667 1.70833333 #> [91] 1.75000000 1.79166667 1.83333333 1.87500000 1.91666667 1.95833333 #> [97] 2.00000000 2.04166667 2.08333333 2.12500000 2.16666667 2.20833333 #> [103] 2.25000000 2.29166667 2.33333333 2.37500000 2.41666667 2.45833333 #> [109] 2.50000000 2.54166667 2.58333333 2.62500000 2.66666667 2.70833333 #> [115] 2.75000000 2.79166667 2.83333333 2.87500000 2.91666667 2.93591676 #> [121] 2.95516685 #> #> $w #> [1] 0.013506468 0.054025872 0.027395357 0.055555556 0.027777778 0.055555556 #> [7] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [13] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [19] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [25] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [31] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [37] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [43] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [49] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [55] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [61] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [67] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [73] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [79] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [85] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [91] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [97] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [103] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [109] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [115] 0.027777778 0.055555556 0.027777778 0.055555556 0.020305586 0.025666787 #> [121] 0.006416697 #> #> $h #> [1] 7.325795e-04 3.174772e-03 1.741296e-03 3.821460e-03 2.064199e-03 #> [6] 4.452253e-03 2.396592e-03 5.151272e-03 2.763254e-03 5.918793e-03 #> [11] 3.163964e-03 6.753608e-03 3.597711e-03 7.652841e-03 4.062610e-03 #> [16] 8.611793e-03 4.555835e-03 9.623842e-03 5.073586e-03 1.068040e-02 #> [21] 5.611075e-03 1.177092e-02 6.162560e-03 1.288302e-02 6.721409e-03 #> [26] 1.400261e-02 7.280204e-03 1.511417e-02 7.830885e-03 1.620106e-02 #> [31] 8.364929e-03 1.724594e-02 8.873556e-03 1.823116e-02 9.347967e-03 #> [36] 1.913930e-02 9.779592e-03 1.995361e-02 1.016034e-02 2.065862e-02 #> [41] 1.048287e-02 2.124051e-02 1.074078e-02 2.168766e-02 1.092888e-02 #> [46] 2.199098e-02 1.104332e-02 2.214423e-02 1.108173e-02 2.214423e-02 #> [51] 1.104332e-02 2.199098e-02 1.092888e-02 2.168766e-02 1.074078e-02 #> [56] 2.124051e-02 1.048287e-02 2.065862e-02 1.016034e-02 1.995361e-02 #> [61] 9.779592e-03 1.913930e-02 9.347967e-03 1.823116e-02 8.873556e-03 #> [66] 1.724594e-02 8.364929e-03 1.620106e-02 7.830885e-03 1.511417e-02 #> [71] 7.280204e-03 1.400261e-02 6.721409e-03 1.288302e-02 6.162560e-03 #> [76] 1.177092e-02 5.611075e-03 1.068040e-02 5.073586e-03 9.623842e-03 #> [81] 4.555835e-03 8.611793e-03 4.062610e-03 7.652841e-03 3.597711e-03 #> [86] 6.753608e-03 3.163964e-03 5.918793e-03 2.763254e-03 5.151272e-03 #> [91] 2.396592e-03 4.452253e-03 2.064199e-03 3.821460e-03 1.765603e-03 #> [96] 3.257338e-03 
1.499749e-03 2.757277e-03 1.265110e-03 2.317833e-03 #> [101] 1.059795e-03 1.934941e-03 8.816570e-04 1.604123e-03 7.283858e-04 #> [106] 1.320661e-03 5.975956e-04 1.079765e-03 4.868972e-04 8.767006e-04 #> [111] 3.939593e-04 7.068990e-04 3.165552e-04 5.660405e-04 2.525990e-04 #> [116] 4.501131e-04 2.001694e-04 3.554511e-04 1.151506e-04 1.375807e-04 #> [121] 3.249917e-05 prob_h0_continue <- sum(grid1_0$h) cat( \"Probability of continuing trial under null hypothesis\\n\", \" Using numerical integration:\", prob_h0_continue, \"\\n Using normal CDF:\", pnorm(b1) - pnorm(a1), \"\\n\" ) #> Probability of continuing trial under null hypothesis #> Using numerical integration: 0.9755632 #> Using normal CDF: 0.9755632 grid1_1 <- gsDesign2:::h1(theta = theta[1], info = info[1], a = a1, b = b1) prob_h1_continue <- sum(grid1_1$h) h1mean <- sqrt(info[1]) * theta[1] cat( \"Probability of continuing trial under alternate hypothesis\\n\", \" Using numerical integration:\", prob_h1_continue, \"\\n Using normal CDF:\", pnorm(b1, mean = h1mean) - pnorm(a1, h1mean), \"\\n\" ) #> Probability of continuing trial under alternate hypothesis #> Using numerical integration: 0.986709 #> Using normal CDF: 0.986709 # Upper bound under null hypothesis # incremental spend spend0 <- alphaspend[2] - alphaspend[1] # H0 bound at 2nd analysis; 1st approximation b2_0 <- qnorm(spend0, lower.tail = FALSE) # Lower bound under alternate hypothesis spend1 <- betaspend[2] - betaspend[1] a2_0 <- qnorm(spend1, mean = sqrt(info[2]) * theta[2]) cat(\"Initial bound approximation for 2nd analysis\\n (\", a2_0, \", \", b2_0, \")\\n\", sep = \"\" ) #> Initial bound approximation for 2nd analysis #> (1.681989, 1.987428) # Upper rejection region grid under H0 grid2_0 <- gsDesign2:::hupdate(theta = 0, info = info[2], a = b2_0, b = Inf, im1 = info[1], gm1 = grid1_0) pupper_0 <- sum(grid2_0$h) cat( \"Upper spending at analysis 2\\n Target:\", spend0, \"\\n Using initial bound approximation:\", pupper_0, \"\\n\" ) #> Upper spending at analysis 2 #> Target: 0.0234375 #> Using initial bound approximation: 0.02290683 # First point in grid is at bound # Compute derivative dpdb2 <- grid2_0$h[1] / grid2_0$w[1] # Compute difference between target and actual bound crossing probability pdiff <- spend0 - pupper_0 # Taylor's series update b2_1 <- b2_0 - pdiff / dpdb2 # Compute boundary crossing probability at updated bound cat( \"Original bound approximation:\", b2_0, \"\\nUpdated bound approximation:\", b2_1 ) #> Original bound approximation: 1.987428 #> Updated bound approximation: 1.977726 grid2_0 <- gsDesign2:::hupdate(theta = 0, info = info[2], a = b2_1, b = Inf, im1 = info[1], gm1 = grid1_0) pupper_1 <- sum(grid2_0$h) cat( \"\\nOriginal boundary crossing probability:\", pupper_0, \"\\nUpdated boundary crossing probability:\", pupper_1, \"\\nTarget:\", spend0, \"\\n\" ) #> #> Original boundary crossing probability: 0.02290683 #> Updated boundary crossing probability: 0.02344269 #> Target: 0.0234375 # Lower rejection region grid under H1 grid2_1 <- gsDesign2:::hupdate( theta = theta[2], info = info[2], a = -Inf, b = a2_0, thetam1 = theta[1], im1 = info[1], gm1 = grid1_1 ) plower_0 <- sum(grid2_1$h) # Last point in grid is at bound # Compute derivative indx <- length(grid2_1$h) dpda2 <- grid2_1$h[indx] / grid2_1$w[indx] # Compute difference between target and actual bound crossing probability pdiff <- spend1 - plower_0 # Taylor's series update a2_1 <- a2_0 + pdiff / dpda2 # Compute boundary crossing probability at updated bound cat( \"Original bound 
approximation:\", a2_0, \"\\nUpdated bound approximation:\", a2_1 ) #> Original bound approximation: 1.681989 #> Updated bound approximation: 1.702596 grid2_1 <- gsDesign2:::hupdate( theta = theta[2], info = info[2], a = -Inf, b = a2_1, thetam1 = theta[1], im1 = info[1], gm1 = grid1_1 ) plower_1 <- sum(grid2_1$h) cat( \"\\nOriginal boundary crossing probability:\", plower_0, \"\\nUpdated boundary crossing probability:\", plower_1, \"\\nTarget:\", spend1, \"\\n\" ) #> #> Original boundary crossing probability: 0.09035972 #> Updated boundary crossing probability: 0.09379707 #> Target: 0.09375 gs_power_npe( theta = theta, theta1 = theta, info = info, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfPower, total_spend = 0.025, param = 2), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfPower, total_spend = 0.1, param = 2) ) #> # A tibble: 4 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 2.96 0.00704 0.5 0.5 0.25 1 1 1 #> 2 2 upper 1.98 0.845 1.5 1.5 1 4 4 4 #> 3 1 lower -2.00 0.00625 0.5 0.5 0.25 1 1 1 #> 4 2 lower 1.70 0.100 1.5 1.5 1 4 4 4"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Futility bounds at design and analysis under non-proportional hazards","text":"set futility bounds non-proportional hazards assumption. consider methods presented Korn Freidlin (2018) setting bounds consider alternate futility bound based \\beta-spending delayed crossing treatment effect simplify implementation. Finally, show update \\beta-spending bound based blinded interim data. consider example reproduce line Korn Freidlin (2018) Table 1 alternative futility bounds considered.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"initial-design-set-up-for-fixed-analysis","dir":"Articles","previous_headings":"Overview","what":"Initial design set-up for fixed analysis","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Korn Freidlin (2018) considered delayed effect scenarios proposed futility bound modification earlier method proposed Wieand, Schroeder, O’Fallon (1994). begin enrollment failure rate assumptions Korn Freidlin (2018) based example Chen (2013). now derive fixed sample size based assumptions. 
Ideally, allow targeted event count variable follow-fixed_design_ahr() study duration computed automatically.","code":"# Enrollment assumed to be 680 patients over 12 months with no ramp-up enroll_rate <- define_enroll_rate(duration = 12, rate = 680 / 12) # Failure rates ## Control exponential with median of 12 mos ## Delayed effect with HR = 1 for 3 months and HR = .693 thereafter ## Censoring rate is 0 fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = -log(.5) / 12, hr = c(1, .693), dropout_rate = 0 ) ## Study duration was 34.8 in Korn & Freidlin Table 1 ## We change to 34.86 here to obtain 512 expected events more precisely study_duration <- 34.86 fixedevents <- fixed_design_ahr( alpha = 0.025, power = NULL, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration ) fixedevents %>% summary() %>% select(-Bound) %>% as_gt(footnote = \"Power based on 512 events\") %>% fmt_number(columns = 3:4, decimals = 2) %>% fmt_number(columns = 5:6, decimals = 3)"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"modified-wieand-futility-bound","dir":"Articles","previous_headings":"","what":"Modified Wieand futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Wieand, Schroeder, O’Fallon (1994) rule recommends stopping 50% planned events accrue observed HR > 1. kornfreidlin2018 modified adding second interim analysis 75% planned events stop observed HR > 1 implemented requiring trend favor control direction Z-bound 0 resulting Nominal p bound 0.5 interim analyses table . fixed bound specified gs_b() function upper lower corresponding parameters upar upper (efficacy) bound lpar lower (futility) bound. final efficacy bound 1-sided nominal p-value 0.025; futility bound lowers 0.0247 noted lower-right-hand corner table . < 0.025 since probability computed binding assumption. arbitrary convention; futility bound ignored, computation yields 0.025. last row Alternate hypothesis see power 88.44%. 
Korn Freidlin (2018) computed 88.4% power design 100,000 simulations estimate standard error power calculation 0.1%.","code":"wieand <- gs_power_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, upper = gs_b, upar = c(rep(Inf, 2), qnorm(.975)), lower = gs_b, lpar = c(0, 0, -Inf), event = 512 * c(.5, .75, 1) ) wieand %>% summary() %>% as_gt( title = \"Group sequential design with futility only at interim analyses\", subtitle = \"Wieand futility rule stops if HR > 1\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"beta-spending-futility-bound-with-ahr","dir":"Articles","previous_headings":"","what":"Beta-spending futility bound with AHR","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Need summarize .","code":"betaspending <- gs_power_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, upper = gs_b, upar = c(rep(Inf, 2), qnorm(.975)), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), event = 512 * c(.5, .75, 1), test_lower = c(TRUE, TRUE, FALSE) ) betaspending %>% summary() %>% as_gt( title = \"Group sequential design with futility only\", subtitle = \"Beta-spending futility bound\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"classical-beta-spending-futility-bound","dir":"Articles","previous_headings":"","what":"Classical beta-spending futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"classical \\beta-spending bound assume constant treatment effect time using proportional hazards assumption. use average hazard ratio fixed design analysis purpose.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"korn-and-freidlin-futility-bound","dir":"Articles","previous_headings":"","what":"Korn and Freidlin futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Korn Freidlin (2018) futility bound set least 50% expected events occurred least two thirds observed events occurred later 3 months randomization. expected timing demonstrated .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"accumulation-of-events-by-time-interval","dir":"Articles","previous_headings":"Korn and Freidlin futility bound","what":"Accumulation of events by time interval","title":"Futility bounds at design and analysis under non-proportional hazards","text":"consider accumulation events time occur -effect interval first 3 months randomization events time interval. done overall trial without dividing treatment group using gsDesign2::AHR() function. consider monthly accumulation events 34.86 months planned trial duration. note summary early expected events events first 3 months -study expected prior first interim analysis. can look proportion events first 3 months follows: Korn Freidlin (2018) bound targeted timing 50% events occurred least 2/3 3 months enrollment 3 months delayed effect period. 
see 1/3 events still within 3 months enrollment month 20.","code":"event_accumulation <- pw_info( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(1:34, 34.86), ratio = 1 ) head(event_accumulation, n = 7) %>% gt() event_accumulation %>% group_by(time) %>% summarize(`Total events` = sum(event), \"Proportion early\" = first(event) / `Total events`) %>% ggplot(aes(x = time, y = `Proportion early`)) + geom_line()"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"korn-and-freidlin-bound","dir":"Articles","previous_headings":"Korn and Freidlin futility bound","what":"Korn and Freidlin bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"bound proposed Korn Freidlin (2018)","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Power evaluation with spending bounds","text":"vignette covers compute power Type error design derived spending bound. write general non-constant treatment effect using gs_design_npe() derived design one parameter setting computing power another setting. use trial binary endpoint enable full illustration.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"scenario-for-consideration","dir":"Articles","previous_headings":"","what":"Scenario for Consideration","title":"Power evaluation with spending bounds","text":"consider scenario largely based CAPTURE study (Capture Investigators et al. (1997)) primary endpoint composite death, acute myocardial infarction need recurrent percutaneous intervention within 30 days randomization. detailed introduction trial listed follows. consider 2-arm trial experimental arm control arm. assume K=3 analyses 350, 700, 1400 patients observed equal randomization treatment groups. primary endpoint trial binary indicator participant failed outcome. case, consider parameter \\theta = p_1 - p _2 p_1 denotes probability trial participant control group experiences failure p_2 represents probability trial participant experimental group. study designed approximately 80% power (Type II error \\beta = 1 - 0.8 = 0.2) 2.5% one-sided Type error (\\alpha = 0.025) detect reduction 15% event rate (p_1 = 0.15) control group 10% (p_2 = 0.1) experimental group. example, parameter interest \\theta = p_1 - p_2. denote alternate hypothesis H_1: \\theta = \\theta_1= p_1^1 - p_2^1 = 0.15 - 0.10 = 0.05 null hypothesis H_0: \\theta = \\theta_0 = 0 = p_1^0 - p_2^0 p^0_1 = p^0_2= (p_1^1+p_2^1)/2 = 0.125 laid Lachin (2009). note considered success outcome objective response oncology study, let p_1 denote experimental group p_2 control group response rate. 
Thus, always set notation p_1>p_2 represents superiority experimental group.","code":"p0 <- 0.15 # assumed failure rate in control group p1 <- 0.10 # assumed failure rate in experimental group alpha <- 0.025 # type I error beta <- 0.2 # type II error for 80% power"},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"notations","dir":"Articles","previous_headings":"","what":"Notations","title":"Power evaluation with spending bounds","text":"assume k: index analysis, .e., k = 1, \\ldots, K; : index arm, .e., = 1 control group = 2 experimental group; n_{ik}: number subjects group analysis k; n_k: number subjects analysis k, .e., n_k = n_{1k} + n_{2k}; X_{ij}: independent random variable whether j-th subject group response, .e, X_{ij} \\sim \\text{Bernoulli}(p_i); Y_{ik}: number subject response group analysis k, .e., Y_{ik} = \\sum_{j = 1}^{n_{ik}} X_{ij};","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"statistical-testing","dir":"Articles","previous_headings":"","what":"Statistical Testing","title":"Power evaluation with spending bounds","text":"section, discuss estimation statistical information variance proportion null hypothesis H_0: p_1^0 = p_2^0 \\equiv p_0 alternative hypothesis H_1: \\theta = \\theta_1= p_1^1 - p_2^1. , introduce test statistics group sequential design.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"estimation-of-statistical-information-under-h1","dir":"Articles","previous_headings":"Statistical Testing","what":"Estimation of Statistical Information under H1","title":"Power evaluation with spending bounds","text":"alternative hypothesis, one can estimate proportion failures group analysis k \\hat{p}_{ik} = Y_{ik}/n_{ik}. note variance \\text{Var}(\\hat p_{ik})=\\frac{p_{}(1-p_i)}{n_{ik}}, consistent estimator \\widehat{\\text{Var}}(\\hat p_{ik})=\\frac{\\hat p_{ik}(1-\\hat p_{ik})}{n_{ik}}, = 1, 2 k = 1, 2, \\ldots, K. Letting \\hat\\theta_k = \\hat p_{1k} - \\hat p_{2k}, also \\sigma^2_k \\equiv \\text{Var}(\\hat\\theta_k) = \\frac{p_1(1-p_1)}{n_{1k}}+\\frac{p_2(1-p_2)}{n_{2k}}, consistent estimator \\hat\\sigma^2_k = \\frac{\\hat p_{1k}(1-\\hat p_{1k})}{n_{1k}}+\\frac{\\hat p_{2k}(1-\\hat p_{2k})}{n_{2k}}, Statistical information quantities corresponding estimators denoted \\left\\{ \\begin{align} \\mathcal{}_k = &1/\\sigma^2_k,\\\\ \\mathcal{\\hat }_k = &1/\\hat \\sigma^2_k, \\end{align} \\right.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"estimation-of-statistical-information-under-h0","dir":"Articles","previous_headings":"Statistical Testing","what":"Estimation of Statistical Information under H0","title":"Power evaluation with spending bounds","text":"null hypothesis, one can estimate proportion failures group analysis k estimate \\hat{p}_{0k} = \\frac{Y_{1k}+ Y_{2k}}{n_{1k}+ n_{2k}} = \\frac{n_{1k}\\hat p_{1k} + n_{2k}\\hat p_{2k}}{n_{1k} + n_{2k}}. corresponding null hypothesis estimator \\hat\\sigma^2_{0k} \\equiv \\widehat{\\text{Var}}(\\hat{p}_{0k}) = \\hat p_{0k}(1-\\hat p_{0k})\\left(\\frac{1}{n_{1k}}+ \\frac{1}{n_{2k}}\\right), k = 1,2, \\ldots, K. Statistical information quantities corresponding estimators denoted \\left\\{ \\begin{align} \\mathcal{}_{0k} =& 1/ \\sigma^2_{0k},\\\\ \\mathcal{\\hat }_{0k} =& 1/\\hat \\sigma^2_{0k}, \\end{align} \\right. 
k = 1, 2, \\ldots, K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"testing-statistics","dir":"Articles","previous_headings":"Statistical Testing","what":"Testing Statistics","title":"Power evaluation with spending bounds","text":"Testing, recommended Lachin (2009), done large sample test null hypothesis variance estimate without continuity correction: Z_k = \\hat\\theta_k/\\hat\\sigma_{0k}=\\frac{\\hat p_{1k} - \\hat p_{2k}}{\\sqrt{(1/n_{1k}+ 1/n_{2k})\\hat p_{0k}(1-\\hat p_{0k})} }, asymptotically \\text{Normal}(0,1) p_1 = p_2 \\text{Normal}(0, \\sigma_{0k}^2/\\sigma_k^2) generally p_1, p_2 k = 1, 2, \\ldots, K. assume constant proportion \\xi_i randomized group =1,2. Thus, Z_k \\approx \\frac{\\sqrt{n_k}(\\hat p_{1k} - \\hat p_{2k})}{\\sqrt{(1/\\xi_1+ 1/\\xi_2)p_{0}(1- p_0)} }. , asymptotic distribution Z_k \\sim \\text{Normal} \\left( \\sqrt{n_k}\\frac{p_1 - p_2}{\\sqrt{(1/\\xi_1+ 1/\\xi_2) p_0(1- p_0)} }, \\sigma^2_{0k}/\\sigma^2_{1k} \\right), note \\sigma^2_{0k}/\\sigma^2_{1k} = \\frac{ p_0(1-p_0)\\left(1/\\xi_1+ 1/\\xi_2\\right)}{p_1(1-p_1)/\\xi_1+p_2(1-p_2)/\\xi_2}. also note definition \\sigma^2_{0k}/\\sigma^2_{1k}=\\mathcal I_k/\\mathcal I_{0k}. Based input p_1, p_2, n_k, \\xi_1, \\xi_2 = 1-\\xi_1 compute \\theta, \\mathcal{}_k, \\mathcal{}_{0k} k = 1, 2, \\ldots, K. note \\chi^2=Z^2_k \\chi^2 test without continuity correction recommended Gordon Watson (1996). Note finally extends straightforward way non-inferiority test Farrington Manning (1990) null hypothesis \\theta = p_1 - p_2 - \\delta = 0 non-inferiority margin \\delta > 0; \\delta < 0 correspond referred super-superiority Chan (2002), requiring experimental therapy shown superior control least margin -\\delta>0.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"power-calculations","dir":"Articles","previous_headings":"","what":"Power Calculations","title":"Power evaluation with spending bounds","text":"begin developing function gs_info_binomial() calculate statistical information discussed . CAPTURE trial, can plug gs_power_npe() intended spending functions. 
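Before developing that function, here is a quick numeric check of the asymptotic mean of Z_k derived above, a minimal sketch assuming the values used in this article (p_1 = 0.15, p_2 = 0.10, p_0 = 0.125, equal randomization with \xi_1 = \xi_2 = 0.5, and n_k = 350, 700, 1400):

```r
# Asymptotic mean of Z_k: sqrt(n_k) * (p1 - p2) / sqrt((1/xi1 + 1/xi2) * p0 * (1 - p0))
p1 <- 0.15
p2 <- 0.10
p0 <- (p1 + p2) / 2
xi1 <- 0.5
xi2 <- 0.5
n_k <- c(350, 700, 1400)
sqrt(n_k) * (p1 - p2) / sqrt((1 / xi1 + 1 / xi2) * p0 * (1 - p0))
#> [1] 1.414214 2.000000 2.828427
```

These values equal \sqrt{\mathcal{I}_{0k}}\theta with \mathcal{I}_{0k} = n_k / 0.4375, i.e., 800, 1600, and 3200.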
begin power alternate hypothesis Now examine information smaller assumed treatment difference alternative:","code":"gs_info_binomial <- function(p1, p2, xi1, n, delta = NULL) { if (is.null(delta)) delta <- p1 - p2 # Compute (constant) effect size at each analysis theta theta <- rep(p1 - p2, length(n)) # compute null hypothesis rate, p0 p0 <- xi1 * p1 + (1 - xi1) * p2 # compute information based on p1, p2 info <- n / (p1 * (1 - p1) / xi1 + p2 * (1 - p2) / (1 - xi1)) # compute information based on null hypothesis rate of p0 info0 <- n / (p0 * (1 - p0) * (1 / xi1 + 1 / (1 - xi1))) # compute information based on H1 rates of p1star, p2star p1star <- p0 + delta * xi1 p2star <- p0 - delta * (1 - xi1) info1 <- n / (p1star * (1 - p1star) / xi1 + p2star * (1 - p2star) / (1 - xi1)) out <- tibble( Analysis = seq_along(n), n = n, theta = theta, theta1 = rep(delta, length(n)), info = info, info0 = info0, info1 = info1 ) return(out) } h1 <- gs_info_binomial(p1 = .15, p2 = .1, xi1 = .5, n = c(350, 700, 1400)) h1 %>% gt() gs_power_npe( theta = h1$theta, theta1 = h1$theta, info = h1$info, info0 = h1$info0, info1 = h1$info1, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, param = -2, total_spend = 0.2) ) %>% gt() %>% fmt_number(columns = 3:10, decimals = 4) h <- gs_info_binomial(p1 = .15, p2 = .12, xi1 = .5, delta = .05, n = c(350, 700, 1400)) gs_power_npe( theta = h$theta, theta1 = h$theta1, info = h$info, info0 = h$info0, info1 = h$info1, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, param = -2, total_spend = 0.2) ) %>% gt() %>% fmt_number(columns = 3:10, decimals = 4)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Group sequential design for binary outcomes","text":"consider group sequential design examining risk difference two treatment groups binary outcome. several issues consider: measure treatment difference natural parameter; focus risk difference. Incorporation null alternate hypothesis variances. Superiority, non-inferiority super-superiority designs. Stratified populations. Fixed group sequential designs. single stratum designs, focus sample size power using method Farrington Manning (1990) trial test difference two binomial event rates. routine can used test superiority, non-inferiority super-superiority. design tests superiority, methods consistent Fleiss, Tytun, Ury (1980), without continuity correction. Methods sample size power gsDesign::nBinomial() testing risk-difference scale single stratum. also consistent Hmisc R package routines bsamsize() bpower() superiority designs. trials multiple strata, testing risk difference often done weighting stratum according inverse variance (Mantel Haenszel (1959)). Since risk differences may also assumed different different strata, also explore weighting strata sample sizes Mehrotra Railkar (2000). focus sample sizes large enough asymptotic theory work well without continuity corrections. concepts incorporated following functions intended use fixed group sequential designs: gs_info_rd() support asymptotic variance statistical information calculation. gs_power_rd() support power calculations. gs_design_rd() support sample size calculations. 
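As a preview of how these functions fit together, below is a minimal sketch of a single-analysis, unstratified power calculation. The rate and sample-size values are illustrative only, and the argument pattern is assumed to mirror the gs_info_rd() and gs_design_rd() calls shown in the worked examples later in this article:

```r
library(gsDesign2)
library(tibble)

# Illustrative values only: 40% control vs. 28% experimental event rate,
# 500 subjects at a single analysis, superiority test (rd0 = 0)
gs_power_rd(
  p_c = tibble(stratum = "All", rate = 0.4),
  p_e = tibble(stratum = "All", rate = 0.28),
  n = tibble(stratum = "All", n = 500, analysis = 1),
  rd0 = 0,
  ratio = 1,
  weight = "unstratified",
  upper = gs_b, upar = qnorm(0.975), # fixed efficacy bound at one-sided 0.025
  lower = gs_b, lpar = -Inf          # no futility bound
)
```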
Simulation used throughout check examples presented.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"notation","dir":"Articles","previous_headings":"","what":"Notation","title":"Group sequential design for binary outcomes","text":"K: total number analyses (including final analysis) group sequential design. fixed design, K= 1. S: total number strata. population un-stratified population, S=1. w_{s,k}: underlying weight assigned s-th strata k-th analysis. SWITCH ORDER s, k w? \\widehat w_{s,k}: estimated weight assigned s-th strata k-th analysis. N_{C,k,s}, N_{E,k,s}: planned sample size control/treatment group k-th analysis s-th strata. \\widehat N_{C,k,s}, \\widehat N_{E,k,s}: observed sample size control/treatment group k-th analysis s-th strata. r: planned randomization ratio, .e., r = N_{E,k,s} / N_{C,k,s} \\;\\; \\forall k = 1, \\ldots, K \\;\\; \\text{} \\;\\; s = 1, \\ldots, S. p_{C,s}, p_{E,s}: planned rate control/treatment arm, .e., independent observations control/treatment group binary outcome observed probability p_{C,s} k-th analysis s-th strata. d: indicator whether outcome failure (bad outcome) response (good outcome), .e., d = \\left\\{ \\begin{array}{lll} -1 & \\text{} p_{C,s} < p_{E,s} & \\text{control arm better}\\\\ 1 & \\text{} p_{C,s} > p_{E,s} & \\text{treatment arm better}\\\\ \\end{array} \\right. assume \\exists s^* \\\\{1, \\ldots, S\\}, s.t., p_{C,s^*} < p_{E,s^*}, p_{C,s} < p_{E,s}, \\forall s \\\\{1, \\ldots, S\\}, vice versa. X_{C,k,s}, X_{E,k,s}: random variables indicating number subjects failed control/treatment arm, .e., X_{C,k,s} \\sim \\text{Binomial}(N_{C,k,s}, p_{C,k,s}), X_{E,k,s} \\sim \\text{Binomial}(N_{E,k,s}, p_{E,k,s}) k-th analysis s-th strata. x_{C,k,s}, x_{E,k,s}: observed outcome X_{C, k, s}, X_{E, k, s} k-th analysis s-th strata, respectively. \\widehat p_{C,k,s}, \\widehat p_{E,k,s}: observed rates control/treatment group k-th analysis s-th strata, .e., \\widehat p_{C,k,s} = x_{C,k,s} / \\widehat N_{C,k,s}.\\\\ \\widehat p_{E,k,s} = x_{E,k,s} / \\widehat N_{E,k,s}. \\delta_{s}^{null}: planned risk difference H_0 k-th analysis s-th strata. \\delta_{s}: planned risk difference H_1 k-th analysis s-th strata denoted \\delta_{s} = |p_{C,s} - p_{E,s}|. \\hat\\delta_{s}: estimation risk difference \\widehat\\theta_{k,s} = |\\widehat p_{C,k,s} - \\widehat p_{E,k,s}| E(\\widehat\\theta_{k,s}) = \\theta_{s}, \\;\\forall k = 1, \\ldots, K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"testing","dir":"Articles","previous_headings":"","what":"Testing","title":"Group sequential design for binary outcomes","text":"test statistics k-th analysis Z_{k} = \\frac{ \\sum_{s=1}^S \\widehat w_{s,k} \\; |\\widehat \\delta_{k,s} - \\delta_{s}^{null} | }{ \\sqrt{\\sum_{s=1}^S \\widehat w_{s,k}^2 \\widehat\\sigma_{H_0,k,s}^2} } \\widehat\\sigma^2_{k,s} = \\widehat{\\text{Var}}(\\widehat p_C -\\widehat p_E). value \\widehat\\sigma^2_{k,s} depends hypothesis design, .e., whether superiority design, non-inferiority design, super-superiority design. discuss \\widehat\\sigma^2_{k,s} following 3 subsections.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"superiority-design","dir":"Articles","previous_headings":"Testing","what":"Superiority Design","title":"Group sequential design for binary outcomes","text":"superiority design (\\delta_{s}^{null} = 0) show experimental group superior control group thresholds. 
hypothesis H_0: \\delta_{s} = 0 \\text{ vs. } H_1: \\delta_{s} > 0, \\; \\forall k = 1, \\ldots, K, s = 1, \\ldots, S Variance per strata per analysis: null hypothesis, \\begin{array}{ll} \\sigma^2_{H_0,k,s} & = \\text{Var}(p_C - p_E | H_0) = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right), \\\\ \\widehat\\sigma^2_{H_0,k,s} & = \\widehat{\\text{Var}}(\\hat p_C - \\hat p_E | H_0) = \\widehat p_{k,s}^{pool} \\left(1 - \\widehat p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right), \\end{array} p_{k,s}^{pool} = (p_{C,s} N_{C,k,s} + p_{E,s} N_{E,k,s}) / (N_{C,k,s} + N_{E,k,s}) \\widehat p_{k,s}^{pool} = (x_{C,k,s} + x_{E,k,s}) / (\\widehat N_{C,k,s} + \\widehat N_{E,k,s}). alternative hypothesis, \\begin{array}{ll} \\sigma_{H_1,k,s}^2 & = \\text{Var}(p_C - p_E | H_1) = \\frac{p_{C,s} (1- p_{C,s})}{N_{C,k,s}} + \\frac{p_{E,s} (1 - p_{E,s})}{N_{E,k,s}} \\\\ \\widehat\\sigma_{H_1,k,s}^2 & = \\widehat{\\text{Var}}(\\hat p_C - \\hat p_E | H_1) = \\frac{\\widehat p_{C,k,s} (1- \\widehat p_{C,k,s})}{N_{C,k,s}} + \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{N_{E,k,s}} \\end{array} \\widehat p_{C,k,s} = x_{C,k,s} / N_{C,k,s} \\text{ } \\widehat p_{E,k,s} = x_{E,k,s} / N_{E,k,s}. Testing one-sided level \\alpha \\(0, 1) null hypothesis rejected Z_k cross upper boundary. upper boundary can either fixed derived spending functions. Standardized treatment effect per analysis: null hypothesis, \\theta_{H_0,k} = 0 \\\\ \\widehat \\theta_{H_0,k} = 0 alternative hypothesis, \\begin{array}{ll} \\theta_{H_1,k} & = \\frac{\\sum_{s=1}^S w_{k,s} (p_{C,s} - p_{E,s})}{\\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\sigma_{H_1, k,s}^2}}\\\\ \\widehat\\theta_{H_1,k} & = \\frac{ \\sum_{s=1}^S \\widehat w_{k,s} (\\widehat p_C - \\widehat p_E) }{ \\sqrt{\\sum_{s=1}^S \\widehat w_{k,s}^2 \\widehat\\sigma_{H_1, k,s}^2} }. \\end{array} Standardized information per analysis: Lachin (2009) Lachin (1981) provide fixed sample size calculations based values \\psi_0 null hypothesis \\psi_1 alternate hypothesis. propose using variance calculations compute statistical information group sequential design apply formulation power sample size calculation vignette Computing Bounds Non-Constant Treatment Effect. 
null hypothesis, \\begin{array}{ll} \\mathcal I_{H0,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{C, k, s}} + w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{E, k, s}} \\right]^{-1} \\\\ \\widehat{\\mathcal }_{H0,k} & = \\left[ \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{k,s}^{pool} (1 - \\widehat p_{k,s}^{pool})}{\\widehat N_{C,k,s}} + \\widehat w_{k,s}^2 \\frac{\\widehat p_{k,s}^{pool} (1 - \\widehat p_{k,s}^{pool})}{\\widehat N_{C,k,s}} \\right]^{-1} \\end{array} alternative hypothesis, \\begin{array}{ll} \\mathcal I_{H1,k} = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{C,k,s} (1 - p_{C,k,s})}{N_{C,k,s}} + \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{E,k,s} (1 - p_{E,k,s})}{N_{E,k,s}} \\right]^{-1}\\\\ \\widehat{\\mathcal }_{H1,k} = \\left[ \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{C,k,s} (1 - \\widehat p_{C,k,s})}{\\widehat N_{C,k,s}} + \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{\\widehat N_{E,k,s}} \\right]^{-1} \\end{array}","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"super-superiority-design","dir":"Articles","previous_headings":"Testing","what":"Super-Superiority Design","title":"Group sequential design for binary outcomes","text":"hypothesis super-superiority design H_0: \\delta_{k,s} = \\delta_{k,s}^{null} \\;\\; vs. \\;\\; H_1: \\delta > \\delta_{k,s}^{null} \\text{ } \\delta_{k,s}^{null} > 0. \\theta_{k,s_1}^{null} = \\theta_{k,s_2}^{null} \\theta_{k,s_1}^{null} \\neq \\theta_{k,s_2}^{null} s_1 \\neq s_2. null hypothesis \\theta_{0,k,s} \\neq 0, estimation rates \\widehat p_{C0,k,s}, \\widehat p_{E0,k,s} satisfy \\left\\{ \\begin{array}{l} \\widehat p_{C0,k,s} = \\widehat p_{E0,k,s} + d_{k,s} \\times \\delta_{k,s}^{null} \\\\ \\widehat p_{C0,k,s} + r\\widehat p_{E0,k,s} = \\widehat p_{C,k,s} + r\\widehat p_{E,k,s} . \\end{array} \\right. Solving 2 equations 2 unknowns yields \\left\\{ \\begin{array}{l} \\widehat p_{E0,k,s} & = (\\widehat p_{C,k,s} + r \\widehat p_{E,k,s} - d_{k,s} \\delta_{k,s}^{null}) / (r + 1)\\\\ \\widehat p_{C0,k,s} & = \\widehat p_{E0,k,s} + d_{k,s} \\delta_{k,s}^{null}. \\end{array} \\right. Variance per strata per analysis: H_0, \\hat\\sigma^2_{H_0,k,s} = \\frac{\\widehat p_{C0,k,s}(1- \\widehat p_{C0,k,s})}{N_{C,k,s}} + \\frac{ \\widehat p_{E0,k,s} (1 - \\widehat p_{E0,k,s})}{N_{E,k,s}}. H_1, \\widehat\\sigma_{H_1,k,s}^2 = \\frac{\\widehat p_{C,k,s} (1- \\widehat p_{C,k,s})}{N_{C,k,s}} + \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{N_{E,k,s}}. Standardized treatment effect per analysis: null hypothesis, \\widehat \\theta_{H_0,k} = \\frac{ \\sum_{s=1}^S w_{k,s} \\delta_{s,k}^{null} }{ \\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\widehat \\sigma_{H_0,k,s}}^2 }. alternative hypothesis, \\widehat \\theta_{H_1} = \\frac{ \\sum_{s=1}^S w_{k,s} d_{k,s} \\times (\\widehat p_{C,k,s} - \\widehat p_{E,k,s}) }{ \\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\widehat \\sigma_{H_1,k,s}^2} }. Standardized information per analysis: null hypothesis, \\widehat{\\mathcal }_{H0,k} = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{\\bar p_{C0,s} (1 - \\bar p_{C0,s})}{N_{C,s}} + w_{k,s}^2\\frac{\\bar p_{E0,s} (1 - \\bar p_{E0,s})}{N_{E,s}} \\right]^{-1}. 
alternative hypothesis, \\widehat{\\mathcal }_{H1,k} = \\left[ \\sum_{s=1}^S \\left( w_{k,s}^2 \\frac{\\bar p_{C,k,s} (1 - \\bar p_{C,k,s})}{N_{C,k,s}} + w_{k,s}^2 \\frac{\\bar p_{E,k,s} (1 - \\bar p_{E,k,s})}{N_{E,k,s}} \\right) \\right]^{-1}.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"non-inferiority-design","dir":"Articles","previous_headings":"Testing","what":"Non-inferiority Design","title":"Group sequential design for binary outcomes","text":"non-inferiority Design means , treatment group definitely better control group, unacceptably worse. hypothesis H_0: \\delta_{k,s} = \\delta_{k,s}^{null} \\;\\; vs. \\;\\; H_1: \\delta_{k,s} > \\delta_{k,s}^{null} \\delta_{k,s}^{null} <0. variance, standardized treatment effect statistical information super-superiority design setting \\delta_{k,s}^{null} negative numbers.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"weighting-options","dir":"Articles","previous_headings":"","what":"Weighting Options","title":"Group sequential design for binary outcomes","text":"previously noted, consider weighting based either inverse-variance weights (Mantel Haenszel (1959)) strata sample size weights (Mehrotra Railkar (2000)). Inverse-variance weights (INVAR): w_{s,k} = \\frac{1/\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\sigma^2_{s,k}}. \\\\ \\widehat w_{s,k} = \\frac{1/\\widehat\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\widehat\\sigma^2_{s,k}}. \\widehat\\sigma_{s,k}^2 \\\\{\\widehat\\sigma_{H_0, k,s}^2, \\widehat\\sigma_{H_1, k,s}^2 \\} depending information scale info_scale = ... gs_info_rd(), gs_power_rd() gs_design_rd(). Sample-Size Weights (SS): w_{s,k} = \\frac{ (N_{C, s, k} \\; N_{E, s, k}) / (N_{C, s, k} + N_{E, s, k}) }{ \\sum_{s=1}^S (N_{C, s, k} \\; N_{E, s, k}) / (N_{C, s, k} + N_{E, s, k}) },\\\\ \\widehat w_{s,k} = \\frac{ (\\widehat N_{C, s, k} \\; \\widehat N_{E, s, k}) / (\\widehat N_{C, s, k} + \\widehat N_{E, s, k}) }{ \\sum_{s=1}^S (\\widehat N_{C, s, k} \\; \\widehat N_{E, s, k}) / (\\widehat N_{C, s, k} + \\widehat N_{E, s, k}) }, N_{C,s,k}, N_{E,s,k} planned sample size s-th strata k-th analysis control group experimental group, respectively. \\widehat N_{C,s,k}, \\widehat N_{E,s,k} observed sample size s-th strata k-th analysis control group experimental group, respectively.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"simulations","dir":"Articles","previous_headings":"","what":"Simulations","title":"Group sequential design for binary outcomes","text":"quick 20,000 simulations compare density histogram outcomes standard normal density. Assume r=1, d = 1, p_C=p_E=0.125, N=200. compute \\sigma 0.047. 
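As a quick check of the \sigma value just quoted, a minimal sketch assuming p_C = p_E = 0.125, r = 1, and N = 200 (so n_c = n_e = 100):

```r
# Standard error of the risk-difference estimate under the stated assumptions
p <- 0.125
n_c <- 100
n_e <- 100
sqrt(p * (1 - p) * (1 / n_c + 1 / n_e))
#> [1] 0.04677072
```

which rounds to the 0.047 stated above.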
Even huge sample size normal density fits quite well flatness middle.","code":"# Hypothesized failure rate p <- .125 # Other parameters set.seed(123) r <- 1 n <- 200 n_c <- n / (r + 1) n_e <- r * n / (r + 1) library(ggplot2) # Generate random counts of events for each treatment x_c <- rbinom(n = 20000, size = n_c, prob = p) x_e <- rbinom(n = 20000, size = n_e, prob = p) # Treatment difference estimate thetahat <- x_c / n_c - x_e / n_e # Standard error under H0 pbar <- (x_c + x_e) / n se0 <- sqrt(pbar * (1 - pbar) * (1 / n_c + 1 / n_e)) # Z to test H0 z <- thetahat / se0 x <- seq(-4, 4, .1) se0a <- sqrt(p * (1 - p) * (1 / n_c + 1 / n_e)) y <- data.frame(z = x, Density = dnorm(x = x, mean = 0, sd = 1)) ggplot() + geom_histogram(data = data.frame(z), aes(x = z, y = ..density..), color = 1, fill = \"white\") + geom_line(data = y, aes(x = z, y = Density), linetype = 1) + ylab(\"Density\") + ggtitle(\"Binomial outcomes by simulation vs. asymptotic normal density\", subtitle = \"Histogram of 20,000 simulations\" ) #> Warning: The dot-dot notation (`..density..`) was deprecated in ggplot2 3.4.0. #> ℹ Please use `after_stat(density)` instead. #> This warning is displayed once every 8 hours. #> Call `lifecycle::last_lifecycle_warnings()` to see where this warning was #> generated."},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"unstratified-fixed-design","dir":"Articles","previous_headings":"Examples","what":"Unstratified Fixed Design","title":"Group sequential design for binary outcomes","text":"example discussed section unstratified fixed design equal sized groups detect 30% reduction mortality associated congestive heart failure, 1-year mortality control group assumed greater 0.4. p_C=0.4, p_E = .28. null hypothesis, assume p_C=p_E =0.34. desire 90% power two-sided test two proportions \\alpha = 0.05. like calculate sample size achieve 90% power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign2","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"gsDesign2","title":"Group sequential design for binary outcomes","text":"First, set parameters. calculate variance H_0 H_1. mathematical formulation shown follows. \\begin{array}{ll} \\sigma^2_{H_0} = p^{pool} \\left(1 - p^{pool} \\right) \\left(\\frac{1}{N_C} + \\frac{1}{N_{E}} \\right) = p^{pool} \\left(1 - p^{pool} \\right) \\left(\\frac{1}{N \\xi_C} + \\frac{1}{N \\xi_E} \\right) \\overset{r=1}{=} p^{pool} \\left(1 - p^{pool} \\right) \\frac{4}{N} \\\\ \\sigma^2_{H_1} = \\frac{p_C \\left(1 - p_C \\right)}{N_C} + \\frac{p_E \\left(1 - p_E \\right)}{N_E} = \\frac{p_C \\left(1 - p_C \\right)}{N \\xi_C} + \\frac{p_E \\left(1 - p_E \\right)}{N \\xi_E} \\overset{r=1}{=} \\left[ p_C \\left(1 - p_C \\right) + p_E \\left(1 - p_E \\right) \\right] \\frac{2}{N} \\end{array} calculation results Next, calculate standardized treatment effect H_0 H_1, whose mathematical formulation \\begin{array}{ll} \\theta_{H_0} = 0; \\\\ \\theta_{H_1} = \\frac{|p_c - p_e|}{\\sigma_{H_1}} \\end{array}. calculation results logic implemented function gs_info_rd(). plugging theta info gs_design_npe(), one can calculate sample size achieve 90% power. 
logic implement gs_design_rd() calculate sample size given fixed power one-step.","code":"p_c <- .28 p_e <- .4 p_pool <- (p_c + p_e) / 2 n <- 1 ratio <- 1 n_c <- n / (1 + ratio) n_e <- n_c * ratio sigma_h0 <- sqrt(p_pool * (1 - p_pool) * 4 / n) sigma_h1 <- sqrt((p_c * (1 - p_c) + p_e * (1 - p_e)) * 2 / n) info_h0 <- 1 / (sigma_h0^2) info_h1 <- 1 / (sigma_h1^2) theta_h0 <- 0 theta_h1 <- abs(p_c - p_e) / sigma_h1 tibble::tribble( ~n_c, ~n_e, ~p_c, ~p_e, ~theta_h1, ~theta_h0, ~info_h1, ~info_h0, n_c, n_e, p_c, p_e, theta_h1, theta_h0, info_h1, info_h0, ) %>% gt::gt() x <- gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), n = tibble::tibble(stratum = \"All\", n = 1, analysis = 1), rd0 = 0, ratio = 1, weight = \"unstratified\" ) x %>% gt::gt() %>% gt::fmt_number(columns = 5:8, decimals = 6) # under info_scale = \"h0_info\" y_0 <- gs_design_npe( theta = .4 - .28, info = x$info0, info0 = x$info0, info_scale = \"h0_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) # under info_scale = \"h1_info\" y_1 <- gs_design_npe( theta = .4 - .28, info = x$info1, info0 = x$info0, info_scale = \"h1_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) # under info_scale = \"h0_h1_info\" y_2 <- gs_design_npe( theta = .4 - .28, info = x$info1, info0 = x$info0, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) tibble( `info_scale = \"h0_info\"` = y_0$info0[1] / x$info0[1], `info_scale = \"h1_info\"` = y_1$info1[1] / x$info1[1], `info_scale = \"h0_h1_info\"` = y_2$info[1] / x$info1[1] ) %>% gt::gt() %>% gt::tab_header(title = \"The sample size calculated by gsDesign2 under 3 info_scale\") z_info_scale_0 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h0_info\" ) z_info_scale_1 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h1_info\" ) z_info_scale_2 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h0_h1_info\" )"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"east","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"EAST","title":"Group sequential design for binary outcomes","text":"Sample size calculated EAST","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"Summary","title":"Group sequential design for binary outcomes","text":"","code":"tibble::tibble( gsDesign2_info_scale_0 = z_info_scale_0$analysis$n, gsDesign2_info_scale_1 = z_info_scale_1$analysis$n, gsDesign2_info_scale_2 = z_info_scale_2$analysis$n, gsDesign = x_gsdesign$n, EAST_unpool 
= 645, EAST_pool = 651 ) %>% gt::gt() %>% gt::tab_spanner( label = \"gsDesign2\", columns = c(gsDesign2_info_scale_0, gsDesign2_info_scale_1, gsDesign2_info_scale_2) ) %>% gt::tab_spanner( label = \"EAST\", columns = c(EAST_unpool, EAST_pool) ) %>% cols_label( gsDesign2_info_scale_0 = \"info_scale = \\\"h0_info\\\"\", gsDesign2_info_scale_1 = \"info_scale = \\\"h1_info\\\"\", gsDesign2_info_scale_2 = \"info_scale = \\\"h0_h1_info\\\"\", EAST_unpool = \"un-pooled\", EAST_pool = \"pooled\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"unstratified-group-sequential-design","dir":"Articles","previous_headings":"Examples","what":"Unstratified Group Sequential Design","title":"Group sequential design for binary outcomes","text":"example discussed section unstratified group sequential design equal sized groups detect p_C = 0.15, p_E = .1. null hypothesis, assume p_C = p_E = 0.125. desire 90% power two-sided test two proportions \\alpha = 0.05. like calculate sample size achieve 90% power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign2-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"gsDesign2","title":"Group sequential design for binary outcomes","text":"calculate sample size, one can use gs_design_rd(). logic gs_design_rd() calculate sample size fixed design first. logic implemented gs_design_rd().","code":"x_gs <- gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), n = tibble::tibble(stratum = \"All\", n = 1:3 / 3, analysis = 1:3), rd0 = 0, ratio = 1, weight = \"unstratified\" ) x_gs %>% gt::gt() %>% gt::tab_header(title = \"The statistical information of the group sequential design\") y_gs0 <- gs_design_npe( theta = .05, info = x_gs$info0, info0 = x_gs$info0, info_scale = \"h0_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) y_gs1 <- gs_design_npe( theta = .05, info = x_gs$info1, info0 = x_gs$info1, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) y_gs2 <- gs_design_npe( theta = .05, info = x_gs$info1, info0 = x_gs$info0, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) tibble( `info_scale = \"h0_info\"` = y_gs0$info0 / x_gs$info0[3], `info_scale = \"h1_info\"` = y_gs1$info1 / x_gs$info1[3], `info_scale = \"h0_h1_info\"` = y_gs2$info / x_gs$info1[3] ) %>% gt::gt() %>% gt::tab_header( title = \"The sample size calculated by `gsDesign2` under 3 info_scale\", subtitle = \"under group sequential design\" ) x_gsdesign2_info_scale_0 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h0_info\" ) x_gsdesign2_info_scale_1 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", 
rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h1_info\" ) x_gsdesign2_info_scale_2 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h0_h1_info\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"gsDesign","title":"Group sequential design for binary outcomes","text":"","code":"n_fix <- nBinomial( # Control event rate p1 = .15, # Experimental event rate p2 = .1, # Null hypothesis event rate difference (control - experimental) delta0 = 0, # 1-sided Type I error alpha = .025, # Type II error (1 - Power) beta = .1, # Experimental/Control randomization ratio ratio = 1 ) cat(\"The sample size of fixed-design calculated by `gsDesign` is \", n_fix, \".\\n\") #> The sample size of fixed-design calculated by `gsDesign` is 1834.641 . x_gsdesign <- gsDesign( k = 3, test.type = 1, # 1-sided Type I error alpha = .025, # Type II error (1 - Power) beta = .1, # If test.type = 5 or 6, this sets maximum spending for futility # under the null hypothesis. Otherwise, this is ignored. astar = 0, timing = 1:3 / 3, sfu = sfLDOF, sfupar = NULL, sfl = sfLDOF, sflpar = NULL, # Difference in event rates under alternate hypothesis delta = 0, # Difference in rates under H1 delta1 = .05, # Difference in rates under H0 delta0 = 0, endpoint = \"Binomial\", # Fixed design sample size from nBinomial above n.fix = n_fix ) cat(\"The sample size calcuated by `gsDesign` is \", x_gsdesign$n.I, \".\\n\") #> The sample size calcuated by `gsDesign` is 618.7954 1237.591 1856.386 . 
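# (Illustrative check added for exposition; not part of the original vignette code.)
# The total sample size from nBinomial() above should be close to the usual
# two-sample normal-approximation formula for a difference in proportions with
# 1:1 randomization:
#   n_total ~= 2 * ((z_alpha * sqrt(2 * pbar * (1 - pbar)) +
#                    z_beta  * sqrt(p_c * (1 - p_c) + p_e * (1 - p_e))) / (p_c - p_e))^2
pc_chk <- .15
pe_chk <- .1
pbar_chk <- (pc_chk + pe_chk) / 2
2 * ((qnorm(.975) * sqrt(2 * pbar_chk * (1 - pbar_chk)) +
  qnorm(.9) * sqrt(pc_chk * (1 - pc_chk) + pe_chk * (1 - pe_chk))) / (pc_chk - pe_chk))^2
# Approximately 1835, in line with the 1834.641 reported by nBinomial() above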
gsBoundSummary(x_gsdesign, digits = 4, ddigits = 2, tdigits = 1) #> Analysis Value Efficacy #> IA 1: 33% Z 3.7103 #> N: 619 p (1-sided) 0.0001 #> ~delta at bound 0.0985 #> P(Cross) if delta=0 0.0001 #> P(Cross) if delta=0.05 0.0338 #> IA 2: 67% Z 2.5114 #> N: 1238 p (1-sided) 0.0060 #> ~delta at bound 0.0472 #> P(Cross) if delta=0 0.0060 #> P(Cross) if delta=0.05 0.5603 #> Final Z 1.9930 #> N: 1857 p (1-sided) 0.0231 #> ~delta at bound 0.0306 #> P(Cross) if delta=0 0.0250 #> P(Cross) if delta=0.05 0.9000"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"east-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"EAST","title":"Group sequential design for binary outcomes","text":"Sample size calculated EAST Sample size calculated EAST Sample size calculated EAST","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"Summary","title":"Group sequential design for binary outcomes","text":"","code":"tibble::tibble( gsDesign2_info_scale_0 = x_gsdesign2_info_scale_0$analysis$n, gsDesign2_info_scale_1 = x_gsdesign2_info_scale_1$analysis$n, gsDesign2_info_scale_2 = x_gsdesign2_info_scale_2$analysis$n, gsDesign = x_gsdesign$n.I, EAST_unpool = c(617, 1233, 1850), EAST_pool = c(619, 1238, 1857) ) %>% gt::gt() %>% gt::tab_spanner( label = \"gsDesign2\", columns = c(gsDesign2_info_scale_0, gsDesign2_info_scale_1, gsDesign2_info_scale_2) ) %>% gt::tab_spanner( label = \"EAST\", columns = c(EAST_unpool, EAST_pool) ) %>% cols_label( gsDesign2_info_scale_0 = \"info_scale = \\\"h0_info\\\"\", gsDesign2_info_scale_1 = \"info_scale = \\\"h1_info\\\"\", gsDesign2_info_scale_2 = \"info_scale = \\\"h0_h1_info\\\"\", EAST_unpool = \"un-pooled\", EAST_pool = \"pooled\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"stratified-group-sequential-design","dir":"Articles","previous_headings":"Examples","what":"Stratified Group Sequential Design","title":"Group sequential design for binary outcomes","text":"example, consider 3 strata group sequential design 3 analyses. First, calculate variance \\left\\{ \\begin{array}{ll} \\sigma^2_{H_0,k,s} & = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right) = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{ \\frac{\\xi_s}{1+r} N_{k}} + \\frac{1}{ \\frac{r \\xi_s}{1+r} N_{k}} \\right) \\\\ \\sigma_{H_1,k,s}^2 & = \\frac{p_{C,s} (1- p_{C,s})}{N_{C,k,s}} + \\frac{p_{E,s} (1 - p_{E,s})}{N_{E,k,s}} = \\frac{p_{C,s} (1- p_{C,s})}{\\frac{\\xi_s}{1+r} N_{k}} + \\frac{p_{E,s} (1 - p_{E,s})}{\\frac{r \\xi_s}{1+r} N_{k}} \\end{array} \\right. Second, calculate weight using inverse variance w_{s,k} = \\frac{1/\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\sigma^2_{s,k}}. Third, calculate weighted risk difference weighted statistical information. \\left\\{ \\begin{array}{ll} \\delta_{H_0,k} & = 0\\\\ \\delta_{H_1,k} & = \\sum_{s=1}^S w_{k,s} |p_{C,s} - p_{E,s}| \\end{array} \\right. 
\\\\ \\left\\{ \\begin{array}{ll} \\mathcal I_{H_0,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{C, k, s}} + w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{E, k, s}} \\right]^{-1}\\\\ \\mathcal I_{H_1,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{C,k,s} (1 - p_{C,k,s})}{N_{C,k,s}} + \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{E,k,s} (1 - p_{E,k,s})}{N_{E,k,s}} \\right]^{-1} \\end{array} \\right. \\\\ logic implemented gs_design_rd().","code":"ratio <- 1 prevalence_ratio <- c(4, 5, 6) p_c_by_stratum <- c(.3, .37, .6) p_e_by_stratum <- c(.25, .3, .5) p_c <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = p_c_by_stratum) p_e <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = p_e_by_stratum) ratio_strata_c <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), ratio = prevalence_ratio) ratio_strata_e <- ratio_strata_c n <- 1 info_frac <- 1:3 / 3 n_c <- n / (1 + ratio) n_e <- ratio * n_c x <- p_c %>% rename(p_c = rate) %>% left_join(p_e) %>% rename(p_e = rate) %>% mutate(p_pool = (p_c + p_e) / 2) %>% mutate( xi_c = ( ratio_strata_c %>% mutate(prop = ratio / sum(ratio)) )$prop ) %>% mutate( xi_e = ( ratio_strata_e %>% mutate(prop = ratio / sum(ratio)) )$prop ) %>% mutate(n_c = n_c * xi_c, n_e = n_e * xi_e) x %>% gt::gt() %>% gt::fmt_number(columns = 4:8, decimals = 4) %>% gt::tab_footnote( footnote = \"p_pool = (p_c * n_c + p_e * n_e) / (n_c * n_e).\", locations = gt::cells_column_labels(columns = p_pool) ) %>% gt::tab_footnote( footnote = \"xi_c = sample size of a strata / sample size of the control arm.\", locations = gt::cells_column_labels(columns = xi_c) ) %>% gt::tab_footnote( footnote = \"xi_e = sample size of a strata / sample size of the experimental arm.\", locations = gt::cells_column_labels(columns = xi_e) ) %>% gt::tab_footnote( footnote = \"n_c = total sample size of the control arm.\", locations = gt::cells_column_labels(columns = n_c) ) %>% gt::tab_footnote( footnote = \"n_e = total size of the experimental arm.\", locations = gt::cells_column_labels(columns = n_e) ) %>% gt::tab_header(title = \"Stratified Example\") x <- x %>% union_all(x) %>% union_all(x) %>% mutate(Analysis = rep(1:3, each = 3)) %>% left_join(tibble(Analysis = 1:3, IF = info_frac)) %>% mutate(n_c = n_c * IF, n_e = n_e * IF) %>% select(Analysis, stratum, p_c, p_pool, p_e, n_c, n_e, xi_c, xi_e) %>% mutate( sigma_h0 = sqrt(p_pool * (1 - p_pool) * (1 / n_c + 1 / n_e)), sigma_h1 = sqrt(p_c * (1 - p_c) / n_c + p_e * (1 - p_e) / n_e) ) x %>% gt() %>% gt::fmt_number(6:11, decimals = 4) %>% gt::tab_footnote( footnote = \"sigma_h0 = the H0 sd per stratum per analysis.\", locations = gt::cells_column_labels(columns = sigma_h0) ) %>% gt::tab_footnote( footnote = \"sigma_h1 = the H0 sd per stratum per analysis.\", locations = gt::cells_column_labels(columns = sigma_h1) ) temp <- x %>% group_by(Analysis) %>% summarise( sum_invar_H0 = sum(1 / sigma_h0^2), sum_invar_H1 = sum(1 / sigma_h1^2), sum_ss = sum((n_c * n_e) / (n_c + n_e)) ) x <- x %>% left_join(temp) %>% mutate( weight_invar_H0 = 1 / sigma_h0^2 / sum_invar_H0, weight_invar_H1 = 1 / sigma_h1^2 / sum_invar_H1, weight_ss = (n_c * n_e) / (n_c + n_e) / sum_ss ) %>% select(-c(sum_invar_H0, sum_invar_H1, sum_ss)) x %>% gt() %>% fmt_number(6:14, decimals = 4) %>% gt::tab_footnote( footnote = \"weight_invar_H0 = the weight per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = weight_invar_H0) ) %>% gt::tab_footnote( footnote = \"weight_invar_H1 
= the weight per stratum per analysis calculated by INVAR by using variance under H1.\", locations = gt::cells_column_labels(columns = weight_invar_H1) ) %>% gt::tab_footnote( footnote = \"weight_ss = the weight per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = weight_ss) ) x <- x %>% group_by(Analysis) %>% summarise( rd_invar_H0 = sum(weight_invar_H0 * abs(p_c - p_e)), rd_invar_H1 = sum(weight_invar_H1 * abs(p_c - p_e)), rd_ss = sum(weight_ss * abs(p_c - p_e)), rd0 = 0, info_invar_H0 = 1 / sum( weight_invar_H0^2 * p_c * (1 - p_c) / n_c + weight_invar_H0^2 * p_e * (1 - p_e) / n_e ), info_invar_H1 = 1 / sum( weight_invar_H1^2 * p_c * (1 - p_c) / n_c + weight_invar_H1^2 * p_e * (1 - p_e) / n_e ), info_ss = 1 / sum( weight_ss^2 * p_c * (1 - p_c) / n_c + weight_ss^2 * p_e * (1 - p_e) / n_e ), info0_invar_H0 = 1 / sum( weight_invar_H0^2 * p_pool * (1 - p_pool) / n_c + weight_invar_H0^2 * p_pool * (1 - p_pool) / n_e ), info0_invar_H1 = 1 / sum( weight_invar_H1^2 * p_pool * (1 - p_pool) / n_c + weight_invar_H1^2 * p_pool * (1 - p_pool) / n_e ), info0_ss = 1 / sum( weight_ss^2 * p_pool * (1 - p_pool) / n_c + weight_ss^2 * p_pool * (1 - p_pool) / n_e ) ) x %>% gt::gt() %>% fmt_number(c(2:4, 6:11), decimals = 6) %>% gt::tab_footnote( footnote = \"info_invar_H0 = the statistical information under H1 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info_invar_H0) ) %>% gt::tab_footnote( footnote = \"info_invar_H1 = the statistical information under H1 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info_invar_H1) ) %>% gt::tab_footnote( footnote = \"info_ss = the statistical information under H1 per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = info_ss) ) %>% gt::tab_footnote( footnote = \"info0_invar_H0 = the statistical information under H0 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info0_invar_H0) ) %>% gt::tab_footnote( footnote = \"info0_invar_H1 = the statistical information under H0 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info0_invar_H1) ) %>% gt::tab_footnote( footnote = \"info0_ss = the statistical information under H0 per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = info0_ss) ) # Sample size under H0 ---- y_invar_h0 <- gs_design_npe( theta = x$rd_invar_H0, info = x$info0_invar_H0, info0 = x$info0_invar_H0, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) y_invar_h1 <- gs_design_npe( theta = x$rd_invar_H1, info = x$info0_invar_H1, info0 = x$info0_invar_H1, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) y_ss <- gs_design_npe( theta = x$rd_ss, info = x$info0_ss, info0 = x$info0_ss, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) # Sample size under H1 ---- yy_invar_h0 <- gs_design_npe( theta = 
x$rd_invar_H0, info = x$info_invar_H0, info0 = x$info0_invar_H0, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) yy_invar_h1 <- gs_design_npe( theta = x$rd_invar_H1, info = x$info_invar_H1, info0 = x$info0_invar_H1, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) yy_ss <- gs_design_npe( theta = x$rd_ss, info = x$info_ss, info0 = x$info0_ss, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) ans_math <- tibble::tibble( `Weighting method` = rep(c(\"INVAR-H0\", \"INVAR-H1\", \"Sample Size\"), 2), `Calculated under` = c(rep(\"H0\", 3), rep(\"H1\", 3)), `Sample size` = c( y_invar_h0$info[3] / x$info0_invar_H0[3], y_invar_h1$info[3] / x$info0_invar_H1[3], y_ss$info[3] / x$info0_ss[3], yy_invar_h0$info[3] / x$info_invar_H0[3], yy_invar_h1$info[3] / x$info_invar_H1[3], yy_ss$info[3] / x$info_ss[3] ) ) ans_math %>% gt::gt() %>% gt::tab_header(title = \"Sample size calculated by INVAR and SS\") ## sample size weighting + information scale = \"h0_info\" x_ss0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## sample size weighting + information scale = \"h1_info\" x_ss1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## sample size weighting + information scale = \"h0_h1_info\" x_ss2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_info\" x_invar0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h1_info\" x_invar1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = 
gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_h1_info\" x_invar2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_info\" x_invar_h1_0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h1_info\" x_invar_h1_1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_h1_info\" x_invar_h1_2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ans <- tibble::tibble( INVAR0 = x_invar0$analysis$n[1:3], INVAR1 = x_invar1$analysis$n[1:3], INVAR2 = x_invar2$analysis$n[1:3], SS0 = x_ss0$analysis$n[1:3], SS1 = x_ss1$analysis$n[1:3], SS2 = x_ss2$analysis$n[1:3] ) ans %>% gt::gt() %>% gt::tab_header(title = \"Sample size calculated by INVAR and SS\") %>% gt::tab_spanner( label = \"Inverse variance weighting \", columns = c( \"INVAR0\", \"INVAR1\", \"INVAR2\" ) ) %>% gt::tab_spanner( label = \"Sample size weighting\", columns = c(SS0, SS1, SS2) ) %>% cols_label( INVAR0 = \"info_scale = \\\"h0_info\\\"\", INVAR1 = \"info_scale = \\\"h1_info\\\"\", INVAR2 = \"info_scale = \\\"h0_h1_info\\\"\", SS0 = \"info_scale = \\\"h0_info\\\"\", SS1 = \"info_scale = \\\"h1_info\\\"\", SS2 = \"info_scale = \\\"h0_h1_info\\\"\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary-2","dir":"Articles","previous_headings":"","what":"Summary","title":"Group sequential design for binary outcomes","text":"\\delta_{k,s}^{null} risk difference H_0. 0, positive, negative superiority, super-superiority non-inferiority design, respectively. 
superiority design, \\widehat \\sigma^2_{H_0,k,s} = \\widehat p _{k,s}^{pool} \\left(1 - \\widehat p ^{pool}_{k,s} \\right) \\left( \\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right) super-superiority design non-inferiority design, \\hat \\sigma^2 _{H_0,k,s} = \\frac {\\widehat p _{C0,k,s}(1- \\widehat p_{C0,k,s})}{N_ {C,k,s}} + \\frac{ \\widehat p_{E0,k,s} (1 - \\widehat p_{E0,k,s})}{N_{E,k,s}}","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Computing spending boundaries in group sequential design","text":"compare derivation different spending bounds using gsDesign2 gsDesign packages. gsDesign, 6 types bounds. demonstrate replicate using gsDesign2. gsDesign2, gs_spending_bound() function can used derive spending boundaries group sequential design derivations power calculations. demonstrate gs_design_ahr() function , using designs proportional hazards assumptions compare gsDesign::gsSurv(). Since sample size methods differ gsDesign2::gs_design_ahr() gsDesign::gsSurv() functions, use continuous sample sizes spending bounds (Z-values, nominal p-values, spending) identical except noted. Indeed, able reproduce bounds high degree accuracy. Due different sample size methods, sample size boundary approximations vary slightly. also present seventh example implement futility bound based observed hazard ratio well Haybittle-Peto-like efficacy bound. particular, futility bound difficult implement using gsDesign package straightforward using gsDesign2. last two examples, implement integer sample size event counts using to_integer() function gsDesign2 package toInteger() function gsDesign package. generally used cases comparing package computations Examples 1–5. examples, use following design assumptions: choice Type II error 0.15 corresponding 85% power intentional. allows impactful futility bounds interim analyses. Many teams may decide typical 90% power (beta = .1), can make futility bounds less likely impact early decisions.","code":"trial_duration <- 36 # Planned trial duration info_frac <- c(.35, .7, 1) # Information fraction at analyses # 16 month planned enrollment with constant rate enroll_rate <- define_enroll_rate(duration = 16, rate = 1) # Minimum follow-up for gsSurv() (computed) minfup <- trial_duration - sum(enroll_rate$duration) # Failure rates fail_rate <- define_fail_rate( duration = Inf, # Single time period, exponential failure fail_rate = log(2) / 12, # Exponential time-to-event with 12 month median hr = .7, # Proportional hazards dropout_rate = -log(.99) / 12 # 1% dropout rate per year ) alpha <- 0.025 # Type I error (one-sided) beta <- 0.15 # 85% power = 15% Type II error ratio <- 1 # Randomization ratio (experimental / control)"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"examples","dir":"Articles","previous_headings":"","what":"Examples","title":"Computing spending boundaries in group sequential design","text":"Analogous gsDesign package, look 6 variations combinations efficacy futility bounds.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-1-efficacy-bound-only","dir":"Articles","previous_headings":"Examples","what":"Example 1: Efficacy bound only","title":"Computing spending boundaries in group sequential design","text":"One-sided design efficacy bound. 
easy way use fixed bound (lower = gs_b) negative infinite bounds (lpar = rep(-Inf, 3)); summary table produced, infinite bounds appear. upper bound implements spending bound (upper = gs_spending_bound) list objects provided upar describe spending function associated parameters. parts upar list used sf = gsDesign::sfLDOF select Lan-DeMets spending function approximates O’Brien-Fleming bound. total_spend = alpha sets total spending targeted Type error study. upper bound provides Type error control design specified elsewhere. Now check gsDesign::gsSurv(). noted , sample size event counts vary slightly design derived using gs_design_ahr(). also results slightly different crossing probabilities alternate hypothesis interim analyses well slightly different approximate hazard ratios required cross bounds. Comparing Z-value bounds directly see approximately 6 digits precision parameters chosen (r=32, tol=1e-08):","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) one_sided <- gsDesign2::gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error (power) info_scale = \"h0_h1_info\", # Default # Upper spending bound and corresponding parameter(s) upper = gs_spending_bound, upar = upar, # No lower bound lower = gs_b, lpar = rep(-Inf, 3) ) one_sided |> summary() |> gsDesign2::as_gt(title = \"Efficacy bound only\", subtitle = \"alpha-spending\") oneSided <- gsSurv( alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, r = 32, tol = 1e-08, # Precision parameters for computations test.type = 1, # One-sided bound; efficacy only # Upper bound parameters sfu = upar$sf, sfupar = upar$param, ) oneSided |> gsBoundSummary() #> Analysis Value Efficacy #> IA 1: 35% Z 3.6128 #> N: 356 p (1-sided) 0.0002 #> Events: 100 ~HR at bound 0.4852 #> Month: 14 P(Cross) if HR=1 0.0002 #> P(Cross) if HR=0.7 0.0338 #> IA 2: 70% Z 2.4406 #> N: 394 p (1-sided) 0.0073 #> Events: 200 ~HR at bound 0.7079 #> Month: 23 P(Cross) if HR=1 0.0074 #> P(Cross) if HR=0.7 0.5341 #> Final Z 2.0002 #> N: 394 p (1-sided) 0.0227 #> Events: 286 ~HR at bound 0.7891 #> Month: 36 P(Cross) if HR=1 0.0250 #> P(Cross) if HR=0.7 0.8500 one_sided$bound$z - oneSided$upper$bound #> [1] -1.349247e-07 9.218765e-07 3.515345e-07"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-2-symmetric-2-sided-design","dir":"Articles","previous_headings":"Examples","what":"Example 2: Symmetric 2-sided design","title":"Computing spending boundaries in group sequential design","text":"now derive symmetric 2-sided design. requires use argument h1_spending = FALSE use \\alpha-spending upper lower bounds. lower bound labeled futility bound table, better termed efficacy bound control better experimental treatment. compare gsDesign::gsSurv(). 
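Once the design below has been computed, the symmetry is easy to confirm: with h1_spending = FALSE and a single spending function, the futility Z-values are the mirror image of the efficacy Z-values. An illustrative check (symmetric is the design object created in the code that follows):
# Should be numerically zero at every analysis for a symmetric two-sided design
dplyr::filter(symmetric$bound, bound == "lower")$z +
  dplyr::filter(symmetric$bound, bound == "upper")$z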
Comparing Z-value bounds directly, see approximately 6 digits accuracy.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) symmetric <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for power info_scale = \"h0_h1_info\", # Default # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Symmetric designs use binding bounds binding = TRUE, h1_spending = FALSE # Use null hypothesis spending for lower bound ) symmetric |> summary() |> gsDesign2::as_gt( title = \"2-sided Symmetric Design\", subtitle = \"Single spending function\" ) Symmetric <- gsSurv( test.type = 2, # Two-sided symmetric bound alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param ) Symmetric |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -3.6128 #> N: 356 p (1-sided) 0.0002 0.0002 #> Events: 100 ~HR at bound 0.4852 2.0609 #> Month: 14 P(Cross) if HR=1 0.0002 0.0002 #> P(Cross) if HR=0.7 0.0338 0.0000 #> IA 2: 70% Z 2.4406 -2.4406 #> N: 394 p (1-sided) 0.0073 0.0073 #> Events: 200 ~HR at bound 0.7079 1.4126 #> Month: 23 P(Cross) if HR=1 0.0074 0.0074 #> P(Cross) if HR=0.7 0.5341 0.0000 #> Final Z 2.0002 -2.0002 #> N: 394 p (1-sided) 0.0227 0.0227 #> Events: 286 ~HR at bound 0.7891 1.2673 #> Month: 36 P(Cross) if HR=1 0.0250 0.0250 #> P(Cross) if HR=0.7 0.8500 0.0000 dplyr::filter(symmetric$bound, bound == \"upper\")$z - Symmetric$upper$bound #> [1] -1.349247e-07 9.218765e-07 4.092976e-07 dplyr::filter(symmetric$bound, bound == \"lower\")$z - Symmetric$lower$bound #> [1] 1.349247e-07 -9.218765e-07 -4.092976e-07"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-3-asymmetric-2-sided-design-with-beta-spending-and-binding-futility","dir":"Articles","previous_headings":"Examples","what":"Example 3: Asymmetric 2-sided design with \\beta-spending and binding futility","title":"Computing spending boundaries in group sequential design","text":"Designs binding futility bounds generally considered acceptable Phase 3 trials Type error controlled futility bound crossed trial continues, infrequent occurrence. binding futility bound means Type error computations assume trial stops futility bound crossed. trial continues futility bound crossed, Type error longer controlled computed efficacy bound. Phase 2b study, may acceptable results slightly smaller sample size less stringent efficacy bounds first analysis comparable design non-binding futility bound presented Example 4. compare gsDesign::gsSurv(). 
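To see how much Type II error this futility bound spends at each analysis, the Hwang-Shih-DeCani spending function can be evaluated directly (an illustrative sketch using the beta = 0.15, param = -0.5, and info_frac = c(.35, .7, 1) values defined earlier; the results match the cumulative futility crossing probabilities under HR = 0.7 in the gsBoundSummary() output below):
library(gsDesign)
# Cumulative beta-spending at the planned information fractions
sfHSD(alpha = 0.15, t = c(.35, .7, 1), param = -.5)$spend
# Approximately 0.044, 0.097, and 0.15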
Comparing Z-value bounds directly, see approximately 6 digits accuracy spite needing relaxing accuracy tol = 1e-07 call gsSurv() order get convergence.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = beta, param = -.5) asymmetric_binding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error and power info_scale = \"h0_h1_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Asymmetric beta-spending design using binding bounds binding = TRUE, h1_spending = TRUE # Use beta-spending for futility ) asymmetric_binding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric design with binding futility\", subtitle = \"Both alpha- and beta-spending used\" ) asymmetricBinding <- gsSurv( test.type = 3, # Two-sided asymmetric bound, binding futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-07, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 0.1436 #> N: 380 p (1-sided) 0.0002 0.4429 #> Events: 107 ~HR at bound 0.4971 0.9726 #> Month: 14 P(Cross) if HR=1 0.0002 0.5571 #> P(Cross) if HR=0.7 0.0387 0.0442 #> IA 2: 70% Z 2.4382 1.1807 #> N: 422 p (1-sided) 0.0074 0.1189 #> Events: 214 ~HR at bound 0.7164 0.8509 #> Month: 23 P(Cross) if HR=1 0.0074 0.8913 #> P(Cross) if HR=0.7 0.5679 0.0969 #> Final Z 1.9232 1.9232 #> N: 422 p (1-sided) 0.0272 0.0272 #> Events: 306 ~HR at bound 0.8024 0.8024 #> Month: 36 P(Cross) if HR=1 0.0250 0.9750 #> P(Cross) if HR=0.7 0.8500 0.1500 dplyr::filter(asymmetric_binding$bound, bound == \"upper\")$z - asymmetricBinding$upper$bound #> [1] -1.349247e-07 2.505886e-04 6.494369e-03 dplyr::filter(asymmetric_binding$bound, bound == \"lower\")$z - asymmetricBinding$lower$bound #> [1] -0.02803415 -0.02670908 -0.01598640"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-4-asymmetric-2-sided-design-with-beta-spending-and-non-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 4: Asymmetric 2-sided design with \\beta-spending and non-binding futility bound","title":"Computing spending boundaries in group sequential design","text":"gsDesign package, asymmetric designs non-binding \\beta-spending used futility default design. objectives type design include: Meaningful futility bounds stop trial early treatment benefit emerging experimental treatment vs. control. Type error controlled even trial continues futility bound crossed. compare gsDesign::gsSurv(). 
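Because the futility bound here is non-binding, the efficacy bound is derived as if the futility bound were ignored, so it should agree with the efficacy-only bound from Example 1. An illustrative check, assuming the one_sided object from Example 1 and the asymmetric_nonbinding object created below are both in the workspace:
# Differences should be at numerical-precision level
dplyr::filter(asymmetric_nonbinding$bound, bound == "upper")$z -
  dplyr::filter(one_sided$bound, bound == "upper")$z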
Comparing Z-value bounds directly, see approximately 6 digits accuracy.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = beta, param = -.5) asymmetric_nonbinding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 info for Type II error and power info_scale = \"h0_h1_info\", # Default # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Asymmetric beta-spending design use binding bounds binding = FALSE, h1_spending = TRUE # Use beta-spending for futility ) asymmetric_nonbinding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric design with non-binding futility\", subtitle = \"Both alpha- and beta-spending used\" ) asymmetricNonBinding <- gsSurv( test.type = 4, # Two-sided asymmetric bound, non-binding futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricNonBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 0.1860 #> N: 398 p (1-sided) 0.0002 0.4262 #> Events: 112 ~HR at bound 0.5050 0.9654 #> Month: 14 P(Cross) if HR=1 0.0002 0.5738 #> P(Cross) if HR=0.7 0.0424 0.0442 #> IA 2: 70% Z 2.4406 1.2406 #> N: 440 p (1-sided) 0.0073 0.1074 #> Events: 224 ~HR at bound 0.7215 0.8471 #> Month: 23 P(Cross) if HR=1 0.0073 0.9020 #> P(Cross) if HR=0.7 0.5901 0.0969 #> Final Z 2.0002 2.0002 #> N: 440 p (1-sided) 0.0227 0.0227 #> Events: 320 ~HR at bound 0.7995 0.7995 #> Month: 36 P(Cross) if HR=1 0.0215 0.9785 #> P(Cross) if HR=0.7 0.8500 0.1500 dplyr::filter(asymmetric_nonbinding$bound, bound == \"upper\")$z - asymmetricNonBinding$upper$bound #> [1] -1.349247e-07 9.218765e-07 3.515345e-07 dplyr::filter(asymmetric_nonbinding$bound, bound == \"lower\")$z - asymmetricNonBinding$lower$bound #> [1] -0.03267431 -0.03311078 -0.02426999"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-5-asymmetric-2-sided-design-with-null-hypothesis-spending-and-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 5: Asymmetric 2-sided design with null hypothesis spending and binding futility bound","title":"Computing spending boundaries in group sequential design","text":"Now use null hypothesis probabilities set futility bounds. parameter alpha_star used set total spending futility bound null hypothesis. example, set 0.5 50% probability crossing futility bound interim final analyses combined. futility bound final analysis really role, use test_lower argument eliminate evaluation final analysis. arbitrary largely selected interim futility bounds can meaningful tests. case, minor trend favor control first second interim cross futility bound. less stringent \\beta-spending bounds previously described, still address potential ethical issue continuing trial minor trend favor control present. Comparing Z-value bounds directly, see approximately 6 digits accuracy. 
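A quick arithmetic check of the first interim futility bound (an illustrative calculation using values from the gsBoundSummary() output shown below): at the first analysis the cumulative null-hypothesis spend from sfHSD() with total spend 0.5 and param = 1 equals the probability of falling below the futility bound under H0.
# Cumulative H0 futility spend at 35% information
gsDesign::sfHSD(alpha = .5, t = .35, param = 1)$spend
# Probability below the IA1 futility bound of -0.7271 under H0
pnorm(-0.7271)
# Both are approximately 0.234, matching "P(Cross) if HR=1" at IA 1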
gsSurv() require alternate arguments r tol.","code":"alpha_star <- .5 upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = alpha_star, param = 1) asymmetric_safety_binding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, test_lower = c(TRUE, TRUE, FALSE), # Asymmetric design use binding bounds binding = TRUE, h1_spending = FALSE # Use null-spending for futility ) asymmetric_safety_binding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with binding futility\", subtitle = \"Alpha-spending used for both bounds, asymmetrically\" ) asymmetricSafetyBinding <- gsSurv( test.type = 5, # Two-sided asymmetric bound, binding futility, H0 futility spending astar = alpha_star, # Total Type I error spend for futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricSafetyBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -0.7271 #> N: 356 p (1-sided) 0.0002 0.7664 #> Events: 101 ~HR at bound 0.4856 1.1565 #> Month: 14 P(Cross) if HR=1 0.0002 0.2336 #> P(Cross) if HR=0.7 0.0340 0.0060 #> IA 2: 70% Z 2.4405 -0.4203 #> N: 394 p (1-sided) 0.0073 0.6629 #> Events: 201 ~HR at bound 0.7082 1.0612 #> Month: 23 P(Cross) if HR=1 0.0074 0.3982 #> P(Cross) if HR=0.7 0.5353 0.0070 #> Final Z 1.9979 -0.2531 #> N: 394 p (1-sided) 0.0229 0.5999 #> Events: 286 ~HR at bound 0.7895 1.0304 #> Month: 36 P(Cross) if HR=1 0.0250 0.5000 #> P(Cross) if HR=0.7 0.8500 0.0072 dplyr::filter(asymmetric_safety_binding$bound, bound == \"upper\")$z - asymmetricSafetyBinding$upper$bound #> [1] -1.349247e-07 9.211210e-07 4.185954e-07 dplyr::filter(asymmetric_safety_binding$bound, bound == \"lower\")$z - asymmetricSafetyBinding$lower$bound[1:2] #> [1] 4.348992e-08 -3.276118e-08"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-6-asymmetric-2-sided-design-with-null-hypothesis-spending-and-non-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 6: Asymmetric 2-sided design with null hypothesis spending and non-binding futility bound","title":"Computing spending boundaries in group sequential design","text":", recommend non-binding bound presented binding bound example 5. eliminate final futility bound using test_lower argument. Addition, show eliminate efficacy bound interim 1 allowing team decide early stop trial efficacy without longer-term data. 
corresponding gsDesign::gsSurv() design strictly comparable since option eliminate futility efficacy analyses enabled.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = alpha_star, param = 1) asymmetric_safety_nonbinding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, test_upper = c(FALSE, TRUE, TRUE), lower = gs_spending_bound, lpar = lpar, test_lower = c(TRUE, TRUE, FALSE), # Asymmetric design use non-binding bounds binding = FALSE, h1_spending = FALSE # Use null-spending for futility ) |> to_integer() asymmetric_safety_nonbinding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with non-binding futility\", subtitle = \"Alpha-spending used for both bounds, asymmetrically\" ) |> gt::tab_footnote(footnote = \"Integer-based sample size and event counts\") asymmetricSafetyNonBinding <- gsSurv( test.type = 6, # Two-sided asymmetric bound, binding futility, H0 futility spending astar = alpha_star, # Total Type I error spend for futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricSafetyBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -0.7271 #> N: 356 p (1-sided) 0.0002 0.7664 #> Events: 101 ~HR at bound 0.4856 1.1565 #> Month: 14 P(Cross) if HR=1 0.0002 0.2336 #> P(Cross) if HR=0.7 0.0340 0.0060 #> IA 2: 70% Z 2.4405 -0.4203 #> N: 394 p (1-sided) 0.0073 0.6629 #> Events: 201 ~HR at bound 0.7082 1.0612 #> Month: 23 P(Cross) if HR=1 0.0074 0.3982 #> P(Cross) if HR=0.7 0.5353 0.0070 #> Final Z 1.9979 -0.2531 #> N: 394 p (1-sided) 0.0229 0.5999 #> Events: 286 ~HR at bound 0.7895 1.0304 #> Month: 36 P(Cross) if HR=1 0.0250 0.5000 #> P(Cross) if HR=0.7 0.8500 0.0072"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-7-alternate-bound-types","dir":"Articles","previous_headings":"Examples","what":"Example 7: Alternate bound types","title":"Computing spending boundaries in group sequential design","text":"consider two types alternative boundary computation approaches. Computing futility bounds based hazard ratio. Computing efficacy bounds Haybittle-Peto related Fleming-Harrington-O’Brien approach. begin futility bound. consider non-binding futility bound impact efficacy bound. Assume clinical trial team wishes stop trial first two interim analyses targeted interim hazard ratio achieved. approach can require bit iteration (trial error) incorporate final design endpoint count; skip iteration . assume wish consider stopping futility hazard ratio greater 1 0.9 observed interim analyses 1 2 104 209 events observed, respectively. final analysis planned 300 events. wish translate hazard ratios specified corresponding Z-values; can done follows. add final futility bound -Inf, indicating final futility analysis; gives us vector Z-value bounds analyses. 
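The hazard-ratio-to-Z translation used below can be checked by hand with the Schoenfeld approximation for 1:1 randomization: for d events, the Z-value corresponding to an observed hazard ratio h is approximately -log(h) * sqrt(d / 4). An illustrative check against the interim_futility_z values computed with hrn2z() in the code below:
# Futility Z-values for observed HRs of 1 and 0.9 at 104 and 209 events
-log(c(1, .9)) * sqrt(c(104, 209) / 4)
# Approximately 0 and 0.762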
type bound, Type II error computed rather based bounds rather spending approach bounds computed based specified spending. efficacy bound, first consider Haybittle-Peto fixed bound interim analyses. Using Bonferroni approach, test nominal levels 0.001, 0.001, 0.023 3 analyses. accounting correlations, actually quite use 0.025 1-sided Type error allowed. allow user substitute code follows verify . alternative approach use fixed spending approach analysis suggested Fleming, Harrington, O’Brien (1984). , iteration shown, use piecewise linear spending function select interim bounds match desired Haybittle-Peto interim bounds. However, using approach slightly liberal final bound achieved still controls Type error. see targeted bounds achieved nominal p-values 0.0001 interim efficacy bound targeted hazard ratios interim futility bounds. methods, trial designers control design characteristics may desire. particular, note Haybittle-Peto efficacy bounds less stringent first interim stringent second interim corresponding O’Brien-Fleming-like bounds computed spending approach. may may desirable.","code":"# Targeted events at interim and final analysis # This is based on above designs and then adjusted, as necessary targeted_events <- c(104, 209, 300) interim_futility_z <- -gsDesign::hrn2z(hr = c(1, .9), n = targeted_events[1:2]) interim_futility_z #> [1] 0.0000000 0.7615897 lower <- gs_b # Allows specifying fixed Z-values for futility # Translated HR bounds to Z-value scale lpar <- c(interim_futility_z, -Inf) upper <- gs_b upar <- qnorm(c(.001, .001, .0023), lower.tail = FALSE) upper <- gs_spending_bound upar <- list( sf = gsDesign::sfLinear, total_spend = alpha, param = c(targeted_events[1:2] / targeted_events[3], c(.001, .0018) / .025), timing = NULL ) asymmetric_fixed_bounds <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = upper, upar = upar, lower = lower, lpar = lpar, # Non-binding futility bounds binding = FALSE ) |> to_integer() asymmetric_fixed_bounds |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with fixed non-binding futility\", subtitle = \"Futility bounds computed to approximate HR\" ) |> gt::tab_footnote(footnote = \"Integer-based sample size and event counts\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Spending time examples","text":"multiple scenarios event-based spending group sequential designs limitations terms ensuring adequate follow-ensuring adequate spending preserved final analysis. Example contexts often arises trials may delayed treatment effect, control failure rates different expected, multiple hypotheses tested. general, situations found ensuring adequate follow-duration adequate number events important fully evaluate potential effectiveness new treatment. testing multiple hypotheses, carefully thinking possible spending issues can critical. addition, group sequential trials, preserving adequate \\alpha-spending final evaluation hypothesis important difficult using traditional event-based spending. 
document, outline three examples demonstrate issues: importance adequate events adequate follow-duration ensure power fixed design, importance guaranteeing reasonable amount \\alpha-spending final analysis group sequential design. trial examining outcome biomarker positive overall populations, show importance considering design reacts incorrect design assumptions biomarker prevalence. group sequential design options, demonstrate concept spending time effective way adapt. Traditionally Lan DeMets (1983), spending done according targeting specific number events outcome end trial. However, delayed treatment effect scenarios substantial literature (e.g., Lin et al. (2020), Roychoudhury et al. (2021)) documenting importance adequate follow-duration addition requiring adequate number events traditional proportional hazards assumption. approaches taken, found spending time approach generalizes well addressing variety scenarios. fact spending need correspond information fraction perhaps first raised Lan DeMets (1989) calendar-time spending discussed. However, note Proschan, Lan, Wittes (2006) raised scenarios spending alternatives considered. Two specific spending approaches suggested : Spending according minimum planned observed event counts. suggested delayed effect examples. Spending common spending time across multiple hypotheses; e.g., multiple population example, spending overall population rate biomarker positive subgroup regardless event counts time overall population. consistent Follmann, Proschan, Geller (1994) applied multiple experimental treatments compared common control. Spending time case corresponds approach Fleming, Harrington, O’Brien (1984) fixed incremental spending set potentially variable number interim analyses. document fairly long demonstrates number scenarios relevant spending time concept. layout intended make easy possibly focus individual examples interested full review. Code blocks can unfolded interested implementation. Rather bog conceptual discussion implementation details, tried provide sufficient comments code guide implementation interested .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"delayed-effect-scenario","dir":"Articles","previous_headings":"","what":"Delayed effect scenario","title":"Spending time examples","text":"consider example single stratum possibility delayed treatment effect. next two sections consider 1) fixed design interim analysis, 2) design interim analysis. Following common assumptions: control group time--event exponentially distributed median 12 months. 2.5% one-sided Type error. 90% power. constant enrollment rate expected enrollment duration 12 months. targeted trial duration 30 months. delayed effect experimental group compared control, hazard ratio 1 first 4 months hazard ratio 0.6 thereafter. restrictions constant control failure rate, two hazard ratio time intervals constant enrollment required, simplify example. approach taken uses average-hazard ratio approach approximating treatment effect Mukhopadhyay et al. 
(2020) asymptotic group sequential theory Tsiatis (1982).","code":"# control median m <- 12 # enrollment rate enroll_rate <- define_enroll_rate( duration = 12, # expected enrollment duration of 12 months rate = 1 # here the rate is a ratio, which will be updated to achieve the desired sample size ) # failure rate fail_rate <- define_fail_rate( duration = c(4, 100), # hazard ratio of 1 for the first 4 months and a hazard ratio of 0.6 thereafter hr = c(1, .6), fail_rate = log(2) / m, # exponential distribution dropout_rate = .001 )"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"fixed-design-delayed-effect","dir":"Articles","previous_headings":"","what":"Fixed design, delayed effect","title":"Spending time examples","text":"sample size events design shown . see average hazard ratio (AHR) assumptions 0.7026, part way early HR 1 later HR 0.6 assumed experimental versus control therapy.","code":"# bounds for fixed design are just a fixed bound for nominal p = 0.025, 1-sided z_025 <- qnorm(.975) # fixed design, single stratum # find sample size for 30 month trial under given # enrollment and sample size assumptions xx <- gs_design_ahr(enroll_rate, fail_rate, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) # get the summary table of the fixed design summary(xx, analysis_vars = c(\"time\", \"n\", \"event\", \"ahr\", \"info_frac\"), analysis_decimals = c(0, 0, 0, 4, 4) ) %>% as_gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-1-less-experimental-benefit","dir":"Articles","previous_headings":"Fixed design, delayed effect > Power when assumptions design are wrong","what":"Scenario 1: less experimental benefit","title":"Spending time examples","text":"assume instead effect delay 6 months instead 4 control median 10 months instead 12, substantial impact power. , assumed targeted events required final analysis resulting expected final analysis time 25 months instead planned 30 average hazard ratio 0.78 expected time analysis rather targeted average hazard ratio 0.70 original assumptions. Now also require 30 months trial duration addition targeted events. improves power 63% 76% increase 25 30 months duration 340 377 expected events, important gain. driven average hazard ratio 0.78 compared 0.76 increased expected number events. also ensures adequate follow-better describe longer-term differences survival; may particularly important early follow-suggests delayed effect crossing survival curves. 
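One way to see the average hazard ratio trade-off described here is to evaluate the AHR directly at the two candidate analysis times. This is a sketch assuming the updated fail_rate table for this scenario and the enrollment rates from the original design xx are in the workspace, and that the gsDesign2 helper ahr() accepts a vector of total durations:
library(gsDesign2)
# Average hazard ratio under the alternate scenario at 25 vs 30 months;
# the discussion above quotes roughly 0.78 and 0.76 at these two times
ahr(enroll_rate = xx$enroll_rate, fail_rate = fail_rate, total_duration = c(25, 30))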
Thus, adaptation event-based design based also require adequate follow-can help ensure power large clinical trial investment clinically relevant underlying survival benefit.","code":"# update the median of control arm am <- 10 # alternate control median (the original is 12) # update the failure rate table fail_rate$duration[1] <- 6 # the original is 4 fail_rate$fail_rate <- log(2) / am # the original is log(2)/12 # get the targeted number of events target_events <- xx$analysis$event # update the design and calculate the power under the targeted events yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we want to achieve the target events # and set analysis_time as NULL # so the analysis_time will be calculated according to the target events event = target_events, analysis_time = NULL, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt() yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we want to achieve the targeted events, # but also keep the 30 month as the analysis_time event = target_events, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) # get the summary table of updated design yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-2-low-control-event-rates","dir":"Articles","previous_headings":"Fixed design, delayed effect","what":"Scenario 2: low control event rates","title":"Spending time examples","text":"Now assume longer planned control median, 16 months demonstrate value retaining event count requirement. analyze 30 months, power trial 87% 288 events expected. also require adequate events, restore power 94.5, originally targeted level 90%. cost expected trial duration becomes 38.5 months rather 30; however, since control median now larger, additional follow-useful characterize tail behavior. Note scenario likely particularly interested retaining power treatment effect actually stronger original alternate hypothesis. Thus, example, time cutoff alone ensured sufficient follow-power trial.","code":"# alternate control median am <- 16 # the original is 12 # update the failure rate fail_rate$fail_rate <- log(2) / am fail_rate$duration[1] <- 4 # calculate the power when trial duration is 30 month yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we set analysisTime as 30 # and calculate the corresponding number of events event = NULL, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt() # calculate the power when trial duration is 30 month and the events is the targeted events yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we set trial duration as 30 month # and keep the events as the target events event = target_events, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"conclusions-for-fixed-design","dir":"Articles","previous_headings":"Fixed design, delayed effect","what":"Conclusions for fixed design","title":"Spending time examples","text":"summary, demonstrated value requiring adequate events adequate follow-duration approach analysis done one requirements. 
Requiring retain power important treatment benefit characterization time potential delayed onset positive beneficial treatment effect.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"alternative-spending-strategies","dir":"Articles","previous_headings":"Group sequential design","what":"Alternative spending strategies","title":"Spending time examples","text":"extend design detect delayed effect group sequential design single interim analysis 80% final planned events accrued. assume final analysis require targeted trial duration events based fixed design based evaluations . assume efficacy bound uses Lan DeMets (1983) spending function approximating O’Brien-Fleming bound. futility bound planned, exception demonstration one scenario. interim analysis far enough trial substantial probability stopping early design assumptions. Coding different strategies must done carefully. Spending approach 1: time design, specify spending function specifying use information fraction design. Spending approach 2: wished use 22 30 months calendar analysis times use calendar fraction spending, need specify spending time design. Spending approach 3: Next show set information-based spending power calculation timing analysis based information fraction; e.g., propose requiring achieving planned event counts, also planned study duration analysis performed. critical set maximum planned information update information fraction calculation case. Spending approach 4: final case replace information fraction design specific spending time plugged spending function compute incremental \\alpha-spending analysis. case, use planned information fraction design, 0.8 interim analysis 1 final analysis. used regardless scenario using compute power, recall information fraction still used computing correlations asymptotic distribution approximation design tests.","code":"# Spending for design with planned information fraction (IF) upar_design_if <- list( # total_spend represents one-sided Type I error total_spend = 0.025, # Spending function and associated # parameter (NULL, in this case) sf = sfLDOF, param = NULL, # Do NOT specify spending time here as it will be set # by information fraction specified in call to gs_design_ahr() timing = NULL, # Do NOT specify maximum information here as it will be # set as the design maximum information max_info = NULL ) # CF is for calendar fraction upar_design_cf <- upar_design_if # Now switch spending time to calendar fraction upar_design_cf$timing <- c(22, 30) / 30 # We now need to change max_info from spending as specified for design upar_actual_info_frac <- upar_design_if # Note that we still have timing = NULL, unchanged from information-based design upar_actual_info_frac <- NULL # Replace NULL maximum information with planned maximum null hypothesis # information from design # This max will be updated for each planned design later upar_actual_info_frac$max_info <- 100 # Copy original upper planned spending upar_planned_info_frac <- upar_design_if # Interim and final spending time will always be the same, regardless of # expected events or calendar timing of analysis upar_planned_info_frac$timing <- c(0.8, 1) # We will reset planned maximum information later"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design","dir":"Articles","previous_headings":"Group sequential design","what":"Planned design","title":"Spending time examples","text":"extend design studied group 
sequential design single interim analysis 80% final planned events accrued. assume final analysis require targeted trial duration events based fixed design evaluations made . assume efficacy bound uses Lan-DeMets spending function approximating O’Brien-Fleming bound. futility bound planned. interim analysis far enough trial substantial probability stopping early design assumptions.","code":"# Control median m <- 12 # Planned information fraction at interim(s) and final planned_info_frac <- c(.8, 1) # No futility bound lpar <- rep(-Inf, 2) # enrollment rate enroll_rate <- define_enroll_rate( duration = 12, rate = 1 ) # failure rate fail_rate <- define_fail_rate( duration = c(4, 100), hr = c(1, .6), fail_rate = log(2) / m, dropout_rate = .001 ) # get the group sequential design model xx <- gs_design_ahr( enroll_rate, fail_rate, # final analysis time set to targeted study duration; # analysis times before are 'small' to ensure use of information fraction for timing analysis_time = c(1, 30), # timing here matches what went into planned_info_frac above info_frac = planned_info_frac, # upper bound : spending approach 1 upper = gs_spending_bound, upar = upar_design_if, # lower bound: no futility bound lower = gs_b, lpar = lpar ) # get the summary table xx %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"two-alternate-approaches","dir":"Articles","previous_headings":"Group sequential design","what":"Two alternate approaches","title":"Spending time examples","text":"consider two alternate approaches demonstrate spending time concept may helpful practice. However, skipping following two subsections can done interest. first demonstrates calendar spending Lan DeMets (1989). second basically method Fleming, Harrington, O’Brien (1984) fixed incremental spend used potentially variable number interim analyses, final bound computed based unspent one-sided Type error assigned hypothesis.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"calendar-spending","dir":"Articles","previous_headings":"Group sequential design > Two alternate approaches","what":"Calendar spending","title":"Spending time examples","text":"use sample size , change efficacy bound spending calendar-based. reason spending different information-based spending mainly due fact expected information linear time. case, calendar fraction interim less information fraction, exactly opposite true earlier trial. just note calendar-based spending chosen, may worth comparing design bounds bounds using spending function, information-based spending see important differences trial team possibly scientific regulatory community. note also risk enough events achieve targeted power final analysis calendar-based spending strategy. 
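The difference between the two time scales can be checked directly from the spending function: the calendar fraction 22/30 at the interim is smaller than the planned information fraction 0.8, so less alpha is available and the interim bound is more stringent. A minimal check, assuming the same Lan-DeMets O'Brien-Fleming spending function and the 0.025 one-sided total spend used above:

library(gsDesign)

alpha <- 0.025  # total one-sided spend used for the efficacy bound above

# Cumulative alpha spent at the interim under calendar-fraction spending
sfLDOF(alpha, 22 / 30, param = NULL)$spend
# Cumulative alpha spent at the interim under the planned information fraction
sfLDOF(alpha, 0.8, param = NULL)$spend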
examine calendar-based spending document.","code":"yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = xx$fail_rate, # Planned time will drive timing since information accrues faster event = 1:2, # Interim time rounded analysis_time = c(22, 30), # upper bound: use calendar fraction upper = gs_spending_bound, upar = upar_design_cf, # lower bound: no futility bound lower = gs_b, lpar = lpar ) yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"fixed-incremental-spend-with-a-variable-number-of-analyses","dir":"Articles","previous_headings":"Group sequential design > Two alternate approaches","what":"Fixed incremental spend with a variable number of analyses","title":"Spending time examples","text":"noted, method proposed Fleming, Harrington, O’Brien (1984). general strategy demonstrated interim analyses every 6 months final targeted follow-time cumulative number events achieved. efficacy analyses start, fixed incremental spend 0.001 used interim. criteria final analysis met, remaining \\alpha spent. Cumulative spending months 18 24 0.001 0.002, respectively, full cumulative \\alpha-spending 0.025 final analysis. done setting spending time 18 24 months 1/25, 2/25 1; .e., 1/25 incremental \\alpha-spending incorporated interim analysis remaining \\alpha spent final analysis. enables strategy analyzing every 6 months minimum targeted follow-minimum number events observed, time final analysis performed. skip efficacy analyses first two interim analyses months 6 12. futility, simply use nominal 1-sided p-value 0.05 favoring control interim. note raises flag futility bound crossed Data Monitoring Committee (DMC) can choose continue trial even futility bound crossed. However, bound may effective providing DMC guidance stop futility prematurely. comparison designs, leave enrollment rates, failure rates, dropout rates final analysis time . see following table summarizing efficacy bounds power little impact total power futility analyses specified. cumulative \\alpha-spending 0.001 0.002 efficacy interim analyses, see nominal p-value bound second interim 0.0015, 0.001 incremental \\alpha-spend. also note nominal p-values testing, approximate hazard ratio required cross bounds presumably help justify consideration completing trial based definitive interim efficacy finding. Also, small interim spend, final nominal p-value reduced much overall \\alpha=0.025 Type error set group sequential design. also examine futility bound. nominal p-value 0.05 analysis one-sided p-value favor control experimental treatment. can see probability stopping early alternate hypothesis (\\beta-spending) substantial even given early delayed effect. Also, substantial approximate observed hazard ratios cross futility bound seem reasonable given timing number events observed; exception small number events first interim, larger number observed time early excess risk. may useful plan additional analyses futility bound crossed support stopping . example, looking subgroups evaluating smoothed hazard rates time treatment group may useful. clinical trial study team complete discussion futility bound considerations time design.","code":"# Cumulative spending at IA3 and IA4 will be 0.001 and 0.002, respectively. # Power spending function sfPower with param = 1 is linear in timing # which makes setting the above cumulative spending targets simple by # setting timing variable the the cumulative proportion of spending at each analysis. 
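# With sfPower and param = 1, cumulative spending is simply total_spend * timing,
# so the planned cumulative alpha at the five analyses is approximately
# 0.025 * c(1/250, 2/250, 1/25, 2/25, 1) = 0.0001, 0.0002, 0.001, 0.002, 0.025.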
# There will be no efficacy testing at IA1 or IA2. # Thus, incremental spend, which will be unused, is set very small for these analyses. upar_fho <- list( total_spend = 0.025, sf = sfPower, param = 1, timing = c((1:2) / 250, (1:2) / 25, 1) ) fho <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = xx$fail_rate, event = NULL, analysis_time = seq(6, 30, 6), upper = gs_spending_bound, upar = upar_fho, # No efficacy testing at IA1 or IA2 # Thus, the small alpha the spending function would have # allocated will not be used test_upper = c(FALSE, FALSE, TRUE, TRUE, TRUE), lower = gs_b, lpar = c(rep(qnorm(.05), 4), -Inf) ) fho %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-with-less-treatment-effect","dir":"Articles","previous_headings":"Group sequential design","what":"Scenario with less treatment effect","title":"Spending time examples","text":", compute power assumption changing median control group time--event 10 months rather assumed 12 delay effect onset 6 months rather 4. otherwise change enrollment, dropout hazard ratio assumptions. following examples, require targeted number events targeted trial duration group sequential design interim final analyses. first example, uses interim spending based event count observed originally planned final event count information fraction 323 / 355 = 0.91. gives event-based spending 0.0191, substantially targeted information fraction 284 / 355 = 0.8 targeted interim spending 0.0122. reduces power overall 76% 73% lowers nominal p-value bound final analysis 0.0218 0.0165; see following two tables. Noting average hazard ratio 0.8 interim 0.76 final analysis emphasizes value preserving \\alpha-spending final analysis. Thus, example valuable limit spending interim analysis minimum planned spending opposed using event-based spending. Just important, general design principle making interim analysis criteria stringent final ensured alternate scenario. multiple trials delayed effects observed difference final nominal p-value bound made difference ensure statistically significant finding.","code":"# Alternate control median am <- 10 # Update the failure rate fail_rate$fail_rate <- log(2) / am fail_rate$duration[1] <- 6 # Set planned maximum information from planned design max_info0 <- max(xx$analysis$info) upar_actual_info_frac <- upar_design_if upar_actual_info_frac$max_info <- max_info0 # compute power if actual information fraction relative to original # planned total is used yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # Planned time will drive timing since information accrues faster event = 1:2, analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_actual_info_frac, lower = gs_b, lpar = lpar ) yy %>% summary() %>% filter(Bound == \"Efficacy\") %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) yz <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = xx$analysis$Events, analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_planned_info_frac, lower = gs_b, lpar = lpar ) #> Warning: Unknown or uninitialised column: `Events`. 
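# Note: the analysis table in the design object stores event counts in the
# lower-case `event` column, so `xx$analysis$Events` above is NULL (hence the
# warning) and the analysis timing here is driven by `analysis_time` alone.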
yz %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-with-longer-control-median","dir":"Articles","previous_headings":"Group sequential design","what":"Scenario with longer control median","title":"Spending time examples","text":"Now return example control median longer expected confirm spending according planned level alone without considering actual number events also result power reduction. power gain great (94.2% vs 95.0%) interim final p-value bounds aligned intent emphasizing final analysis smaller average hazard ratio expected (0.680 vs 0.723 interim). First, show result using planned spending. Since number events less expected, used actual number events interim bound stringent obtain slightly greater power.","code":"# Alternate control median am <- 16 # Update the failure rate fail_rate$fail_rate <- log(2) / am # Return to 4 month delay with HR=1 before HR = 0.6 fail_rate$duration[1] <- 4 # Start with spending based on planned information # which is greater than actual information yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = c(1, max(xx$analysis$event)), analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_planned_info_frac, lower = gs_b, lpar = lpar ) yy %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) yz <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = c(1, max(xx$analysis$event)), analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_actual_info_frac, lower = gs_b, lpar = lpar ) yz %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"summary-for-spending-time-motivation-assuming-delayed-benefit","dir":"Articles","previous_headings":"Group sequential design","what":"Summary for spending time motivation assuming delayed benefit","title":"Spending time examples","text":"summary, using minimum planned actual spending adapt design based event-based spending adapts interim bound stringent final bound different scenarios ensures better power event-based interim analysis spending.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"assumptions","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Assumptions","title":"Spending time examples","text":"consider simple case use method Maurer Bretz (2013) test overall population biomarker subgroup endpoint. assume exponential failure rate median 12 control group regardless population. hazard ratio biomarker positive subgroup assumed 0.6, negative population 0.8. assume biomarker positive group represents half population, meaning enrollment rates assumed negative positive patients. difference failure rates two strata hazard ratio. case, assume proportional hazards within negative (HR = 0.8) positive (HR = 0.6) patients. illustrative purposes, choosing strategy based possible feeling much less certainty study start whether underlying benefit biomarker negative population. wish ensure power biomarker positive group, allow good chance positive overall population finding lesser benefit biomarker negative population. alternative trial strategy planned, alternate approach following considered. 
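Before deriving the design, it can be helpful to check the overall-population average hazard ratio implied by these stratum-level assumptions with ahr(). The sketch below repeats the assumed rates so it is self-contained; the 20/month enrollment rate per stratum is the assumption used in this section, and only the relative prevalence should matter for the AHR itself.

library(gsDesign2)

m <- 12  # control median assumed in both strata

# Equal prevalence: the same enrollment rate in each stratum
enroll_rate <- define_enroll_rate(
  stratum = c("Positive", "Negative"),
  duration = 12,
  rate = 20
)

# Proportional hazards within stratum: HR 0.6 (positive) and 0.8 (negative)
fail_rate <- define_fail_rate(
  stratum = c("Positive", "Negative"),
  duration = 100,
  fail_rate = log(2) / m,
  hr = c(0.6, 0.8),
  dropout_rate = 0.001
)

# Average hazard ratio for the combined population after 30 months of follow-up
ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = 30)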
case, design first biomarker positive population one-sided Type error controlled \\alpha = 0.0125:","code":"# we assume an exponential failure rate with a median of 12 # for the control group regardless of population. m <- 12 # the enrollment rate of both subgroup and population is the same enroll_rate <- define_enroll_rate( stratum = c(\"Positive\", \"Negative\"), duration = 12, rate = 20 ) # the hazard ratio in the biomarker positive subgroup will be assumed to be 0.6, # and in the negative population 0.8. fail_rate <- define_fail_rate( stratum = c(\"Positive\", \"Negative\"), hr = c(0.6, 0.8), duration = 100, fail_rate = log(2) / m, dropout_rate = 0.001 )"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design-for-biomarker-positive-population","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Planned design for biomarker positive population","title":"Spending time examples","text":"","code":"# Since execution will be event-based for biomarker population, # there will be no need to change spending plan for different scenarios. # upper bound: spending based on information fraction upar_design_spend <- list( sf = gsDesign::sfLDOF, # spending function total_spend = 0.0125, # total alpha spend is now 0.0125 timing = NULL, # to select maximum planned information for information fraction param = NULL ) # lower bound: no futility bound lpar <- rep(-Inf, 2) # Z = -infinity for lower bound # we will base the combined hypothesis design to ensure power in the biomarker subgroup positive <- gs_design_ahr( # enroll/failure rates enroll_rate = enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # Following drives information fraction for interim info_frac = c(.8, 1), # Total study duration driven by final analysis_time value, i.e., 30 # Enter small increasing values before that # so information fraction in planned_info_frac drives timing of interims analysis_time = c(1, 30), # upper bound upper = gs_spending_bound, upar = upar_design_spend, # lower lower lower = gs_b, lpar = lpar ) positive %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design-for-overall-population","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Planned design for overall population","title":"Spending time examples","text":"adjust overall study enrollment rate match design requirement biomarker positive population. Now can examine power overall population based hazard ratio assumptions biomarker negative biomarker positive subgroups just calculated enrollment assumption. use analysis times biomarker positive population design. see interim information fraction overall population slightly greater biomarker positive population . compensate enable flexibility biomarker positive prevalence changes, use spending time biomarker positive subgroup regardless true fraction final planned events analysis. Thus, interim nominal p-value bound biomarker positive overall populations. 
make much difference , see natural way adapt design observed biomarker positive prevalence different assumed design.","code":"# Get enrollment rate inflation factor compared to originally input rate inflation_factor <- positive$enroll_rate$rate[1] / enroll_rate$rate[1] # Using this inflation factor, set planned enrollment rates planned_enroll_rate <- enroll_rate %>% mutate(rate = rate * inflation_factor) planned_enroll_rate %>% gt() # Store overall enrollment rates for future use overall_enroll_rate <- planned_enroll_rate %>% summarize( stratum = \"All\", duration = first(duration), rate = sum(rate) ) overall_enroll_rate %>% gt() # Set total spend for overall population, O'Brien-Fleming spending function, and # same spending time as biomarker subgroup upar_overall_planned_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function param = NULL, total_spend = 0.0125, # alpha timing = c(.8, 1), # same spending time as biomarker subgroup max_info = NULL # we will use actual final information as planned initially ) overall_planned_bounds <- gs_power_ahr( # enroll/failure rates enroll_rate = planned_enroll_rate, fail_rate = fail_rate, # analysis time: the planned analysis time for biomarker positive population analysis_time = positive$analysis$time, # events will be determined by expected events at planned analysis times event = NULL, # upper bound: planned spending times are specified the same as before upper = gs_spending_bound, upar = upar_overall_planned_info_frac, # lower bound: no futility lower = gs_b, lpar = lpar ) overall_planned_bounds %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"alternate-scenarios-overview","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Alternate scenarios overview","title":"Spending time examples","text":"divide evaluations three subsections: one higher prevalence biomarker positive patients expected; one lower biomarker prevalence; differing event rate hazard ratio assumptions. case, assume total enrollment rate 48.8 per month planned . also assume enroll targeted biomarker positive subgroup enrollment 293 achieved, regardless overall enrollment. specify interim analysis timing require 80% planned final analysis events biomarker positive population least 10 months minimum follow-; thus, biomarker population never vary events spending . spending time used overall population, compare event-based spending. choices arbitrary. think reasonable, design planner think carefully variations suit clinical trial team needs.","code":"## Setting spending alternatives # Using information (event)-based spending time relative to overall population plan # Set total spend for overall population, O'Brien-Fleming spending function. # For design information-spending, we set timing = NULL and max_info to plan from above upar_overall_planned_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function total_spend = 0.0125, # alpha max_info = max(overall_planned_bounds$info0), # we will use planned final information for # overall population from design to # compute information fraction relative to plan param = NULL, timing = planned_info_frac ) #> Warning in max(overall_planned_bounds$info0): no non-missing arguments to max; #> returning -Inf # Using planned information fraction will demonstrate problems below. 
# Set total spend for overall population, O'Brien-Fleming spending function, and # same spending time as biomarker subgroup upar_overall_actual_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function total_spend = 0.0125, # alpha max_info = max(overall_planned_bounds$info0), # we will use planned final information # for overall population from design param = NULL, timing = NULL ) #> Warning in max(overall_planned_bounds$info0): no non-missing arguments to max; #> returning -Inf"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-subgroup-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence higher than planned","what":"Biomarker subgroup power","title":"Spending time examples","text":"suppose biomarker prevalence 60%, higher 50% prevalence design anticipated. enrollment rates positive versus negative patients expected enrollment duration now: Now can compute power biomarker positive group targeted events. Since simple proportional hazards model, thing changing original design takes slightly less time.","code":"# update the enrollment rate due to 60% prevalence positive_60_enroll_rate <- rbind( overall_enroll_rate %>% mutate(stratum = \"Positive\", rate = 0.6 * rate), overall_enroll_rate %>% mutate(stratum = \"Negative\", rate = 0.4 * rate) ) # update the enrollment duration positive_60_enroll_rate$duration <- max(positive$analysis$n) / overall_enroll_rate$rate / 0.6 # display the updated enrollment rate table positive_60_enroll_rate %>% gt() %>% fmt_number(columns = \"rate\", decimals = 1) positive_60_power <- gs_power_ahr( # enrollment/failure rate enroll_rate = positive_60_enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # number of events event = positive$analysis$event, # analysis time will be calcuated to achieve the targeted events analysis_time = NULL, # upper bound upper = gs_spending_bound, upar = upar_design_spend, # lower bound lower = gs_b, lpar = lpar ) positive_60_power %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overall-population-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence higher than planned","what":"Overall population power","title":"Spending time examples","text":"Now use spending overall population, resulting full \\alpha-spending end trial even though originally targeted events expected achieved. note information fraction computed based originally planned events overall population. Given larger proportion patients biomarker positive, average hazard ratio stronger originally planned power overall population still 90%. 
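The mechanism is visible directly in the spending function: fixing the spending time at 1 for the final analysis spends the full alpha allocated to the hypothesis, whereas event-based spending with a shortfall of events stops short of it. A small illustration, where the 0.9 final information fraction is a hypothetical placeholder rather than the value from this scenario:

library(gsDesign)

alpha <- 0.0125  # alpha allocated to the overall-population hypothesis

# Planned spending time of 1 at the final analysis spends the full alpha
sfLDOF(alpha, 1, param = NULL)$spend
# Event-based spending with an (assumed) 0.9 final information fraction spends less
sfLDOF(alpha, 0.9, param = NULL)$spend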
used information-based (.e., event-based) spending, reached full spending final analysis thus lower power.","code":"gs_power_ahr( # set the enrollment/failure rate enroll_rate = positive_60_enroll_rate, fail_rate = fail_rate, # set evnets and analysis time event = NULL, analysis_time = positive_60_power$analysis$time, # set upper bound: use planned spending in spite of lower overall information upper = gs_spending_bound, upar = upar_overall_planned_info_frac, # set lower bound: no futility lower = gs_b, lpar = rep(-Inf, 2) ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) gs_power_ahr( # set the enrollment/failure rate enroll_rate = positive_60_enroll_rate, fail_rate = fail_rate, # set evnets and analysis time event = NULL, analysis_time = positive_60_power$analysis$time, # upper bound: use actual spending which uses less than complete alpha upper = gs_spending_bound, upar = upar_overall_actual_info_frac, # lower bound: no futility lower = gs_b, lpar = lpar ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-subgroup-prevalence-lower-than-planned","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview","what":"Biomarker subgroup prevalence lower than planned","title":"Spending time examples","text":"suppose biomarker prevalence 40%, lower 50% prevalence design anticipated. enrollment rates positive versus negative patients expected enrollment duration now :","code":"# set the enrollment rate under 40% prevalence positive_40_enroll_rate <- rbind( overall_enroll_rate %>% mutate(stratum = \"Positive\", rate = 0.4 * rate), overall_enroll_rate %>% mutate(stratum = \"Negative\", rate = 0.6 * rate) ) # update the duration of enrollment table positive_40_enroll_rate$duration <- max(positive$analysis$n) / positive_40_enroll_rate$rate[1] # display the enrollment table positive_40_enroll_rate %>% gt() %>% fmt_number(columns = \"rate\", decimals = 1)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-positive-subgroup-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence lower than planned","what":"Biomarker positive subgroup power","title":"Spending time examples","text":"Now can compute power biomarker positive group targeted events.","code":"upar_actual_info_frac$total_spend <- 0.0125 upar_actual_info_frac$max_info <- max(positive$analysis$info) positive_40_power <- gs_power_ahr( # set enrollment/failure rate enroll_rate = positive_40_enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # set events/analysis time event = positive$analysis$event, analysis_time = NULL, # set upper bound upper = gs_spending_bound, upar = upar_actual_info_frac, # set lower bound lower = gs_b, lpar = rep(-Inf, 2) ) positive_40_power %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overall-population-power-1","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence lower than planned","what":"Overall population power","title":"Spending time examples","text":"see adapting overall sample size spending according biomarker subgroup, retain 90% power. 
spite lower overall effect size, larger adapted sample size ensures power retention.","code":"gs_power_ahr( enroll_rate = positive_40_enroll_rate, fail_rate = fail_rate, event = 1:2, analysis_time = positive_40_power$analysis$time, upper = gs_spending_bound, upar = upar_overall_planned_info_frac, lower = gs_b, lpar = rep(-Inf, 2) ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"summary-of-findings","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Summary of findings","title":"Spending time examples","text":"suggested two overall findings planning executing trial potentially delayed treatment effect: Require targeted event count minimum follow-completing analysis trial helps ensure powering trial appropriately better description tail behavior may essential long-term results key establishing potentially positive risk-benefit. Use fixed, small incremental \\alpha-spend interim proposed Fleming, Harrington, O’Brien (1984) variable number interim analyses ensure adequate follow-. Use minimum planned actual spending interim analyses. implementing Fleming, Harrington, O’Brien (1984) approach, also suggested simple approach futility may quite useful practically scenario potentially delayed onset treatment effect. basically looks evidence favorable control group effect relative experimental setting nominal p-value cutoff 1-sided 0.05 level early interim futility analyses. crossing survival curves inferior survival curves may exist, may useful way ensure continuing trial ethical; approach perhaps useful experimental treatment replacing components control treatment case add-treatment may toxic potentially detrimental effects. addition delayed effect example, considered example testing biomarker positive subgroup overall population. Using common spending time hypotheses common interim analysis strategy advocated Follmann, Proschan, Geller (1994) can helpful implement spending hypotheses adequate \\alpha spend final analysis also ensure full utilization \\alpha-spending. suggested using minimum planned actual spending interim analysis. Spending can based key hypothesis (e.g., biomarker positive population) minimum spending time among hypotheses tested. Taking advantage know correlations ensure full \\alpha utilization multiple hypothesis testing also simply implemented strategy Anderson et al. (2022). summary, illustrated motivation illustration spending time approach examples commonly encountered. Approaches suggested included implementation Fleming, Harrington, O’Brien (1984) fixed incremental \\alpha-spend interim analysis well use minimum planned actual spending interim analyses.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"design-assumptions","dir":"Articles","previous_headings":"","what":"Design assumptions","title":"Efficacy and futility boundary update","text":"assume two analyses: interim analysis (IA) final analysis (FA). IA planned 20 months opening enrollment, followed FA month 36. planned enrollment period spans 14 months, first 2 months enrollment rate 1/3 final rate, next 2 months rate 2/3 final rate, final rate remaining 10 months. obtain targeted 90% power, rates multiplied constant. control arm assumed follow exponential distribution median 9 months dropout rate 0.0001 per month regardless treatment group. 
Finally, experimental treatment group piecewise exponential 3-month delayed treatment effect; , first 3 months HR = 1 HR 0.6 thereafter. use null hypothesis information boundary crossing probability calculations null alternate hypotheses. also imply null hypothesis information used information fraction used spending functions derive design.","code":"alpha <- 0.0125 beta <- 0.1 ratio <- 1 # Enrollment enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = (1:3) / 3 ) # Failure and dropout fail_rate <- define_fail_rate( duration = c(3, Inf), fail_rate = log(2) / 9, hr = c(1, 0.6), dropout_rate = .0001 ) # IA and FA analysis time analysis_time <- c(20, 36) # Randomization ratio ratio <- 1 info_scale <- \"h0_info\""},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"one-sided-design","dir":"Articles","previous_headings":"","what":"One-sided design","title":"Efficacy and futility boundary update","text":"design, efficacy bounds IA FA. use Lan DeMets (1983) spending function total alpha 0.0125, approximates O’Brien-Fleming bound. planned design targets: Planned events: 227, 349 Planned information fraction interim final analysis: 0.6504, 1 Planned alpha spending: 0.0054, 0.025 Planned efficacy bounds: 2.9048, 2.2593 note rounding final targeted events increases power slightly targeted 90%.","code":"upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, info_frac = NULL, info_scale = \"h0_info\", analysis_time = analysis_time, ratio = ratio, upper = gs_spending_bound, upar = upar, test_upper = TRUE, lower = gs_b, lpar = rep(-Inf, 2), test_lower = FALSE ) |> to_integer() x |> summary() |> as_gt() |> tab_header(title = \"Planned design\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"bounds-for-alternate-alpha","dir":"Articles","previous_headings":"One-sided design","what":"Bounds for alternate alpha","title":"Efficacy and futility boundary update","text":"stage study design, may required report designs multiple \\alpha alpha reallocated due rejection another hypothesis. design stage, planned \\alpha 0.0125. Assume updated \\alpha 0.025 due reallocation \\alpha hypothesis. corresponding bounds updated boundaries utilize planned treatment effect planned statistical information null hypothesis, considering original design info_scale = \"h0_info\".","code":"gs_update_ahr( x = x, alpha = 0.025 ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = \"For alternate alpha = 0.025\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"updating-bounds-with-observed-events-at-time-of-analyses","dir":"Articles","previous_headings":"One-sided design","what":"Updating bounds with observed events at time of analyses","title":"Efficacy and futility boundary update","text":"provide simulation observed events IA FA differ planned. case differences planned due using calendar-based cutoffs simulated data. practice, even attempting match event counts exactly observed events analyses often differ planned. also assume protocol specifies full \\alpha spent final analysis even case like shortfall events versus design plan. observed data example generated simtrial::sim_pw_surv(). 
updated design ","code":"set.seed(123) # Make simulated data reproducible # Generate trial data observed_data <- simtrial::sim_pw_surv( n = x$analysis$n[x$analysis$analysis == 2], stratum = data.frame(stratum = \"All\", p = 1), block = c(rep(\"control\", 2), rep(\"experimental\", 2)), enroll_rate = x$enroll_rate, fail_rate = (fail_rate |> simtrial::to_sim_pw_surv())$fail_rate, dropout_rate = (fail_rate |> simtrial::to_sim_pw_surv())$dropout_rate ) # Cut simulated data for interim analysis at planned calendar time observed_data_ia <- observed_data |> simtrial::cut_data_by_date(analysis_time[1]) # Cut simulated data for final analysis at planned calendar time observed_data_fa <- observed_data |> simtrial::cut_data_by_date(analysis_time[2]) # Set spending fraction for interim according to observed events # divided by planned final events. # Final spending fraction is 1 per plan even if there is a shortfall # of events versus planned (as specified above) ustime <- c(sum(observed_data_ia$event) / max(x$analysis$event), 1) # Update bound gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa) ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = paste0(\"With observed \", sum(observed_data_ia$event), \" events at IA and \", sum(observed_data_fa$event), \" events at FA\"))"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"two-sided-asymmetric-design-beta-spending-with-non-binding-lower-bound","dir":"Articles","previous_headings":"","what":"Two-sided asymmetric design, beta-spending with non-binding lower bound","title":"Efficacy and futility boundary update","text":"section, investigate 2 sided asymmetric design, non-binding \\beta-spending used generate futility bounds. \\beta-spending refers Type II error (1 - power) spending lower bound crossing probabilities alternative hypothesis. Non-binding bound computation assumes trial continues lower bound crossed Type error, Type II error. original designs, employ Lan-DeMets spending function used approximate O’Brien-Fleming bounds (Lan DeMets 1983) efficacy futility bounds. total spending efficacy 0.0125, futility 0.1. addition, assume futility test final analysis. 
planned design, Planned events: 236, 363 Planned information fraction (timing): 0.6501, 1 Planned alpha spending: 0.0054388, 0.025 Planned efficacy bounds: 2.9057, 2.2593 Planned futility bounds: 0.6453 Since added futility bounds, sample size number events larger 1-sided example.","code":"# Upper and lower bounds uses spending with Lan-DeMets spending approximating # O'Brien-Fleming bound upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lower <- gs_spending_bound lpar <- list(sf = gsDesign::sfLDOF, total_spend = beta, param = NULL) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, info_frac = NULL, info_scale = \"h0_info\", analysis_time = c(20, 36), ratio = ratio, upper = gs_spending_bound, upar = upar, test_upper = TRUE, lower = lower, lpar = lpar, test_lower = c(TRUE, FALSE), binding = FALSE ) |> to_integer() x |> summary() |> as_gt() |> tab_header(title = \"Planned design\", subtitle = \"2-sided asymmetric design, non-binding futility\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"bounds-for-alternate-alpha-1","dir":"Articles","previous_headings":"Two-sided asymmetric design, beta-spending with non-binding lower bound","what":"Bounds for alternate alpha","title":"Efficacy and futility boundary update","text":"may want report design bounds multiple \\alpha case Type error may reallocated another hypothesis. assume now \\alpha 0.025 still use sample size event timing original alpha = 0.0125. updated bounds ","code":"gs_update_ahr( x = x, alpha = 0.025 ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = \"For alpha = 0.025\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"updating-bounds-with-observed-events-at-time-of-analyses-1","dir":"Articles","previous_headings":"Two-sided asymmetric design, beta-spending with non-binding lower bound","what":"Updating bounds with observed events at time of analyses","title":"Efficacy and futility boundary update","text":"assume observed events 1-sided example . updated design ","code":"# Update spending fraction as above ustime <- c(sum(observed_data_ia$event) / max(x$analysis$event), 1) gs_update_ahr( x = x, ustime = ustime, # Spending fraction for futility bound same as for efficacy lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa) ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = paste0(\"With observed \", sum(observed_data_ia$event), \" events at IA and \", sum(observed_data_fa$event), \" events at FA\"))"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Keaven Anderson. Author. Yilong Zhang. Author. Yujie Zhao. Author, maintainer. Jianxiao Yang. Author. Nan Xiao. Author. Amin Shirazi. Contributor. Ruixue Wang. Contributor. Yi Cui. Contributor. Ping Yang. Contributor. Xin Tong Li. Contributor. Chenxiang Li. Contributor. Hiroaki Fukuda. Contributor. Hongtao Zhang. Contributor. Yalin Zhu. Contributor. John Blischak. Contributor. Dickson Wanjau. Contributor. Merck & Co., Inc., Rahway, NJ, USA affiliates. Copyright holder.","code":""},{"path":"https://merck.github.io/gsDesign2/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Anderson K, Zhang Y, Zhao Y, Yang J, Xiao N (2024). 
gsDesign2: Group Sequential Design Non-Constant Effect. R package version 1.1.2.23, https://github.com/Merck/gsDesign2, https://merck.github.io/gsDesign2/.","code":"@Manual{, title = {gsDesign2: Group Sequential Design with Non-Constant Effect}, author = {Keaven Anderson and Yilong Zhang and Yujie Zhao and Jianxiao Yang and Nan Xiao}, year = {2024}, note = {R package version 1.1.2.23, https://github.com/Merck/gsDesign2}, url = {https://merck.github.io/gsDesign2/}, }"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/index.html","id":"objective","dir":"","previous_headings":"","what":"Objective","title":"Group Sequential Design with Non-Constant Effect","text":"goal gsDesign2 enable fixed group sequential design non-proportional hazards. Piecewise constant enrollment, failure rates dropout rates stratified population available enable highly flexible enrollment, time--event time--dropout assumptions. Substantial flexibility top gsDesign package intended selecting boundaries. Comments usability features encouraged still young package.","code":""},{"path":"https://merck.github.io/gsDesign2/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Group Sequential Design with Non-Constant Effect","text":"Install released version gsDesign2 CRAN: install development version GitHub :","code":"install.packages(\"gsDesign2\") remotes::install_github(\"Merck/gsDesign2\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-1-specifying-enrollment-and-failure-rates","dir":"","previous_headings":"Use cases","what":"Step 1: specifying enrollment and failure rates","title":"Group Sequential Design with Non-Constant Effect","text":"basic example shows solve common problem. assume 4 month delay treatment effect. Specifically, assume hazard ratio 1 4 months 0.6 thereafter. example assume exponential failure rate low exponential dropout rate. enroll_rate specification indicates expected enrollment duration 12 months exponential inter-arrival times. resulting failure rate specification following table. many rows strata needed can specified approximate whatever patterns wish.","code":"library(gsDesign2) # Basic example # Constant enrollment over 12 months # Rate will be adjusted later by gsDesign2 NPH to get sample size enroll_rate <- define_enroll_rate(duration = 12, rate = 1) # 12 month median exponential failure rate in control # 4 month delay in effect with HR=0.6 after # Low exponential dropout rate median_surv <- 12 fail_rate <- define_fail_rate( duration = c(4, Inf), fail_rate = log(2) / median_surv, hr = c(1, .6), dropout_rate = .001 ) fail_rate |> gt::gt()"},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-2-derive-a-fixed-design-with-no-interim-analyses","dir":"","previous_headings":"Use cases","what":"Step 2: derive a fixed design with no interim analyses","title":"Group Sequential Design with Non-Constant Effect","text":"Computing fixed sample size design 2.5% one-sided Type error 90% power. specify trial duration 36 months analysis_time. Enrollment duration sum enroll_rate$duration. used fixed_design() since single analysis: input enrollment rates now scaled achieve power: failure dropout rates remain unchanged input. summary obtained . columns : Design: sample size derivation method. N: sample size; generally round even number. Event: generally round . Bound: Z value efficacy; inverse normal 1 - alpha. alpha: 1-sided alpha level testing. 
Power: power corresponding enrollment, failure rate, trial targeted events.","code":"fd <- fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = 0.025, power = 0.9, study_duration = 36, ratio = 1 # Experimental/control randomization ratio ) fd$enroll_rate |> gt::gt() fd |> summary() |> as_gt()"},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-3-group-sequential-design","dir":"","previous_headings":"Use cases","what":"Step 3: group sequential design","title":"Group Sequential Design with Non-Constant Effect","text":"provide simple example group sequential design demonstrates couple features available gsDesign package. first specifying analysis times calendar time rather information fraction. second efficacy futility bound analysis. addition methods non-proportional hazards demonstrated fixed design . use O’Brien-Fleming spending function derive efficacy bounds 24 36 months. futility, simply require nominally significant trend wrong direction (p < 0.1) 8 months, trend favor experimental treatment 14 months (Z > 0) bound later (Z = -\\infty). Thus, two efficacy analyses two separate, earlier futility analysis. Power set 80% due somewhat aggressive futility bounds used safety (analysis 1 half way enrollment) proof concept (analysis 2). aggressive futility bounds may desirable previous proof concept experimental treatment established; essentially, becomes Phase II/III design interim evaluation appropriate efficacy trends completing trial. Now summarize derived design. summary table described vignette summarize group sequential designs gt tables. Note design trend favor experimental treatment minor 8 months due delayed effect assumption used (see AHR analysis 1 table). design trend 16 months somewhat favorable looking HR < 1 (favoring experimental treatment) proof concept. Actual bounds timing selected trial situation dependent, hope suggestions provocative might considered.","code":"gsd <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, # 80% power; enables aggressive futility bound specified analysis_time = c(8, 14, 24, 36), binding = FALSE, # Non-binding futility bound upper = gs_spending_bound, # Use spending bound for efficacy; total_spend is normally alpha upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), test_upper = c(FALSE, FALSE, TRUE, TRUE), # Only test efficacy after 1st analysis lower = gs_b, # Fixed Z-values will be provided for futility bound lpar = c(qnorm(0.1), 0, -Inf, -Inf) ) gsd |> summary() |> as_gt()"},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Average hazard ratio under non-proportional hazards — ahr","title":"Average hazard ratio under non-proportional hazards — ahr","text":"Provides geometric average hazard ratio various non-proportional hazards assumptions either single multiple strata studies. 
piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Average hazard ratio under non-proportional hazards — ahr","text":"","code":"ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), total_duration = 30, ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Average hazard ratio under non-proportional hazards — ahr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-start enrollment data cutoff; can single value vector positive numbers. ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Average hazard ratio under non-proportional hazards — ahr","text":"data frame time (total_duration), ahr (average hazard ratio), n (sample size), event (expected number events), info (information given scenarios), info0 (information related null hypothesis) value total_duration input.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Average hazard ratio under non-proportional hazards — ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Average hazard ratio under non-proportional hazards — ahr","text":"","code":"# Example 1: default ahr() #> time ahr n event info info0 #> 1 30 0.6952153 108 58.49097 14.32724 14.62274 # Example 2: default with multiple analysis times (varying total_duration) ahr(total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7857415 108 30.27841 7.441186 7.569603 #> 2 30 0.6952153 108 58.49097 14.327243 14.622742 # Example 3: stratified population enroll_rate <- define_enroll_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 3)), duration = c(2, 10, 4, 4, 8), rate = c(5, 10, 0, 3, 6) ) fail_rate <- define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = c(1, Inf, 1, Inf), fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7332218 164 113.2782 28.18130 28.31954 #> 2 30 0.7175169 170 166.1836 41.49942 41.54590"},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":null,"dir":"Reference","previous_headings":"","what":"Blinded estimation of average hazard ratio — ahr_blinded","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"Based blinded data assumed hazard ratios different intervals, compute blinded estimate average hazard ratio (AHR) corresponding estimate statistical information. 
function intended use computing futility bounds based spending assuming input hazard ratio (hr) values intervals specified .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"","code":"ahr_blinded( surv = survival::Surv(time = simtrial::ex1_delayed_effect$month, event = simtrial::ex1_delayed_effect$evntd), intervals = c(3, Inf), hr = c(1, 0.6), ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"surv Input survival object (see survival::Surv()); note 0 = censored, 1 = event survival::Surv(). intervals Vector containing positive values indicating interval lengths exponential rates assumed. Note final infinite interval added events occur final interval specified. hr Vector hazard ratios assumed interval. ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"tibble one row containing ahr - Blinded average hazard ratio based assumed period-specific hazard ratios input fail_rate observed events corresponding intervals. event - Total observed number events. info0 - Information related null hypothesis. theta - Natural parameter group sequential design representing expected incremental drift analyses.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"","code":"ahr_blinded( surv = survival::Surv( time = simtrial::ex2_delayed_effect$month, event = simtrial::ex2_delayed_effect$evntd ), intervals = c(4, 100), hr = c(1, .55), ratio = 1 ) #> # A tibble: 1 × 4 #> event ahr theta info0 #> #> 1 228 0.826 0.191 57"},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"Convert summary table fixed group sequential design object gt object","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"","code":"as_gt(x, ...) # S3 method for class 'fixed_design' as_gt(x, title = NULL, footnote = NULL, ...) 
# S3 method for class 'gs_design' as_gt( x, title = NULL, subtitle = NULL, colname_spanner = \"Cumulative boundary crossing probability\", colname_spannersub = c(\"Alternate hypothesis\", \"Null hypothesis\"), footnote = NULL, display_bound = c(\"Efficacy\", \"Futility\"), display_columns = NULL, display_inf_bound = FALSE, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"x summary object fixed group sequential design. ... Additional arguments (used). title string specify title gt table. footnote list containing content, location, attr. content vector string specify footnote text; location vector string specify locations put superscript footnote index; attr vector string specify attributes footnotes, example, c(\"colname\", \"title\", \"subtitle\", \"analysis\", \"spanner\"); users can use functions gt package customize table. subtitle string specify subtitle gt table. colname_spanner string specify spanner gt table. colname_spannersub vector strings specify spanner details gt table. display_bound vector strings specifying label bounds. default c(\"Efficacy\", \"Futility\"). display_columns vector strings specifying variables displayed summary table. display_inf_bound Logical, whether display +/-inf bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"gt_tbl object.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"","code":"if (FALSE) { # interactive() && !identical(Sys.getenv(\"IN_PKGDOWN\"), \"true\") library(dplyr) # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, dropout_rate = .001, hr = c(1, .6) ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # Example 1 ---- fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_gt() # Example 2 ---- fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_gt() } if (FALSE) { # interactive() && !identical(Sys.getenv(\"IN_PKGDOWN\"), \"true\") library(dplyr) # Example 1 ---- # The default output gs_design_ahr() %>% summary() %>% as_gt() gs_power_ahr() %>% summary() %>% as_gt() gs_design_wlr() %>% summary() %>% as_gt() gs_power_wlr() %>% summary() %>% as_gt() gs_power_combo() %>% summary() %>% as_gt() gs_design_rd() %>% summary() %>% as_gt() gs_power_rd() %>% summary() %>% as_gt() # Example 2 ---- # Usage of title = ..., subtitle = ... # to edit the title/subtitle gs_power_wlr() %>% summary() %>% as_gt( title = \"Bound Summary\", subtitle = \"from gs_power_wlr\" ) # Example 3 ---- # Usage of colname_spanner = ..., colname_spannersub = ... 
# to edit the spanner and its sub-spanner gs_power_wlr() %>% summary() %>% as_gt( colname_spanner = \"Cumulative probability to cross boundaries\", colname_spannersub = c(\"under H1\", \"under H0\") ) # Example 4 ---- # Usage of footnote = ... # to edit the footnote gs_power_wlr() %>% summary() %>% as_gt( footnote = list( content = c( \"approximate weighted hazard ratio to cross bound.\", \"wAHR is the weighted AHR.\", \"the crossing probability.\", \"this table is generated by gs_power_wlr.\" ), location = c(\"~wHR at bound\", NA, NA, NA), attr = c(\"colname\", \"analysis\", \"spanner\", \"title\") ) ) # Example 5 ---- # Usage of display_bound = ... # to either show efficacy bound or futility bound, or both(default) gs_power_wlr() %>% summary() %>% as_gt(display_bound = \"Efficacy\") # Example 6 ---- # Usage of display_columns = ... # to select the columns to display in the summary table gs_power_wlr() %>% summary() %>% as_gt(display_columns = c(\"Analysis\", \"Bound\", \"Nominal p\", \"Z\", \"Probability\")) }"},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":null,"dir":"Reference","previous_headings":"","what":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"Write summary table fixed group sequential design object RTF file","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"","code":"as_rtf(x, ...) # S3 method for class 'fixed_design' as_rtf( x, title = NULL, footnote = NULL, col_rel_width = NULL, orientation = c(\"portrait\", \"landscape\"), text_font_size = 9, file, ... ) # S3 method for class 'gs_design' as_rtf( x, title = NULL, subtitle = NULL, colname_spanner = \"Cumulative boundary crossing probability\", colname_spannersub = c(\"Alternate hypothesis\", \"Null hypothesis\"), footnote = NULL, display_bound = c(\"Efficacy\", \"Futility\"), display_columns = NULL, display_inf_bound = TRUE, col_rel_width = NULL, orientation = c(\"portrait\", \"landscape\"), text_font_size = 9, file, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"x summary object fixed group sequential design. ... Additional arguments (used). title string specify title RTF table. footnote list containing content, location, attr. content vector string specify footnote text; location vector string specify locations put superscript footnote index; attr vector string specify attributes footnotes, example, c(\"colname\", \"title\", \"subtitle\", \"analysis\", \"spanner\"); users can use functions gt package customize table. col_rel_width Column relative width vector e.g. c(2,1,1) refers 2:1:1. Default NULL equal column width. orientation Orientation 'portrait' 'landscape'. text_font_size Text font size. vary text font size column, use numeric vector length vector equal number columns displayed e.g. c(9,20,40). file File path output. subtitle string specify subtitle RTF table. colname_spanner string specify spanner RTF table. colname_spannersub vector strings specify spanner details RTF table. display_bound vector strings specifying label bounds. 
default c(\"Efficacy\", \"Futility\"). display_columns vector strings specifying variables displayed summary table. display_inf_bound Logical, whether display +/-inf bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"as_rtf() returns input x invisibly.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"","code":"library(dplyr) #> #> Attaching package: ‘dplyr’ #> The following objects are masked from ‘package:stats’: #> #> filter, lag #> The following objects are masked from ‘package:base’: #> #> intersect, setdiff, setequal, union # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, dropout_rate = .001, hr = c(1, .6) ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # AHR ---- # under fixed power x <- fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() x %>% as_rtf(file = tempfile(fileext = \".rtf\")) x %>% as_rtf(title = \"Fixed design\", file = tempfile(fileext = \".rtf\")) x %>% as_rtf( footnote = \"Power computed with average hazard ratio method given the sample size\", file = tempfile(fileext = \".rtf\") ) x %>% as_rtf(text_font_size = 10, file = tempfile(fileext = \".rtf\")) # FH ---- # under fixed power fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) #' # \\donttest{ # the default output library(dplyr) gs_design_ahr() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_ahr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_design_wlr() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_combo() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_design_rd() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_rd() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) # usage of title = ..., subtitle = ... # to edit the title/subtitle gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( title = \"Bound Summary\", subtitle = \"from gs_power_wlr\", file = tempfile(fileext = \".rtf\") ) # usage of colname_spanner = ..., colname_spannersub = ... # to edit the spanner and its sub-spanner gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( colname_spanner = \"Cumulative probability to cross boundaries\", colname_spannersub = c(\"under H1\", \"under H0\"), file = tempfile(fileext = \".rtf\") ) # usage of footnote = ... 
# to edit the footnote gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( footnote = list( content = c( \"approximate weighted hazard ratio to cross bound.\", \"wAHR is the weighted AHR.\", \"the crossing probability.\", \"this table is generated by gs_power_wlr.\" ), location = c(\"~wHR at bound\", NA, NA, NA), attr = c(\"colname\", \"analysis\", \"spanner\", \"title\") ), file = tempfile(fileext = \".rtf\") ) # usage of display_bound = ... # to either show efficacy bound or futility bound, or both(default) gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( display_bound = \"Efficacy\", file = tempfile(fileext = \".rtf\") ) # usage of display_columns = ... # to select the columns to display in the summary table gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( display_columns = c(\"Analysis\", \"Bound\", \"Nominal p\", \"Z\", \"Probability\"), file = tempfile(fileext = \".rtf\") ) # }"},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":null,"dir":"Reference","previous_headings":"","what":"Define enrollment rate — define_enroll_rate","title":"Define enrollment rate — define_enroll_rate","text":"Define enrollment rate subjects study following piecewise exponential distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define enrollment rate — define_enroll_rate","text":"","code":"define_enroll_rate(duration, rate, stratum = \"All\")"},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define enrollment rate — define_enroll_rate","text":"duration numeric vector ordered piecewise study duration interval. rate numeric vector enrollment rate duration. stratum character vector stratum name.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define enrollment rate — define_enroll_rate","text":"enroll_rate data frame.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define enrollment rate — define_enroll_rate","text":"duration ordered piecewise duration equal \\(t_i - t_{-1}\\), \\(0 = t_0 < t_i < \\cdots < t_M = \\infty\\). enrollment rates defined duration length. 
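A minimal sketch of the duration convention described above, assuming only base R and the define_enroll_rate() call from the examples below: the duration values are interval lengths \(t_i - t_{i-1}\), so the implied calendar change points are their cumulative sums.
library(gsDesign2)
# Durations are interval lengths, not calendar times; cumsum() recovers the
# implied end points of the three enrollment periods.
enroll_rate <- define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9))
cumsum(enroll_rate$duration) # 2, 4, 14: the enrollment rate changes at months 2 and 4 and enrollment ends at month 14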
If the study has multiple strata, different durations and rates can be specified for each stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define enrollment rate — define_enroll_rate","text":"","code":"# Define enroll rate without stratum define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 # Define enroll rate with stratum define_enroll_rate( duration = rep(c(2, 2, 2, 18), 3), rate = c((1:4) / 3, (1:4) / 2, (1:4) / 6), stratum = c(array(\"High\", 4), array(\"Moderate\", 4), array(\"Low\", 4)) ) #> # A tibble: 12 × 3 #> stratum duration rate #> #> 1 High 2 0.333 #> 2 High 2 0.667 #> 3 High 2 1 #> 4 High 18 1.33 #> 5 Moderate 2 0.5 #> 6 Moderate 2 1 #> 7 Moderate 2 1.5 #> 8 Moderate 18 2 #> 9 Low 2 0.167 #> 10 Low 2 0.333 #> 11 Low 2 0.5 #> 12 Low 18 0.667"},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":null,"dir":"Reference","previous_headings":"","what":"Define failure rate — define_fail_rate","title":"Define failure rate — define_fail_rate","text":"Define the subject failure rate for a study with two treatment groups. Also supports stratified designs with different failure rates in each stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define failure rate — define_fail_rate","text":"","code":"define_fail_rate(duration, fail_rate, dropout_rate, hr = 1, stratum = \"All\")"},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define failure rate — define_fail_rate","text":"duration A numeric vector of ordered piecewise study duration intervals. fail_rate A numeric vector of the failure rate in each duration interval for the control group. dropout_rate A numeric vector of the dropout rate in each duration interval. hr A numeric vector of the hazard ratio between the treatment and control group. stratum A character vector of stratum names.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define failure rate — define_fail_rate","text":"A fail_rate data frame.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define failure rate — define_fail_rate","text":"Define the failure and dropout rates of subjects in a study as following a piecewise exponential distribution. The duration is ordered piecewise, with each piece of length \(t_i - t_{i-1}\), where \(0 = t_0 < t_1 < \cdots < t_M = \infty\). The failure rate, dropout rate, and hazard ratio within each piece of the study duration can be specified. If the study has multiple strata, different durations, failure rates, dropout rates, and hazard ratios can be specified for each stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define failure rate — define_fail_rate","text":"","code":"# Define fail rate without stratum define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 # Define fail rate with stratum define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = 1, fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) #> # A tibble: 4 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 Low 1 0.1 0.001 0.9 #> 2 Low 1 0.2 0.001 0.75 #> 3 High 1 0.3 0.001 0.8 #> 4 High 1 0.4 0.001 0.6"},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":null,"dir":"Reference","previous_headings":"","what":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"A helper function passed to uniroot(); see the sketch below.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"","code":"event_diff(x, enroll_rate, fail_rate, ratio, target_event)"},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"x Duration. enroll_rate An enroll_rate data frame without stratum created by define_enroll_rate(). fail_rate A fail_rate data frame without stratum created by define_fail_rate(). ratio Experimental:Control randomization ratio.
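A minimal usage sketch, under the assumption that this unexported helper can be reached as gsDesign2:::event_diff and accepts define_enroll_rate()/define_fail_rate() output directly, as its argument list suggests; the exported expected_time() exposes the same calculation through a documented interface.
# Hedged illustration: locate the time at which roughly 150 events are expected
# by finding the root of the event-count difference over a plausible interval.
library(gsDesign2)
enroll_rate <- define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9) * 5)
fail_rate <- define_fail_rate(
  duration = c(3, 100), fail_rate = log(2) / c(9, 18),
  hr = c(.9, .6), dropout_rate = .001
)
# gsDesign2:::event_diff is an internal helper; this call is illustrative only.
uniroot(
  gsDesign2:::event_diff,
  interval = c(0.01, 100),
  enroll_rate = enroll_rate, fail_rate = fail_rate,
  ratio = 1, target_event = 150
)$root
# The exported expected_time() reports the corresponding timing directly:
expected_time(enroll_rate, fail_rate, target_event = 150)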
target_event targeted number events achieved.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"single numeric value represents difference expected number events provided duration (x) targeted number events (target_event)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":null,"dir":"Reference","previous_headings":"","what":"Piecewise constant expected accrual — expected_accrual","title":"Piecewise constant expected accrual — expected_accrual","text":"Computes expected cumulative enrollment (accrual) given set piecewise constant enrollment rates times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Piecewise constant expected accrual — expected_accrual","text":"","code":"expected_accrual( time = 0:24, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Piecewise constant expected accrual — expected_accrual","text":"time Times enrollment computed. enroll_rate enroll_rate data frame without stratum created define_enroll_rate().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Piecewise constant expected accrual — expected_accrual","text":"vector expected cumulative enrollment specified times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Piecewise constant expected accrual — expected_accrual","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Piecewise constant expected accrual — expected_accrual","text":"","code":"library(tibble) # Example 1: default expected_accrual() #> [1] 0 5 10 15 25 35 45 65 85 105 125 145 165 185 205 225 245 265 285 #> [20] 305 325 345 365 385 405 # Example 2: unstratified design expected_accrual( time = c(5, 10, 20), enroll_rate = define_enroll_rate( duration = c(3, 3, 18), rate = c(5, 10, 20) ) ) #> [1] 35 125 325 expected_accrual( time = c(5, 10, 20), enroll_rate = define_enroll_rate( duration = c(3, 3, 18), rate = c(5, 10, 20), ) ) #> [1] 35 125 325 # Example 3: stratified design expected_accrual( time = c(24, 30, 40), enroll_rate = define_enroll_rate( stratum = c(\"subgroup\", \"complement\"), duration = c(33, 33), rate = c(30, 30) ) ) #> [1] 1440 1800 1980 # Example 4: expected accrual over time # Scenario 4.1: for the enrollment in the first 3 months, # it is exactly 3 * 5 = 15. expected_accrual( time = 3, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 15 # Scenario 4.2: for the enrollment in the first 6 months, # it is exactly 3 * 5 + 3 * 10 = 45. 
expected_accrual( time = 6, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 45 # Scenario 4.3: for the enrollment in the first 24 months, # it is exactly 3 * 5 + 3 * 10 + 18 * 20 = 405. expected_accrual( time = 24, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 405 # Scenario 4.4: for the enrollment after 24 months, # it is the same as that from the 24 months, since the enrollment is stopped. expected_accrual( time = 25, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 405 # Instead of compute the enrolled subjects one time point by one time point, # we can also compute it once. expected_accrual( time = c(3, 6, 24, 25), enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 15 45 405 405"},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":null,"dir":"Reference","previous_headings":"","what":"Expected events observed under piecewise exponential model — expected_event","title":"Expected events observed under piecewise exponential model — expected_event","text":"Computes expected events time strata assumption piecewise constant enrollment rates piecewise exponential failure censoring rates. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time. main purpose may generate trial can analyzed single point time using group sequential methods, routine can also used simulate adaptive trial design. intent enable sample size calculations non-proportional hazards assumptions stratified populations.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Expected events observed under piecewise exponential model — expected_event","text":"","code":"expected_event( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), dropout_rate = 0.001), total_duration = 25, simple = TRUE )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Expected events observed under piecewise exponential model — expected_event","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-start enrollment data cutoff. simple default (TRUE), return numeric expected number events, otherwise data frame described .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Expected events observed under piecewise exponential model — expected_event","text":"default simple = TRUE return total expected number events real number. Otherwise, simple = FALSE, data frame returned following variables period specified fail_rate: t: start period. fail_rate: failure rate period. event: expected events period. 
records returned data frame correspond input data frame fail_rate.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Expected events observed under piecewise exponential model — expected_event","text":"periods generally supplied output input. intent enable expected event calculations tidy format maximize flexibility variety purposes.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Expected events observed under piecewise exponential model — expected_event","text":"","code":"library(gsDesign2) # Default arguments, simple output (total event count only) expected_event() #> [1] 57.3537 # Event count by time period expected_event(simple = FALSE) #> t fail_rate event #> 1 0 0.07701635 22.24824 #> 2 3 0.03850818 35.10546 # Early cutoff expected_event(total_duration = .5) #> [1] 0.02850923 # Single time period example expected_event( enroll_rate = define_enroll_rate(duration = 10, rate = 10), fail_rate = define_fail_rate(duration = 100, fail_rate = log(2) / 6, dropout_rate = .01), total_duration = 22, simple = FALSE ) #> t fail_rate event #> 1 0 0.1155245 80.40974 # Single time period example, multiple enrollment periods expected_event( enroll_rate = define_enroll_rate(duration = c(5, 5), rate = c(10, 20)), fail_rate = define_fail_rate(duration = 100, fail_rate = log(2) / 6, dropout_rate = .01), total_duration = 22, simple = FALSE ) #> t fail_rate event #> 1 0 0.1155245 118.8484"},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":null,"dir":"Reference","previous_headings":"","what":"Predict time at which a targeted event count is achieved — expected_time","title":"Predict time at which a targeted event count is achieved — expected_time","text":"expected_time() made match input format ahr() solve time expected accumulated events equal input target. Enrollment failure rate distributions specified follows. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Predict time at which a targeted event count is achieved — expected_time","text":"","code":"expected_time( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9) * 5), fail_rate = define_fail_rate(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), target_event = 150, ratio = 1, interval = c(0.01, 100) )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Predict time at which a targeted event count is achieved — expected_time","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). target_event targeted number events achieved. ratio Experimental:Control randomization ratio. 
interval interval presumed include time expected event count equal target_event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Predict time at which a targeted event count is achieved — expected_time","text":"data frame Time (computed match events target_event), AHR (average hazard ratio), Events (target_event input), info (information given scenarios), info0 (information related null hypothesis) value total_duration input.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Predict time at which a targeted event count is achieved — expected_time","text":"","code":"# Example 1 ---- # default # \\donttest{ expected_time() #> time ahr event info info0 #> 1 14.90814 0.7865729 150 36.86707 37.5 # } # Example 2 ---- # check that result matches a finding using AHR() # Start by deriving an expected event count enroll_rate <- define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9) * 5) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) total_duration <- 20 xx <- ahr(enroll_rate, fail_rate, total_duration) xx #> time ahr n event info info0 #> 1 20 0.7377944 540 208.3641 50.97575 52.09103 # Next we check that the function confirms the timing of the final analysis. # \\donttest{ expected_time(enroll_rate, fail_rate, target_event = xx$event, interval = c(.5, 1.5) * xx$time ) #> time ahr event info info0 #> 1 20 0.7377944 208.3641 50.97575 52.09103 # } # Example 3 ---- # In this example, we verify `expected_time()` by `ahr()`. # \\donttest{ x <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, total_duration = 20 ) cat(\"The number of events by 20 months is \", x$event, \".\\n\") #> The number of events by 20 months is 208.3641 . y <- expected_time( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, target_event = x$event ) cat(\"The time to get \", x$event, \" is \", y$time, \"months.\\n\") #> The time to get 208.3641 is 20 months. # }"},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":null,"dir":"Reference","previous_headings":"","what":"Find the ","title":"Find the ","text":"Fast replacement dplyr::lag simple case n = 1L always supplying new value insert beginning vector.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Find the ","text":"","code":"fastlag(x, first)"},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Find the ","text":"x vector (length(x) > 0) first single value (length(first) == 1)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Find the ","text":"vector begins first followed x final value removed","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Find the ","text":"Important: function fast provides minimal safety checks. relies coercion rules c. best results, x first type atomic vector, though fine mix numeric integer vectors long code also rely distinction. 
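A small sketch of the coercion point above, assuming only that gsDesign2:::fastlag() behaves as documented (prepend first, drop the final element): mixing an integer first with a double x is resolved by the coercion rules of c(), so the result is a double vector.
x <- c(2.5, 7, 10)
gsDesign2:::fastlag(x, first = 0L) # c(0, 2.5, 7): the integer 0L is promoted to double by c()
typeof(gsDesign2:::fastlag(x, first = 0L)) # "double"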
can also work lists needed.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Find the ","text":"","code":"gsDesign2:::fastlag(1:5, first = 100) == c(100, 1:4) #> [1] TRUE TRUE TRUE TRUE TRUE"},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":null,"dir":"Reference","previous_headings":"","what":"Fixed design under non-proportional hazards — fixed_design_ahr","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"Computes fixed design sample size (given power) power (given sample size) : fixed_design_ahr() - Average hazard ratio method. fixed_design_fh() - Weighted logrank test Fleming-Harrington weights (Farrington Manning, 1990). fixed_design_mb() - Weighted logrank test Magirr-Burman weights. fixed_design_lf() - Lachin-Foulkes method (Lachin Foulkes, 1986). fixed_design_maxcombo() - MaxCombo method. fixed_design_rmst() - RMST method. fixed_design_milestone() - Milestone method. Additionally, fixed_design_rd() provides fixed design binary endpoint treatment effect measuring risk difference.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"","code":"fixed_design_ahr( enroll_rate, fail_rate, alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, event = NULL ) fixed_design_fh( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, rho = 0, gamma = 0 ) fixed_design_lf( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate ) fixed_design_maxcombo( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, rho = c(0, 0, 1), gamma = c(0, 1, 0), tau = rep(-1, 3) ) fixed_design_mb( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, tau = 6, w_max = Inf ) fixed_design_milestone( alpha = 0.025, power = NULL, ratio = 1, enroll_rate, fail_rate, study_duration = 36, tau = NULL ) fixed_design_rd( alpha = 0.025, power = NULL, ratio = 1, p_c, p_e, rd0 = 0, n = NULL ) fixed_design_rmst( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, tau = NULL )"},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. alpha One-sided Type error (strictly 0 1). power Power (NULL compute power strictly 0 1 - alpha otherwise). ratio Experimental:Control randomization ratio. study_duration Study duration. event Targeted event analysis. rho vector numbers paring gamma tau MaxCombo test. gamma vector numbers paring rho tau MaxCombo test. tau Test parameter RMST. w_max Test parameter Magirr-Burman method. p_c numerical value control arm rate. p_e numerical value experimental arm rate. rd0 Risk difference null hypothesis, default 0. n Sample size. 
NULL power input, sample size computed achieve targeted power","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"list design characteristic summary.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"","code":"# AHR method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_ahr( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 463. 325. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_ahr( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 360 252. 36 1.96 0.025 0.816 # WLR test with FH weights ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_fh( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = 1, gamma = 1 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(1, 1) 352. 247. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_fh( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = 1, gamma = 1 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(1, 1) 360 252. 36 1.96 0.025 0.906 # LF method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_lf( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Lachin and Foulkes 463. 329. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_fh( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0, 0) (logrank) 360 256. 
36 1.96 0.025 0.819 # MaxCombo test ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_maxcombo( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = c(0, 0.5), gamma = c(0, 0), tau = c(-1, -1) ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 MaxCombo: FHC(0, 0), FHC(0.5, 0) 483. 339. 36 2.02 0.025 0.900 # Example 2: given sample size and compute power x <- fixed_design_maxcombo( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = c(0, 0.5), gamma = c(0, 0), tau = c(-1, -1) ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 MaxCombo: FHC(0, 0), FHC(0.5, 0) 360. 252. 36 2.02 0.025 0.797 # WLR test with MB weights ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_mb( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, tau = 4, w_max = 2 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 430. 301. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_mb( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, tau = 4, w_max = 2 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 360 252. 36 1.96 0.025 0.844 # Milestone method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_milestone( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Milestone: tau = 18 606. 431. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_milestone( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Milestone: tau = 18 360 256. 36 1.96 0.025 0.705 # Binary endpoint with risk differences ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_rd( alpha = 0.025, power = 0.9, p_c = .15, p_e = .1, rd0 = 0, ratio = 1 ) x %>% summary() #> # A tibble: 1 × 5 #> Design N Bound alpha Power #> #> 1 Risk difference 1835. 
1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_rd( alpha = 0.025, power = NULL, p_c = .15, p_e = .1, rd0 = 0, n = 2000, ratio = 1 ) x %>% summary() #> # A tibble: 1 × 5 #> Design N Bound alpha Power #> #> 1 Risk difference 2000 1.96 0.025 0.923 # RMST method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_rmst( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 RMST: tau = 18 671. 477. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_rmst( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 RMST: tau = 18 360 256. 36 1.96 0.025 0.661"},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":null,"dir":"Reference","previous_headings":"","what":"Function to calculate power — get_combo_power","title":"Function to calculate power — get_combo_power","text":"helper function passed uniroot()","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function to calculate power — get_combo_power","text":"","code":"get_combo_power(n, bound, info_fh, theta_fh, corr_fh, algorithm, beta, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function to calculate power — get_combo_power","text":"n Input sample size algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. beta Type II error. ... Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function to calculate power — get_combo_power","text":"optimal sample size (single numeric value)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function to calculate power — get_combo_power","text":"function calculates difference derived power targeted power (1 - beta), based provided sample size, upper lower boundaries, treatment effect.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gsDesign2-package.html","id":null,"dir":"Reference","previous_headings":"","what":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","title":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","text":"goal 'gsDesign2' enable fixed group sequential design non-proportional hazards. enable highly flexible enrollment, time--event time--dropout assumptions, 'gsDesign2' offers piecewise constant enrollment, failure rates, dropout rates stratified population. package includes three methods designs: average hazard ratio, weighted logrank tests Yung Liu (2019) doi:10.1111/biom.13196 , MaxCombo tests. 
Substantial flexibility top 'gsDesign' package intended selecting boundaries.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/gsDesign2-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","text":"Maintainer: Yujie Zhao yujie.zhao@merck.com Authors: Keaven Anderson keaven_anderson@merck.com Yilong Zhang elong0527@gmail.com Jianxiao Yang yangjx@ucla.edu Nan Xiao nan.xiao1@merck.com contributors: Amin Shirazi ashirazist@gmail.com [contributor] Ruixue Wang ruixue.wang@merck.com [contributor] Yi Cui yi.cui@merck.com [contributor] Ping Yang ping.yang1@merck.com [contributor] Xin Tong Li xin.tong.li@merck.com [contributor] Chenxiang Li chenxiang.li@merck.com [contributor] Hiroaki Fukuda hiroaki.fukuda@merck.com [contributor] Hongtao Zhang hongtao.zhang1@merck.com [contributor] Yalin Zhu yalin.zhu@outlook.com [contributor] John Blischak jdblischak@gmail.com [contributor] Dickson Wanjau dickson.wanjau@merck.com [contributor] Merck & Co., Inc., Rahway, NJ, USA affiliates [copyright holder]","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":null,"dir":"Reference","previous_headings":"","what":"Default boundary generation — gs_b","title":"Default boundary generation — gs_b","text":"gs_b() simplest version function used upper lower arguments gs_power_npe() gs_design_npe() upper_bound lower_bound arguments gs_prob_combo() pmvnorm_combo(). simply returns vector Z-values input vector par , k specified, par[k] returned. Note bounds need change changing information analyses, gs_b() used. instance, spending function bounds use gs_spending_bound().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Default boundary generation — gs_b","text":"","code":"gs_b(par = NULL, k = NULL, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Default boundary generation — gs_b","text":"par gs_b(), just Z-values boundaries; can include infinite values. k NULL (default), return par, else return par[k]. ... 
arguments passed methods.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Default boundary generation — gs_b","text":"Returns vector input par k NULL, otherwise, par[k].","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Default boundary generation — gs_b","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Default boundary generation — gs_b","text":"","code":"# Simple: enter a vector of length 3 for bound gs_b(par = 4:2) #> [1] 4 3 2 # 2nd element of par gs_b(par = 4:2, k = 2) #> [1] 3 # Generate an efficacy bound using a spending function # Use Lan-DeMets spending approximation of O'Brien-Fleming bound # as 50%, 75% and 100% of final spending # Information fraction IF <- c(.5, .75, 1) gs_b(par = gsDesign::gsDesign( alpha = .025, k = length(IF), test.type = 1, sfu = gsDesign::sfLDOF, timing = IF )$upper$bound) #> [1] 2.962588 2.359018 2.014084"},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":null,"dir":"Reference","previous_headings":"","what":"Create npsurvSS arm object — gs_create_arm","title":"Create npsurvSS arm object — gs_create_arm","text":"Create npsurvSS arm object","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create npsurvSS arm object — gs_create_arm","text":"","code":"gs_create_arm(enroll_rate, fail_rate, ratio, total_time = 1e+06)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create npsurvSS arm object — gs_create_arm","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. ratio Experimental:Control randomization ratio. 
total_time Total analysis time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create npsurvSS arm object — gs_create_arm","text":"list two arms.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Create npsurvSS arm object — gs_create_arm","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create npsurvSS arm object — gs_create_arm","text":"","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_create_arm(enroll_rate, fail_rate, ratio = 1) #> $arm0 #> $size #> [1] 1 #> #> $accr_time #> [1] 14 #> #> $accr_dist #> [1] \"pieceuni\" #> #> $accr_interval #> [1] 0 2 4 14 #> #> $accr_param #> [1] 0.05555556 0.11111111 0.83333333 #> #> $surv_cure #> [1] 0 #> #> $surv_interval #> [1] 0 3 Inf #> #> $surv_shape #> [1] 1 #> #> $surv_scale #> [1] 0.07701635 0.03850818 #> #> $loss_shape #> [1] 1 #> #> $loss_scale #> [1] 0.001 #> #> $follow_time #> [1] 999986 #> #> $total_time #> [1] 1e+06 #> #> attr(,\"class\") #> [1] \"list\" \"arm\" #> #> $arm1 #> $size #> [1] 1 #> #> $accr_time #> [1] 14 #> #> $accr_dist #> [1] \"pieceuni\" #> #> $accr_interval #> [1] 0 2 4 14 #> #> $accr_param #> [1] 0.05555556 0.11111111 0.83333333 #> #> $surv_cure #> [1] 0 #> #> $surv_interval #> [1] 0 3 Inf #> #> $surv_shape #> [1] 1 #> #> $surv_scale #> [1] 0.06931472 0.02310491 #> #> $loss_shape #> [1] 1 #> #> $loss_scale #> [1] 0.001 #> #> $follow_time #> [1] 999986 #> #> $total_time #> [1] 1e+06 #> #> attr(,\"class\") #> [1] \"list\" \"arm\" #>"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"Group sequential design using average hazard ratio non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"","code":"gs_design_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), alpha = 0.025, beta = 0.1, info_frac = NULL, analysis_time = 36, ratio = 1, binding = FALSE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = alpha), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = beta), h1_spending = TRUE, test_upper = TRUE, test_lower = TRUE, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using average 
hazard ratio under non-proportional hazards — gs_design_ahr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. alpha One-sided Type error. beta Type II error. info_frac Targeted information fraction analysis. analysis_time Minimum time analysis. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"added.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"","code":"library(gsDesign) #> #> Attaching package: ‘gsDesign’ #> The following objects are masked from ‘package:gsDesign2’: #> #> as_gt, as_rtf library(gsDesign2) library(dplyr) # Example 1 ---- # call with defaults gs_design_ahr() #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 1 #> #> $input$analysis_time #> [1] 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function 
(k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = 
sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> 
else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 13.2 #> 2 All 2 26.4 #> 3 All 10 39.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 1 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.9 0.025 1.96 0.795 0.0250 #> #> $analysis #> # A tibble: 1 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 36 476. 292. 0.683 0.381 71.7 73.0 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 2 ---- # Single analysis gs_design_ahr(analysis_time = 40) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 1 #> #> $input$analysis_time #> [1] 40 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> 
stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + 
adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 11.9 #> 2 All 2 23.8 #> 3 All 10 35.6 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 1 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.9 0.025 1.96 0.791 0.0250 #> #> $analysis #> # A tibble: 1 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 40 428. 280. 
0.678 0.389 68.8 69.9 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 3 ---- # Multiple analysis_time gs_design_ahr(analysis_time = c(12, 24, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower 
bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < 
extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.5 #> 2 All 2 29.1 #> 3 All 10 43.6 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00248 0.0000538 3.87 0.459 0.0000538 #> 2 1 lower 0.00321 0.0443 -1.70 1.41 0.956 #> 3 2 upper 0.579 0.00921 2.36 0.736 0.00919 #> 4 2 lower 0.0556 0.830 0.953 0.884 0.170 #> 5 3 upper 0.900 0.0244 2.01 0.799 0.0222 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0223 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 436. 98.8 0.811 0.210 24.4 24.7 0.309 0.308 #> 2 2 24 523. 238. 0.715 0.335 58.1 59.4 0.738 0.741 #> 3 3 36 523. 321. 
0.683 0.381 78.8 80.2 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 4 ---- # Specified information fraction # \\donttest{ gs_design_ahr(info_frac = c(.25, .75, 1), analysis_time = 36) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.25 0.75 1.00 #> #> $input$analysis_time #> [1] 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not 
converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } 
#> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.6 #> 2 All 2 29.1 #> 3 All 10 43.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000295 0.00000737 4.33 0.380 0.00000737 #> 2 1 lower 0.00108 0.0135 -2.21 1.64 0.987 #> 3 2 upper 0.599 0.00965 2.34 0.740 0.00965 #> 4 2 lower 0.0570 0.843 1.01 0.878 0.157 #> 5 3 upper 0.900 0.0244 2.01 0.799 0.0221 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0221 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 10.7 382. 80.4 0.823 0.195 19.8 20.1 0.251 0.250 #> 2 2 24.4 524. 241. 0.714 0.337 59.0 60.3 0.747 0.750 #> 3 3 36 524. 322. 
0.683 0.381 79.0 80.4 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 5 ---- # multiple analysis times & info_frac # driven by times gs_design_ahr(info_frac = c(.25, .75, 1), analysis_time = c(12, 25, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.25 0.75 1.00 #> #> $input$analysis_time #> [1] 12 25 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> 
stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - 
bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.6 #> 2 All 2 29.3 #> 3 All 10 43.9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00251 0.0000538 3.87 0.460 0.0000538 #> 2 1 lower 0.00321 0.0446 -1.70 1.41 0.955 #> 3 2 upper 0.635 0.0105 2.31 0.746 0.0104 #> 4 2 lower 0.0599 0.862 1.09 0.871 0.138 #> 5 3 upper 0.900 0.0243 2.02 0.799 0.0219 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0220 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 439. 99.5 0.811 0.210 24.5 24.9 0.309 0.308 #> 2 2 25 527. 248. 0.711 0.341 60.5 61.9 0.763 0.766 #> 3 3 36 527. 323. 
0.683 0.381 79.3 80.7 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # driven by info_frac # \\donttest{ gs_design_ahr(info_frac = c(1 / 3, .8, 1), analysis_time = c(12, 25, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3333333 0.8000000 1.0000000 #> #> $input$analysis_time #> [1] 12 25 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not 
converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } 
#> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.7 #> 2 All 2 29.5 #> 3 All 10 44.2 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00510 0.000104 3.71 0.490 0.000104 #> 2 1 lower 0.00459 0.0665 -1.50 1.33 0.934 #> 3 2 upper 0.701 0.0122 2.25 0.756 0.0122 #> 4 2 lower 0.0655 0.896 1.26 0.856 0.104 #> 5 3 upper 0.900 0.0241 2.03 0.799 0.0214 #> 6 3 lower 0.100 0.976 2.02 0.799 0.0216 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12.5 465. 108. 0.806 0.216 26.7 27.1 0.334 0.333 #> 2 2 26.4 530. 260. 0.706 0.348 63.7 65.1 0.797 0.800 #> 3 3 36 530. 325. 
0.683 0.381 79.9 81.3 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 6 ---- # 2-sided symmetric design with O'Brien-Fleming spending # \\donttest{ gs_design_ahr( analysis_time = c(12, 24, 36), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), h1_spending = FALSE ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> 
bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], 
a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] FALSE #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 13.7 #> 2 All 2 27.5 #> 3 All 10 41.2 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00226 0.0000538 3.87 0.449 0.0000538 #> 2 1 lower 0.000000613 0.0000538 -3.87 2.23 1.00 #> 3 2 upper 0.550 0.00921 2.36 0.730 0.00919 #> 4 2 lower 0.00000125 0.00921 -2.36 1.37 0.991 #> 5 3 upper 0.900 0.0250 2.01 0.794 0.0222 #> 6 3 lower 0.00000128 0.0250 -2.01 1.26 0.978 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 412. 93.4 0.811 0.210 23.0 23.3 0.309 0.308 #> 2 2 24 494. 224. 0.715 0.335 54.9 56.1 0.738 0.741 #> 3 3 36 494. 303. 
0.683 0.381 74.4 75.8 1 1 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # 2-sided asymmetric design with O'Brien-Fleming upper spending # Pocock lower spending under H1 (NPH) # \\donttest{ gs_design_ahr( analysis_time = c(12, 24, 36), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDPocock, total_spend = 0.1, param = NULL, timing = NULL), h1_spending = TRUE ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> 
} #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, 
info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> t[t > 1] <- 1 #> x <- list(name = \"Lan-DeMets Pocock approximation\", param = NULL, #> parname = \"none\", sf = sfLDPocock, spend = alpha * log(1 + #> (exp(1) - 1) * t), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.5 #> 2 All 2 32.9 #> 3 All 10 49.4 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00305 0.0000538 3.87 0.481 0.0000538 #> 2 1 lower 0.0430 0.268 -0.619 1.12 0.732 #> 3 2 upper 0.638 0.00921 2.36 0.750 0.00920 #> 4 2 lower 0.0823 0.874 1.13 0.871 0.129 #> 5 3 upper 0.900 0.0250 1.98 0.813 0.0240 #> 6 3 lower 0.100 0.975 1.97 0.813 0.0243 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 494. 112. 0.811 0.210 27.6 28.0 0.309 0.308 #> 2 2 24 593. 269. 0.715 0.335 65.9 67.3 0.738 0.741 #> 3 3 36 593. 364. 
0.683 0.381 89.3 90.9 1 1 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # Example 7 ---- # \\donttest{ gs_design_ahr( alpha = 0.0125, analysis_time = c(12, 24, 36), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.0125, param = NULL, timing = NULL), lower = gs_b, lpar = rep(-Inf, 3) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.0125 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if 
(abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.0125 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf -Inf #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.1 #> 2 All 2 32.2 #> 3 All 10 48.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 3 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000619 0.00000679 4.35 0.435 0.00000679 #> 2 2 upper 0.505 0.00371 2.68 0.719 0.00371 #> 3 3 upper 0.900 0.0125 2.28 0.785 0.0114 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 483. 109. 0.811 0.210 27.0 27.4 0.309 0.308 #> 2 2 24 579. 263. 0.715 0.335 64.3 65.8 0.738 0.741 #> 3 3 36 579. 355. 0.683 0.381 87.2 88.8 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" gs_design_ahr( alpha = 0.0125, analysis_time = c(12, 24, 36), upper = gs_b, upar = gsDesign::gsDesign( k = 3, test.type = 1, n.I = c(.25, .75, 1), sfu = sfLDOF, sfupar = NULL, alpha = 0.0125 )$upper$bound, lower = gs_b, lpar = rep(-Inf, 3) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.0125 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 4.859940 2.658446 2.280095 #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf -Inf #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.1 #> 2 All 2 32.2 #> 3 All 10 48.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 3 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000938 0.000000587 4.86 0.395 0.000000587 #> 2 2 upper 0.513 0.00393 2.66 0.721 0.00393 #> 3 3 upper 0.900 0.0125 2.28 0.785 0.0113 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 483. 110. 0.811 0.210 27.0 27.4 0.309 0.308 #> 2 2 24 580. 263. 0.715 0.335 64.4 65.9 0.738 0.741 #> 3 3 36 580. 356. 0.683 0.381 87.3 88.9 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"Group sequential design using MaxCombo test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"","code":"gs_design_combo( enroll_rate = define_enroll_rate(duration = 12, rate = 500/12), fail_rate = define_fail_rate(duration = c(4, 100), fail_rate = log(2)/15, hr = c(1, 0.6), dropout_rate = 0.001), fh_test = rbind(data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36)), ratio = 1, alpha = 0.025, beta = 0.2, binding = FALSE, upper = gs_b, upar = c(3, 2, 1), lower = gs_b, lpar = c(-1, 0, 1), algorithm = mvtnorm::GenzBretz(maxpts = 1e+05, abseps = 1e-05), n_upper_bound = 1000, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. fh_test data frame summarize test analysis. See examples data structure. ratio Experimental:Control randomization ratio (yet implemented). alpha One-sided Type error. beta Type II error. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. n_upper_bound numeric value upper limit sample size. ... 
Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"","code":"# The example is slow to run library(dplyr) library(mvtnorm) library(gsDesign) enroll_rate <- define_enroll_rate( duration = 12, rate = 500 / 12 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) fh_test <- rbind( data.frame( rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36) ), data.frame( rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36 ) ) x <- gsSurv( k = 3, test.type = 4, alpha = 0.025, beta = 0.2, astar = 0, timing = 1, sfu = sfLDOF, sfupar = 0, sfl = sfLDOF, sflpar = 0, lambdaC = 0.1, hr = 0.6, hr0 = 1, eta = 0.01, gamma = 10, R = 12, S = NULL, T = 36, minfup = 24, ratio = 1 ) # Example 1 ---- # User-defined boundary # \\donttest{ gs_design_combo( enroll_rate, fail_rate, fh_test, alpha = 0.025, beta = 0.2, ratio = 1, binding = FALSE, upar = x$upper$bound, lpar = x$lower$bound ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 37.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> analysis bound probability probability0 z nominal p #> 1 1 upper 0.002056984 0.0001035057 3.7103029 0.0001035057 #> 2 1 lower 0.140694990 0.4066436377 -0.2361874 0.5933563623 #> 3 2 upper 0.469124612 0.0060406872 2.5114070 0.0060125477 #> 4 2 lower 0.185586571 0.8846152138 1.1703638 0.1209273043 #> 5 3 upper 0.799998476 0.0254946088 1.9929702 0.0231323552 #> 6 3 lower 0.200008755 0.9745050358 1.9929702 0.0231323552 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 444.7987 95.53766 0.3241690 0.8418858 #> 2 2 24 444.7987 219.09306 0.7434051 0.7164215 #> 3 3 36 444.7987 294.71556 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # } # Example 2 ---- # \\donttest{ # Boundary derived by spending function gs_design_combo( enroll_rate, fail_rate, fh_test, alpha = 0.025, beta = 0.2, ratio = 1, binding = FALSE, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # alpha spending lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), # beta spending ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> analysis bound probability probability0 z nominal p #> 1 1 upper 2.087715e-08 3.299865e-10 6.1753973 3.299865e-10 #> 2 1 lower 3.269631e-04 3.303090e-03 -2.7160707 9.966969e-01 #> 3 2 upper 2.203276e-01 2.565830e-03 2.7986508 2.565830e-03 #> 4 2 lower 8.468643e-02 7.431751e-01 0.6531624 2.568258e-01 #> 5 3 upper 8.000054e-01 2.371235e-02 2.0972454 1.798593e-02 #> 6 3 
lower 1.999978e-01 9.762886e-01 2.0972475 1.798584e-02 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 301.2538 64.70585 0.3241690 0.8418858 #> 2 2 24 301.2538 148.38759 0.7434051 0.7164215 #> 3 3 36 301.2538 199.60528 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design computation with non-constant effect and information — gs_design_npe","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"Derives group sequential design size, bounds boundary crossing probabilities based proportionate information effect size analyses. allows non-constant treatment effect time, also can applied usual homogeneous effect size designs. requires treatment effect proportionate statistical information analysis well method deriving bounds, spending. routine enables two things available gsDesign package: non-constant effect, 2) flexibility boundary selection. many applications, non-proportional-hazards design function gs_design_nph() used; calls function. Initial bound types supported 1) spending bounds, fixed bounds, 3) Haybittle-Peto-like bounds. requirement boundary update method can bound without knowledge future bounds. example, bounds based conditional power require knowledge future bounds supported routine; limited conditional power method demonstrated. Boundary family designs Wang-Tsiatis designs including original (non-spending-function-based) O'Brien-Fleming Pocock designs supported gs_power_npe().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"","code":"gs_design_npe( theta = 0.1, theta0 = NULL, theta1 = NULL, info = 1, info0 = NULL, info1 = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), alpha = 0.025, beta = 0.1, upper = gs_b, upar = qnorm(0.975), lower = gs_b, lpar = -Inf, test_upper = TRUE, test_lower = TRUE, binding = FALSE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"theta Natural parameter group sequential design representing expected incremental drift analyses; used power calculation. theta0 Natural parameter used upper bound spending; NULL, set 0. theta1 Natural parameter used lower bound spending; NULL, set theta yields usual beta-spending. set 0, spending 2-sided null hypothesis. info Proportionate statistical information analyses input theta. info0 Proportionate statistical information null hypothesis, different alternative; impacts null hypothesis bound calculation. info1 Proportionate statistical information alternate hypothesis; impacts null hypothesis bound calculation. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. alpha One-sided Type error. beta Type II error. upper Function compute upper bound. upar Parameters passed function provided upper. 
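(Aside, not from the package documentation: to make the roles of theta, info, and info0 concrete, the fixed-design sample size for the Lachin difference-of-proportions case in Example 1 below can be reproduced with the standard formula that combines the null and alternative variances. This is a hedged back-of-the-envelope sketch, not the gs_design_npe() internals.)

# Sketch only: classical fixed-design sample size with separate H0/H1 variances
pc <- 0.28; pe <- 0.40; p0 <- (pc + pe) / 2
theta <- pe - pc                                        # natural parameter (difference of proportions)
sigma0 <- sqrt(4 * p0 * (1 - p0))                       # per-patient SD scale under H0
sigma1 <- sqrt(2 * pc * (1 - pc) + 2 * pe * (1 - pe))   # per-patient SD scale under H1
n <- ((qnorm(0.975) * sigma0 + qnorm(0.9) * sigma1) / theta)^2
ceiling(n / 2) * 2  # 652, matching the rounding noted in Example 1 below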
lower Function compare lower bound. lpar Parameters passed function provided lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicates lower bound; otherwise, logical vector length info indicate analyses lower bound. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally r changed user. tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"tibble columns analysis, bound, z, probability, theta, info, info0.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"inputs info info0 vectors length increasing positive numbers. design returned change constant scale factor ensure design power 1 - beta. bound specifications upper, lower, upar, lpar used ensure Type error boundary properties specified.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"Keaven Anderson keaven_anderson@merck.com","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"","code":"library(dplyr) library(gsDesign) # Example 1 ---- # Single analysis # Lachin book p 71 difference of proportions example pc <- .28 # Control response rate pe <- .40 # Experimental response rate p0 <- (pc + pe) / 2 # Ave response rate under H0 # Information per increment of 1 in sample size info0 <- 1 / (p0 * (1 - p0) * 4) info <- 1 / (pc * (1 - pc) * 2 + pe * (1 - pe) * 2) # Result should round up to next even number = 652 # Divide information needed under H1 by information per patient added gs_design_npe(theta = pe - pc, info = info, info0 = info0) #> # A tibble: 1 × 10 #> analysis bound z probability probability0 theta info info0 info1 #> #> 1 1 upper 1.96 0.9 0.025 0.12 737. 725. 737. 
#> # ℹ 1 more variable: info_frac # Example 2 ---- # Fixed bound x <- gs_design_npe( alpha = 0.0125, theta = c(.1, .2, .3), info = (1:3) * 80, info0 = (1:3) * 80, upper = gs_b, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF, alpha = 0.0125)$upper$bound, lower = gs_b, lpar = c(-1, 0, 0) ) x #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 4.17 0.000278 0.0000152 0.1 0.333 51.6 51.6 #> 2 1 lower -1 0.0429 0.159 0.1 0.333 51.6 51.6 #> 3 2 upper 2.85 0.208 0.00222 0.2 0.667 103. 103. #> 4 2 lower 0 0.0537 0.513 0.2 0.667 103. 103. #> 5 3 upper 2.26 0.900 0.0125 0.3 1 155. 155. #> 6 3 lower 0 0.0537 0.606 0.3 1 155. 155. #> # ℹ 1 more variable: info1 # Same upper bound; this represents non-binding Type I error and will total 0.025 gs_power_npe( theta = rep(0, 3), info = (x %>% filter(bound == \"upper\"))$info, upper = gs_b, upar = (x %>% filter(bound == \"upper\"))$z, lower = gs_b, lpar = rep(-Inf, 3) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 4.17 0.0000152 0 0 0.333 51.6 51.6 51.6 #> 2 2 upper 2.85 0.00222 0 0 0.667 103. 103. 103. #> 3 3 upper 2.26 0.0125 0 0 1 155. 155. 155. #> 4 1 lower -Inf 0 0 0 0.333 51.6 51.6 51.6 #> 5 2 lower -Inf 0 0 0 0.667 103. 103. 103. #> 6 3 lower -Inf 0 0 0 1 155. 155. 155. # Example 3 ---- # Spending bound examples # Design with futility only at analysis 1; efficacy only at analyses 2, 3 # Spending bound for efficacy; fixed bound for futility # NOTE: test_upper and test_lower DO NOT WORK with gs_b; must explicitly make bounds infinite # test_upper and test_lower DO WORK with gs_spending_bound gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = c(-1, -Inf, -Inf), test_upper = c(FALSE, TRUE, TRUE) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper Inf 0 0 0.1 0.333 44.6 44.6 #> 2 1 lower -1 0.0477 0.159 0.1 0.333 44.6 44.6 #> 3 2 upper 2.51 0.267 0.00605 0.2 0.667 89.1 89.1 #> 4 2 lower -Inf 0.0477 0.159 0.2 0.667 89.1 89.1 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 134. 134. #> 6 3 lower -Inf 0.0477 0.159 0.3 1 134. 134. #> # ℹ 1 more variable: info1 # one can try `info_scale = \"h1_info\"` or `info_scale = \"h0_info\"` here gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 30, info_scale = \"h1_info\", upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = c(-1, -Inf, -Inf), test_upper = c(FALSE, TRUE, TRUE) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper Inf 0 0 0.1 0.333 44.6 44.6 #> 2 1 lower -1 0.0477 0.159 0.1 0.333 44.6 44.6 #> 3 2 upper 2.51 0.267 0.00605 0.2 0.667 89.1 89.1 #> 4 2 lower -Inf 0.0477 0.159 0.2 0.667 89.1 89.1 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 134. 134. #> 6 3 lower -Inf 0.0477 0.159 0.3 1 134. 134. 
#> # ℹ 1 more variable: info1 # Example 4 ---- # Spending function bounds # 2-sided asymmetric bounds # Lower spending based on non-zero effect gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 30, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 3.71 0.000145 0.000104 0.1 0.333 43.5 32.7 #> 2 1 lower -1.34 0.0139 0.0909 0.1 0.333 43.5 32.7 #> 3 2 upper 2.51 0.258 0.00605 0.2 0.667 87.1 65.3 #> 4 2 lower 0.150 0.0460 0.562 0.2 0.667 87.1 65.3 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 131. 98.0 #> 6 3 lower 2.00 0.0908 0.976 0.3 1 131. 98.0 #> # ℹ 1 more variable: info1 # Example 5 ---- # Two-sided symmetric spend, O'Brien-Fleming spending # Typically, 2-sided bounds are binding xx <- gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) xx #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 3.71 0.00104 0.000104 0.1 0.333 39.8 39.8 #> 2 1 lower -3.08 0.000104 0.00104 0.1 0.333 39.8 39.8 #> 3 2 upper 2.51 0.233 0.00605 0.2 0.667 79.5 79.5 #> 4 2 lower -0.728 0.00605 0.233 0.2 0.667 79.5 79.5 #> 5 3 upper 1.99 0.900 0.0250 0.3 1 119. 119. #> 6 3 lower 1.28 0.0250 0.900 0.3 1 119. 119. #> # ℹ 1 more variable: info1 # Re-use these bounds under alternate hypothesis # Always use binding = TRUE for power calculations gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upper = gs_b, lower = gs_b, upar = (xx %>% filter(bound == \"upper\"))$z, lpar = -(xx %>% filter(bound == \"upper\"))$z ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.902 0.3 0.3 1 120 120 120 #> 4 1 lower -3.71 0.00000704 0.1 0.1 0.333 40 40 40 #> 5 2 lower -2.51 0.0000151 0.2 0.2 0.667 80 80 80 #> 6 3 lower -1.99 0.0000151 0.3 0.3 1 120 120 120"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"Group sequential design binary outcome measuring risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"","code":"gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), info_frac = 1:3/3, rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = NULL, weight = c(\"unstratified\", \"ss\", \"invar\"), upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(0.1), rep(-Inf, 2)), test_upper = TRUE, test_lower = TRUE, info_scale = c(\"h0_h1_info\", 
\"h0_info\", \"h1_info\"), binding = FALSE, r = 18, tol = 1e-06, h1_spending = TRUE )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"p_c Rate control group. p_e Rate experimental group. info_frac Statistical information fraction. rd0 Treatment effect super-superiority designs, default 0. alpha One-sided Type error. beta Type II error. ratio Experimental:Control randomization ratio (yet implemented). stratum_prev Randomization ratio different stratum. unstratified design NULL. Otherwise tibble containing two columns (stratum prevalence). weight weighting scheme stratified population. upper Function compute upper bound. lower Function compute lower bound. upar Parameters passed upper. lpar Parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicates lower bound; otherwise, logical vector length info indicate analyses lower bound. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). 
h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"list input parameters, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"added.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"","code":"library(gsDesign) # Example 1 ---- # unstratified group sequential design x <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 2, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) y <- gs_power_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), n = tibble::tibble(stratum = \"All\", n = x$analysis$n, analysis = 1:2), rd0 = 0, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 2, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) # The above 2 design share the same power with the same sample size and treatment effect x$bound$probability[x$bound$bound == \"upper\" & x$bound$analysis == 2] #> [1] 0.9 y$bound$probability[y$bound$bound == \"upper\" & y$bound$analysis == 2] #> [1] 0.9 # Example 2 ---- # stratified group sequential design gs_design_rd( p_c = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(.2, .25) ), p_e = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(.15, .22) ), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), prevalence = c(.4, .6) ), weight = \"ss\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = rep(-Inf, 2) ) #> $input #> $input$p_c #> # A tibble: 2 × 2 #> stratum rate #> #> 1 biomarker positive 0.2 #> 2 biomarker negative 0.25 #> #> $input$p_e #> # A tibble: 2 × 2 #> stratum rate #> #> 1 biomarker positive 0.15 #> 2 biomarker negative 0.22 #> #> $input$info_frac #> [1] 0.7 1.0 #> #> $input$rd0 #> [1] 0 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$stratum_prev #> # A tibble: 2 × 2 #> stratum prevalence #> #> 1 biomarker positive 0.4 #> 2 biomarker negative 0.6 #> #> $input$weight #> [1] \"ss\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- 
rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> 
$input$lower #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $bound #> # A tibble: 2 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.616 0.00738 2.44 0.0339 #> 2 2 upper 0.900 0.0250 2.00 0.0232 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 2 × 8 #> analysis n rd rd0 info info0 info_frac info_frac0 #> #> 1 1 3426. 0.038 0 5184. 5172. 0.7 0.7 #> 2 2 4894. 0.038 0 7406. 7388. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"Group sequential design using weighted log-rank test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"","code":"gs_design_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = tibble(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), weight = wlr_weight_fh, approx = \"asymptotic\", alpha = 0.025, beta = 0.1, ratio = 1, info_frac = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), analysis_time = 36, binding = FALSE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = alpha), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = beta), test_upper = TRUE, test_lower = TRUE, h1_spending = TRUE, r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". alpha One-sided Type error. beta Type II error. ratio Experimental:Control randomization ratio (yet implemented). info_frac Targeted information fraction analysis. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. analysis_time Minimum time analysis. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. 
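(Aside, not from the package documentation: the Fleming-Harrington FH(rho, gamma) weights named in the weight argument above are w(t) = S(t)^rho * (1 - S(t))^gamma, with S(t) the pooled survival function. The standalone helper below is illustrative only; the package's wlr_weight_fh() operates on arm objects, as shown in the examples below.)

# Sketch only: Fleming-Harrington weight as a function of pooled survival
fh_weight <- function(surv, rho = 0, gamma = 0.5) {
  surv^rho * (1 - surv)^gamma
}
fh_weight(c(0.9, 0.7, 0.5))  # weight rises as survival falls: late differences up-weighted when gamma > 0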
test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"","code":"library(dplyr) library(mvtnorm) library(gsDesign) library(gsDesign2) # set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 1) # set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # Example 1 ---- # Information fraction driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 36, info_frac = 1:3/3 ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3333333 0.6666667 1.0000000 #> #> $input$analysis_time #> [1] 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if 
(is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = 
TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper 
#> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00173 0.0000794 3.78 0.490 0.0000794 #> 2 1 lower 0.0269 0.134 -1.11 1.23 0.866 #> 3 2 upper 0.126 0.00562 2.54 0.671 0.00559 #> 4 2 lower 0.117 0.568 0.155 0.976 0.439 #> 5 3 upper 0.800 0.0249 1.99 0.754 0.0233 #> 6 3 lower 0.200 0.975 1.99 0.754 0.0233 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 1 1 18.0 301. 112. 0.702 0.354 5.46 5.53 0.333 #> 2 2 26.6 301. 162. 0.657 0.420 10.9 11.2 0.667 #> 3 3 36 301. 199. 0.639 0.732 16.4 17.1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # Example 2 ---- # Calendar time driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 1:3*12, info_frac = NULL ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.1325089 0.5649964 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- 
spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) 
{ #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 24.0 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000000732 3.30e-10 6.18 0.208 3.30e-10 #> 2 1 lower 0.000441 7.55e- 3 -2.43 1.85 9.92e- 1 #> 3 2 upper 0.301 2.57e- 3 2.80 0.625 2.57e- 3 #> 4 2 lower 0.0882 8.23e- 1 0.925 0.856 1.77e- 1 #> 5 3 upper 0.800 2.20e- 2 1.97 0.751 2.42e- 2 #> 6 3 lower 0.200 9.78e- 1 1.97 0.751 2.42e- 2 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 
1 1 12 288. 61.9 0.781 0.626 2.08 2.09 0.133 #> 2 2 24 288. 142. 0.666 0.765 8.86 9.07 0.565 #> 3 3 36 288. 191. 0.639 0.732 15.7 16.4 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # Example 3 ---- # Both calendar time and information fraction driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 1:3*12, info_frac = c(0.3, 0.7, 1) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3 0.7 1.0 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = 
info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- 
theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000646 0.0000315 4.00 0.461 0.0000315 #> 2 1 lower 0.0197 0.0954 -1.31 1.29 0.905 #> 3 2 upper 0.154 0.00693 2.46 0.683 0.00692 #> 4 2 lower 0.126 0.608 0.266 0.960 0.395 #> 5 3 upper 0.800 0.0249 2.00 0.755 0.0229 #> 6 3 lower 0.200 0.975 2.00 0.755 0.0229 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 1 1 17.1 304. 107. 0.711 0.341 4.96 5.02 0.300 #> 2 2 27.5 304. 167. 0.655 0.424 11.6 11.9 0.700 #> 3 3 36 304. 201. 
0.639 0.732 16.5 17.3 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size based on AHR approximation — gs_info_ahr","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"Based piecewise enrollment rate, failure rate, dropout rates computes approximate information effect size using average hazard ratio model.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"","code":"gs_info_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. ratio Experimental:Control randomization ratio. event Targeted minimum events analysis. analysis_time Targeted minimum study duration analysis. interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"data frame columns Analysis, Time, AHR, Events, theta, info, info0. info, info0 contain statistical information H1, H0, respectively. analysis k, Time[k] maximum analysis_time[k] expected time required accrue targeted event[k]. AHR expected average hazard ratio analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"ahr() function computes statistical information targeted event times. 
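As a small illustrative sketch of the point above (the enrollment and failure rates below are hypothetical, not the reference defaults; argument names are taken from the Usage block), supplying both targeted events and targeted analysis times makes each reported analysis time the later of the calendar target and the expected time to reach the event target:

library(gsDesign2)

# Hypothetical planning assumptions for illustration only
enroll_rate <- define_enroll_rate(duration = 12, rate = 30)
fail_rate <- define_fail_rate(
  duration = c(4, 100),
  fail_rate = log(2) / 12,  # assumed 12-month median survival
  hr = c(1, 0.7),           # delayed effect: no benefit in the first 4 months
  dropout_rate = 0.001
)

# Each analysis is taken at the later of the targeted calendar time and the
# expected time required to accrue the targeted number of events
gs_info_ahr(
  enroll_rate = enroll_rate,
  fail_rate = fail_rate,
  ratio = 1,
  event = c(60, 100, 150),
  analysis_time = c(12, 20, 28)
)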
expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"","code":"library(gsDesign) library(gsDesign2) # Example 1 ---- # \\donttest{ # Only put in targeted events gs_info_ahr(event = c(30, 40, 50)) #> analysis time event ahr theta info info0 #> 1 1 14.90817 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 19.16437 40.00000 0.7442008 0.2954444 9.789940 10.00000 #> 3 3 24.54264 50.00000 0.7128241 0.3385206 12.227632 12.50000 # } # Example 2 ---- # Only put in targeted analysis times gs_info_ahr(analysis_time = c(18, 27, 36)) #> analysis time event ahr theta info info0 #> 1 1 18 37.59032 0.7545471 0.2816376 9.208013 9.397579 #> 2 2 27 54.01154 0.7037599 0.3513180 13.216112 13.502885 #> 3 3 36 66.23948 0.6833395 0.3807634 16.267921 16.559870 # Example 3 ---- # \\donttest{ # Some analysis times after time at which targeted event accrue # Check that both Time >= input analysis_time and event >= input event gs_info_ahr(event = c(30, 40, 50), analysis_time = c(16, 19, 26)) #> analysis time event ahr theta info info0 #> 1 1 16.00000 33.06876 0.7759931 0.2536117 8.118487 8.267189 #> 2 2 19.16437 40.00000 0.7442008 0.2954444 9.789940 10.000001 #> 3 3 26.00000 52.41802 0.7071808 0.3464689 12.822714 13.104505 gs_info_ahr(event = c(30, 40, 50), analysis_time = c(14, 20, 24)) #> analysis time event ahr theta info info0 #> 1 1 14.90817 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 20.00000 41.67282 0.7377944 0.3040901 10.195150 10.41821 #> 3 3 24.54264 50.00000 0.7128241 0.3385206 12.227632 12.50000 # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size for MaxCombo test — gs_info_combo","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"Information effect size MaxCombo test","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"","code":"gs_info_combo( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, rho, gamma, tau = rep(-1, length(rho)), approx = \"asymptotic\" )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). ratio Experimental:Control randomization ratio (yet implemented). event Targeted events analysis. analysis_time Minimum time analysis. rho Weighting parameters. 
gamma Weighting parameters. tau Weighting parameters. approx Approximation method.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"tibble columns test index, analysis index, analysis time, sample size, number events, ahr, delta, sigma2, theta, statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"","code":"gs_info_combo(rho = c(0, 0.5), gamma = c(0.5, 0), analysis_time = c(12, 24)) #> test analysis time n event ahr delta sigma2 #> 1 1 1 12 89.99998 20.40451 0.7739222 -0.004130002 0.00633611 #> 2 1 2 24 107.99998 49.06966 0.6744758 -0.020174155 0.02617985 #> 3 2 1 12 89.99998 20.40451 0.8182558 -0.008800844 0.04088161 #> 4 2 2 24 107.99998 49.06966 0.7278445 -0.031421204 0.08709509 #> theta info info0 #> 1 0.6518199 0.5702498 0.5733464 #> 2 0.7705987 2.8274229 2.8855151 #> 3 0.2152764 3.6793441 3.6861985 #> 4 0.3607689 9.4062683 9.4737329"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size under risk difference — gs_info_rd","title":"Information and effect size under risk difference — gs_info_rd","text":"Information effect size risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size under risk difference — gs_info_rd","text":"","code":"gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0, ratio = 1, weight = c(\"unstratified\", \"ss\", \"invar\") )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size under risk difference — gs_info_rd","text":"p_c Rate control group. p_e Rate experimental group. n Sample size. rd0 risk difference H0. ratio Experimental:Control randomization ratio. 
weight Weighting method, can \"unstratified\", \"ss\", \"invar\".","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size under risk difference — gs_info_rd","text":"tibble columns analysis index, sample size, risk difference, risk difference null hypothesis, theta1 (standardized treatment effect alternative hypothesis), theta0 (standardized treatment effect null hypothesis), statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size under risk difference — gs_info_rd","text":"","code":"# Example 1 ---- # unstratified case with H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0, ratio = 1 ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 100 0.05 0 0.05 0 230. 229. #> 2 2 200 0.05 0 0.05 0 460. 457. #> 3 3 300 0.05 0 0.05 0 690. 686. # Example 2 ---- # unstratified case with H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0.005, ratio = 1 ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 100 0.05 0.005 0.05 0.005 174. 173. #> 2 2 200 0.05 0.005 0.05 0.005 348. 346. #> 3 3 300 0.05 0.005 0.05 0.005 522. 519. # Example 3 ---- # stratified case under sample size weighting and H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25)), p_e = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19)), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0, ratio = 1, weight = \"ss\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0513 0 0.0513 0 261. 260. #> 2 2 300 0.0513 0 0.0513 0 522. 519. #> 3 3 600 0.0513 0 0.0513 0 1043. 1038. # Example 4 ---- # stratified case under inverse variance weighting and H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0, ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0 0.0507 0 271. 269. #> 2 2 300 0.0507 0 0.0507 0 542. 539. #> 3 3 600 0.0507 0 0.0507 0 1083. 1078. # Example 5 ---- # stratified case under sample size weighting and H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0.02, ratio = 1, weight = \"ss\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0513 0.02 0.0513 0.02 261. 260. 
#> 2 2 300 0.0513 0.02 0.0513 0.02 522. 519. #> 3 3 600 0.0513 0.02 0.0513 0.02 1043. 1038. # Example 6 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0.02, ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0.02 0.0507 0.02 271. 269. #> 2 2 300 0.0507 0.02 0.0507 0.02 542. 539. #> 3 3 600 0.0507 0.02 0.0507 0.02 1083. 1078. # Example 7 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 and # rd0 difference for different statum gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rd0 = c(0.01, 0.02, 0.03) ), ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0.0190 0.0507 0.0190 271. 269. #> 2 2 300 0.0507 0.0190 0.0507 0.0190 542. 539. #> 3 3 600 0.0507 0.0190 0.0507 0.0190 1083. 1078."},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size for weighted log-rank test — gs_info_wlr","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"Based piecewise enrollment rate, failure rate, dropout rates computes approximate information effect size using average hazard ratio model.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"","code":"gs_info_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, weight = wlr_weight_fh, approx = \"asymptotic\", interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate Failure dropout rates. ratio Experimental:Control randomization ratio. event Targeted minimum events analysis. analysis_time Targeted minimum study duration analysis. weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". 
interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"tibble columns Analysis, Time, N, Events, AHR, delta, sigma2, theta, info, info0. info info0 contain statistical information H1, H0, respectively. analysis k, Time[k] maximum analysis_time[k] expected time required accrue targeted event[k]. AHR expected average hazard ratio analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"ahr() function computes statistical information targeted event times. expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"","code":"library(gsDesign2) # Set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 500 / 12) # Set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # Set the targeted number of events and analysis time event <- c(30, 40, 50) analysis_time <- c(10, 24, 30) gs_info_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = event, analysis_time = analysis_time ) #> analysis time n event ahr delta sigma2 theta #> 1 1 10 416.6667 77.80361 0.8720599 -0.005325328 0.03890022 0.1368971 #> 2 2 24 500.0001 246.28341 0.7164215 -0.040920239 0.12270432 0.3334865 #> 3 3 30 500.0001 293.69568 0.6955693 -0.052942680 0.14583769 0.3630247 #> info info0 #> 1 16.20843 16.22923 #> 2 61.35217 62.08666 #> 3 72.91885 74.25144"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"Group sequential design power using average hazard ratio non-proportional hazards.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"","code":"gs_power_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), event = c(30, 40, 50), analysis_time = NULL, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = NULL), test_lower = TRUE, test_upper = TRUE, ratio = 1, binding = FALSE, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), r = 18, tol = 1e-06, interval = c(0.01, 1000) 
)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate Failure dropout rates. event Targeted event analysis. analysis_time Minimum time analysis. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"tibble columns Analysis, Bound, Z, Probability, theta, Time, AHR, Events. Contains row analysis bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"Bound satisfy input upper bound specification upper, upar, lower bound specification lower, lpar. ahr() computes statistical information targeted event times. 
expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"","code":"library(gsDesign2) library(dplyr) # Example 1 ---- # The default output of `gs_power_ahr()` is driven by events, # i.e., `event = c(30, 40, 50)`, `analysis_time = NULL` # \\donttest{ gs_power_ahr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> 
while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, 
lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0231 0.00381 2.67 0.374 0.00381 #> 2 1 lower 0.0349 0.121 -1.17 1.54 0.879 #> 3 2 upper 0.0897 0.0122 2.29 0.481 0.0110 #> 4 2 lower 0.0668 0.265 -0.663 1.24 0.746 #> 5 3 upper 0.207 0.0250 2.03 0.559 0.0211 #> 6 3 lower 0.101 0.430 -0.227 1.07 0.590 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 19.16437 108 40.00000 0.7442008 0.2954444 9.789940 10.00000 #> 3 3 24.54264 108 50.00000 0.7128241 0.3385206 12.227632 12.50000 #> info_frac info_frac0 #> 1 0.6030140 0.6000016 #> 2 0.8006407 0.8000001 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 2 ---- # 2-sided symmetric O'Brien-Fleming spending bound, driven by analysis time, # i.e., `event = NULL`, `analysis_time = c(12, 24, 36)` gs_power_ahr( analysis_time = c(12, 24, 36), event = NULL, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> 
#> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) 
{ #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", 
c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000370 0.0000538 3.87 0.178 0.0000538 #> 2 1 lower 0.0000612 0.000343 -3.40 4.55 1.00 #> 3 2 upper 0.116 0.00921 2.36 0.506 0.00919 #> 4 2 lower 0.00907 0.115 -1.20 1.42 0.885 #> 5 3 upper 0.324 0.0250 2.01 0.608 0.0222 #> 6 3 lower 0.0250 0.324 -0.473 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 12 90 20.40451 0.8107539 0.2097907 5.028327 5.101127 0.3090946 #> 2 2 24 108 49.06966 0.7151566 0.3352538 11.999266 12.267415 0.7376029 #> 3 3 36 108 66.23948 0.6833395 0.3807634 16.267921 16.559870 1.0000000 #> info_frac0 #> 1 0.3080415 #> 2 0.7407917 #> 3 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # Example 3 ---- # 2-sided symmetric O'Brien-Fleming spending bound, driven by event, # i.e., `event = c(20, 50, 70)`, `analysis_time = NULL` # \\donttest{ gs_power_ahr( analysis_time = NULL, event = c(20, 50, 70), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 20 50 70 #> #> $input$analysis_time #> NULL #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if 
(spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 
1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 
3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000198 0.0000275 4.03 0.163 0.0000275 #> 2 1 lower 0.0000312 0.000181 -3.57 4.98 1.00 #> 3 2 upper 0.110 0.00800 2.41 0.502 0.00799 #> 4 2 lower 0.00782 0.109 -1.23 1.42 0.891 #> 5 3 upper 0.352 0.0250 2.00 0.617 0.0226 #> 6 3 lower 0.0250 0.352 -0.393 1.10 0.653 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 11.87087 88.8378 20 0.8119328 0.2083377 4.929331 5.0 #> 2 2 24.54264 108.0000 50 0.7128241 0.3385206 12.227632 12.5 #> 3 3 39.39207 108.0000 70 0.6785816 0.3877506 17.218358 17.5 #> info_frac info_frac0 #> 1 0.2862834 0.2857143 #> 2 0.7101509 0.7142857 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # Example 4 ---- # 2-sided symmetric O'Brien-Fleming spending bound, # driven by both `event` and `analysis_time`, i.e., # both `event` and `analysis_time` are not `NULL`, # then the analysis will driven by the maximal one, i.e., # Time = max(analysis_time, calculated Time for targeted event) # Events = max(events, calculated events for targeted analysis_time) # \\donttest{ gs_power_ahr( analysis_time = c(12, 24, 36), event = c(30, 40, 50), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if 
(abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> 
adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00706 0.000867 3.13 0.316 0.000867 #> 2 1 lower 0.000935 0.00658 -2.48 2.49 0.993 #> 3 2 upper 0.115 0.00921 2.37 0.505 0.00892 #> 4 2 lower 0.00912 0.113 -1.21 1.42 0.888 #> 5 3 upper 0.324 0.0250 2.01 0.607 0.0222 #> 6 3 lower 0.0251 0.323 -0.474 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 24.00000 108 49.06966 0.7151566 0.3352538 11.999266 12.26741 #> 3 3 36.00000 108 66.23948 0.6833395 0.3807634 16.267921 16.55987 #> info_frac info_frac0 #> 1 0.4532499 0.4529033 #> 2 0.7376029 0.7407917 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" 
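As the Example 4 output above shows, when both `event` and `analysis_time` are supplied each analysis is taken at the later of the two targets: the 30-event requirement pushes the first analysis from the requested month 12 out to roughly month 14.9, while analyses 2 and 3 stay at months 24 and 36 because the expected event counts there (about 49 and 66) already exceed the targets of 40 and 50. A minimal sketch, reusing the Example 4 call, that prints only the summary components instead of the full object (which, as above, also echoes the deparsed bound functions):

x <- gs_power_ahr(
  analysis_time = c(12, 24, 36),
  event = c(30, 40, 50),
  binding = TRUE,
  upper = gs_spending_bound,
  upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025),
  lower = gs_spending_bound,
  lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025)
)
x$bound    # crossing probabilities, Z values, ~hr at bound, nominal p by analysis
x$analysis # expected time, sample size, events, AHR, theta, and information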
\"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"Group sequential design power using MaxCombo test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"","code":"gs_power_combo( enroll_rate = define_enroll_rate(duration = 12, rate = 500/12), fail_rate = define_fail_rate(duration = c(4, 100), fail_rate = log(2)/15, hr = c(1, 0.6), dropout_rate = 0.001), fh_test = rbind(data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36)), ratio = 1, binding = FALSE, upper = gs_b, upar = c(3, 2, 1), lower = gs_b, lpar = c(-1, 0, 1), algorithm = mvtnorm::GenzBretz(maxpts = 1e+05, abseps = 1e-05), ... )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. fh_test data frame summarize test analysis. See examples data structure. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. ... 
Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"","code":"library(dplyr) library(mvtnorm) library(gsDesign) library(gsDesign2) enroll_rate <- define_enroll_rate( duration = 12, rate = 500 / 12 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) fh_test <- rbind( data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36) ) # Example 1 ---- # Minimal Information Fraction derived bound # \\donttest{ gs_power_combo( enroll_rate = enroll_rate, fail_rate = fail_rate, fh_test = fh_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bound #> analysis bound probability probability0 z nominal p #> 1 1 upper 6.329275e-08 3.299865e-10 6.175397 3.299865e-10 #> 2 1 lower 3.269613e-04 0.000000e+00 -2.516527 9.940741e-01 #> 3 2 upper 4.260145e-01 2.565830e-03 2.798651 2.565830e-03 #> 4 2 lower 8.468664e-02 0.000000e+00 1.237721 1.079098e-01 #> 5 3 upper 9.015980e-01 2.500822e-02 2.097499 1.797473e-02 #> 6 3 lower 2.000038e-01 0.000000e+00 2.958921 1.543591e-03 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 500.0001 107.3943 0.3241690 0.8418858 #> 2 2 24 500.0001 246.2834 0.7434051 0.7164215 #> 3 3 36 500.0001 331.2910 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential bound computation with non-constant effect — gs_power_npe","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"Derives group sequential bounds boundary crossing probabilities design. allows non-constant treatment effect time, also can applied usual homogeneous effect size designs. requires treatment effect statistical information analysis well method deriving bounds, spending. routine enables two things available gsDesign package: non-constant effect, 2) flexibility boundary selection. many applications, non-proportional-hazards design function gs_design_nph() used; calls function. 
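For the gs_power_combo() example above, the `fh_test` data frame is what defines the MaxCombo test: each row is one Fleming-Harrington weighted log-rank test FH(rho, gamma) attached to a planned analysis, with `test` indexing the component test and `tau` an optional time cutoff (here -1, i.e., unused). A minimal sketch of that layout, copied from Example 1:

fh_test <- rbind(
  # Test 1: ordinary log-rank, FH(0, 0), carried through all three analyses
  data.frame(
    rho = 0, gamma = 0, tau = -1, test = 1,
    analysis = 1:3, analysis_time = c(12, 24, 36)
  ),
  # Tests 2 and 3: FH(0, 0.5) and FH(0.5, 0.5), added at the final analysis only
  data.frame(
    rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3,
    analysis = 3, analysis_time = 36
  )
)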
Initial bound types supported 1) spending bounds, fixed bounds, 3) Haybittle-Peto-like bounds. requirement boundary update method can bound without knowledge future bounds. example, bounds based conditional power require knowledge future bounds supported routine; limited conditional power method demonstrated. Boundary family designs Wang-Tsiatis designs including original (non-spending-function-based) O'Brien-Fleming Pocock designs supported gs_power_npe().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"","code":"gs_power_npe( theta = 0.1, theta0 = NULL, theta1 = NULL, info = 1, info0 = NULL, info1 = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), upper = gs_b, upar = qnorm(0.975), lower = gs_b, lpar = -Inf, test_upper = TRUE, test_lower = TRUE, binding = FALSE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"theta Natural parameter group sequential design representing expected incremental drift analyses; used power calculation. theta0 Natural parameter null hypothesis, needed upper bound computation. theta1 Natural parameter alternate hypothesis, needed lower bound computation. info Statistical information analyses input theta. info0 Statistical information null hypothesis, different info; impacts null hypothesis bound calculation. info1 Statistical information hypothesis used futility bound calculation different info; impacts futility hypothesis bound calculation. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. upper Function compute upper bound. upar Parameters passed upper. lower Function compare lower bound. lpar parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. 
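Because gs_power_npe() works directly on the standardized (theta, info) scale, the single-analysis case reduces to the usual normal-approximation power formula, which gives a quick way to sanity-check inputs before moving to multiple analyses. A minimal sketch under that assumption (the theta and info values below are illustrative, not taken from the examples):

library(dplyr)
library(gsDesign2)
theta <- 0.3      # standardized treatment effect
info <- 120       # statistical information at the single analysis
b <- qnorm(0.975) # default fixed efficacy bound (upar)
gs_power_npe(theta = theta, info = info, upar = b, lpar = -Inf) %>%
  filter(bound == "upper") %>%
  select(analysis, z, probability)
# should agree, up to numerical integration accuracy, with
pnorm(theta * sqrt(info) - b)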
tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"tibble columns analysis index, bounds, z, crossing probability, theta (standardized treatment effect), theta1 (standardized treatment effect alternative hypothesis), information fraction, statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"","code":"library(gsDesign) library(gsDesign2) library(dplyr) # Default (single analysis; Type I error controlled) gs_power_npe(theta = 0) %>% filter(bound == \"upper\") #> # A tibble: 1 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 1.96 0.0250 0 0 1 1 1 1 # Fixed bound gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_b, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF)$upper$bound, lower = gs_b, lpar = c(-1, 0, 0) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.869 0.3 0.3 1 120 120 120 #> 4 1 lower -1 0.0513 0.1 0.1 0.333 40 40 40 #> 5 2 lower 0 0.0715 0.2 0.2 0.667 80 80 80 #> 6 3 lower 0 0.0715 0.3 0.3 1 120 120 120 # Same fixed efficacy bounds, no futility bound (i.e., non-binding bound), null hypothesis gs_power_npe( theta = rep(0, 3), info = (1:3) * 40, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF)$upper$bound, lpar = rep(-Inf, 3) ) %>% filter(bound == \"upper\") #> # A tibble: 3 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.000104 0 0 0.333 40 40 40 #> 2 2 upper 2.51 0.00605 0 0 0.667 80 80 80 #> 3 3 upper 1.99 0.0250 0 0 1 120 120 120 # Fixed bound with futility only at analysis 1; efficacy only at analyses 2, 3 gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_b, upar = c(Inf, 3, 2), lower = gs_b, lpar = c(qnorm(.1), -Inf, -Inf) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper Inf 0 0.1 0.1 0.333 40 40 40 #> 2 2 upper 3 0.113 0.2 0.2 0.667 80 80 80 #> 3 3 upper 2 0.887 0.3 0.3 1 120 120 120 #> 4 1 lower -1.28 0.0278 0.1 0.1 0.333 40 40 40 #> 5 2 lower -Inf 0.0278 0.2 0.2 0.667 80 80 80 #> 6 3 lower -Inf 0.0278 0.3 0.3 1 120 120 120 # Spending function bounds # Lower spending based on non-zero effect gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 
0.667 80 80 80 #> 3 3 upper 1.99 0.883 0.3 0.3 1 120 120 120 #> 4 1 lower -1.36 0.0230 0.1 0.1 0.333 40 40 40 #> 5 2 lower 0.0726 0.0552 0.2 0.2 0.667 80 80 80 #> 6 3 lower 1.86 0.100 0.3 0.3 1 120 120 120 # Same bounds, but power under different theta gs_power_npe( theta = c(.15, .25, .35), info = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00288 0.15 0.15 0.333 40 40 40 #> 2 2 upper 2.51 0.391 0.25 0.25 0.667 80 80 80 #> 3 3 upper 1.99 0.931 0.35 0.35 1 120 120 120 #> 4 1 lower -1.05 0.0230 0.15 0.15 0.333 40 40 40 #> 5 2 lower 0.520 0.0552 0.25 0.25 0.667 80 80 80 #> 6 3 lower 2.41 0.100 0.35 0.35 1 120 120 120 # Two-sided symmetric spend, O'Brien-Fleming spending # Typically, 2-sided bounds are binding x <- gs_power_npe( theta = rep(0, 3), info = (1:3) * 40, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) # Re-use these bounds under alternate hypothesis # Always use binding = TRUE for power calculations gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upar = (x %>% filter(bound == \"upper\"))$z, lpar = -(x %>% filter(bound == \"upper\"))$z ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.902 0.3 0.3 1 120 120 120 #> 4 1 lower -3.71 0.00000704 0.1 0.1 0.333 40 40 40 #> 5 2 lower -2.51 0.0000151 0.2 0.2 0.667 80 80 80 #> 6 3 lower -1.99 0.0000151 0.3 0.3 1 120 120 120 # Different values of `r` and `tol` lead to different numerical accuracy # Larger `r` and smaller `tol` give better accuracy, but leads to slow computation n_analysis <- 5 gs_power_npe( theta = rep(0.1, n_analysis), theta0 = NULL, theta1 = NULL, info = 1:n_analysis, info0 = 1:n_analysis, info1 = NULL, info_scale = \"h0_info\", upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = -rep(Inf, n_analysis), test_upper = TRUE, test_lower = FALSE, binding = FALSE, # Try different combinations of (r, tol) with # r in 6, 18, 24, 30, 35, 40, 50, 60, 70, 80, 90, 100 # tol in 1e-6, 1e-12 r = 6, tol = 1e-6 ) #> # A tibble: 10 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 4.88 0.000000890 0.1 0.1 0.2 1 1 1 #> 2 2 upper 3.36 0.000650 0.1 0.1 0.4 2 2 2 #> 3 3 upper 2.68 0.00627 0.1 0.1 0.6 3 3 3 #> 4 4 upper 2.29 0.0200 0.1 0.1 0.8 4 4 4 #> 5 5 upper 2.03 0.0408 0.1 0.1 1 5 5 5 #> 6 1 lower -Inf 0 0.1 0.1 0.2 1 1 1 #> 7 2 lower -Inf 0 0.1 0.1 0.4 2 2 2 #> 8 3 lower -Inf 0 0.1 0.1 0.6 3 3 3 #> 9 4 lower -Inf 0 0.1 0.1 0.8 4 4 4 #> 10 5 lower -Inf 0 0.1 0.1 1 5 5 5"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"Group sequential design power binary outcome 
measuring risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"","code":"gs_power_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), n = tibble::tibble(stratum = \"All\", n = c(40, 50, 60), analysis = 1:3), rd0 = 0, ratio = 1, weight = c(\"unstratified\", \"ss\", \"invar\"), upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(0.1), rep(-Inf, 2)), info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), binding = FALSE, test_upper = TRUE, test_lower = TRUE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"p_c Rate control group. p_e Rate experimental group. n Sample size. rd0 Treatment effect super-superiority designs, default 0. ratio Experimental:control randomization ratio. weight Weighting method, can \"unstratified\", \"ss\", \"invar\". upper Function compute upper bound. lower Function compare lower bound. upar Parameters passed upper. lpar Parameters passed lower. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. binding Indicator whether futility bound binding; default FALSE recommended. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. 
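Before the examples, note how `rd0` enters the calculation: under a super-superiority null the standardized effects reported in `$analysis` are theta0 = rd0 and theta1 = p_c - p_e, so a nonzero margin shifts the null hypothesis rather than the alternative. A tiny worked check against the unstratified Example 2 below (rates taken from that example):

p_c <- 0.2
p_e <- 0.15
rd0 <- 0.005
p_c - p_e # 0.05, matching the `rd` and `theta1` columns of $analysis in Example 2
rd0       # 0.005, matching the `rd0` and `theta0` columns of $analysis in Example 2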
tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"list input parameter, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"","code":"# Example 1 ---- library(gsDesign) # unstratified case with H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0, ratio = 1, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000104 3.71 0.629 #> 2 2 upper 0.0182 0.00605 2.51 0.301 #> 3 3 upper 0.0728 0.0250 1.99 0.195 #> 4 1 lower 0.0571 0.100 -1.28 -0.217 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0 0.05 0 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0 0.05 0 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0 0.05 0 104. 104. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 2 ---- # unstratified case with H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0.005, ratio = 1, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000116 3.71 0.571 #> 2 2 upper 0.0182 0.00680 2.51 0.276 #> 3 3 upper 0.0728 0.0281 1.99 0.181 #> 4 1 lower 0.0571 0.0949 -1.28 -0.191 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0.005 0.05 0.005 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0.005 0.05 0.005 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0.005 0.05 0.005 104. 104. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # use spending function gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0.005, ratio = 1, upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000116 3.71 0.571 #> 2 2 upper 0.0182 0.00680 2.51 0.276 #> 3 3 upper 0.0728 0.0281 1.99 0.181 #> 4 1 lower 0.0571 0.0949 -1.28 -0.191 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0.005 0.05 0.005 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0.005 0.05 0.005 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0.005 0.05 0.005 104. 104. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 3 ---- # stratified case under sample size weighting and H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0, ratio = 1, weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000437 0.000104 3.71 0.456 #> 2 2 upper 0.0237 0.00604 2.51 0.228 #> 3 3 upper 0.0795 0.0237 1.99 0.166 #> 4 1 lower 0.0470 0.100 -1.28 -0.157 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0479 0 0.0479 0 66.3 66.0 0.485 0.485 #> 2 2 66 0.0491 0 0.0491 0 116. 115. 0.846 0.846 #> 3 3 78 0.0492 0 0.0492 0 137. 136. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 4 ---- # stratified case under inverse variance weighting and H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0, ratio = 1, weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000443 0.000104 3.71 0.449 #> 2 2 upper 0.0240 0.00604 2.51 0.225 #> 3 3 upper 0.0803 0.0237 1.99 0.164 #> 4 1 lower 0.0467 0.100 -1.28 -0.155 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0477 0 0.0477 0 68.2 67.9 0.483 0.483 #> 2 2 66 0.0488 0 0.0488 0 119. 119. 0.845 0.845 #> 3 3 78 0.0489 0 0.0489 0 141. 141. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 5 ---- # stratified case under sample size weighting and H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0.02, ratio = 1, weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000437 0.000194 3.71 0.285 #> 2 2 upper 0.0237 0.0109 2.51 0.153 #> 3 3 upper 0.0795 0.0401 1.99 0.117 #> 4 1 lower 0.0470 0.0744 -1.28 -0.0717 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0479 0.02 0.0479 0.02 66.3 66.0 0.485 0.485 #> 2 2 66 0.0491 0.02 0.0491 0.02 116. 115. 0.846 0.846 #> 3 3 78 0.0492 0.02 0.0492 0.02 137. 136. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 6 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0.03, ratio = 1, weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000443 0.000267 3.71 0.197 #> 2 2 upper 0.0240 0.0145 2.51 0.113 #> 3 3 upper 0.0803 0.0518 1.99 0.0906 #> 4 1 lower 0.0467 0.0632 -1.28 -0.0275 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0477 0.03 0.0477 0.03 68.2 67.9 0.483 0.483 #> 2 2 66 0.0488 0.03 0.0488 0.03 119. 119. 0.845 0.845 #> 3 3 78 0.0489 0.03 0.0489 0.03 141. 141. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"Group sequential design power using weighted log rank test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"","code":"gs_power_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = tibble(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), event = c(30, 40, 50), analysis_time = NULL, binding = FALSE, upper = gs_spending_bound, lower = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lpar = list(sf = gsDesign::sfLDOF, total_spend = NULL), test_upper = TRUE, test_lower = TRUE, ratio = 1, weight = wlr_weight_fh, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), approx = \"asymptotic\", r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. event Targeted event analysis. analysis_time Minimum time analysis. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. lower Function compute lower bound. upar Parameters passed upper. lpar Parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. ratio Experimental:Control randomization ratio (yet implemented). weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). 
interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"","code":"library(gsDesign) library(gsDesign2) # set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 500 / 12) # set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # set the targeted number of events and analysis time target_events <- c(30, 40, 50) target_analysisTime <- c(10, 24, 30) # Example 1 ---- # \\donttest{ # fixed bounds and calculate the power for targeted number of events gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = NULL, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00470 0.00381 2.67 0.377 0.00381 #> 2 1 lower 0.0881 0.100 -1.28 1.60 0.9 #> 3 2 upper 0.0182 0.0127 2.29 0.485 0.0110 #> 4 3 upper 0.0439 0.0268 2.03 0.563 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 5.893949 245.5812 29.99999 0.9636346 0.03704308 3.683799 3.684201 #> 2 2 6.900922 287.5384 40.00003 0.9373448 0.06470405 5.749119 5.750793 #> 3 3 7.808453 325.3522 50.00000 0.9155821 0.08819527 8.132495 8.136743 #> info_frac info_frac0 #> 1 0.4529728 0.4527857 #> 2 0.7069318 0.7067685 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 2 ---- # fixed bounds and calculate the power for targeted analysis time # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = NULL, analysis_time = target_analysisTime, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0172 0.00381 2.67 0.546 0.00381 #> 2 1 lower 0.0335 0.100 -1.28 1.34 0.9 #> 3 2 upper 0.622 0.0141 2.29 0.747 0.0110 #> 4 3 upper 0.842 0.0263 2.03 0.789 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 3 ---- # fixed bounds and calculate the power for targeted analysis time & number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = target_analysisTime, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0172 0.00381 2.67 0.546 0.00381 #> 2 1 lower 0.0335 0.100 -1.28 1.34 0.9 #> 3 2 upper 0.622 0.0141 2.29 0.747 0.0110 #> 4 3 upper 0.842 0.0263 2.03 0.789 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 4 ---- # spending bounds and calculate the power for targeted number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = NULL, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> 
hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + 
sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> 
#> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00110 0.000865 3.13 0.319 0.000865 #> 2 1 lower 0.0569 0.0655 -1.51 1.74 0.935 #> 3 2 upper 0.0115 0.00767 2.44 0.463 0.00739 #> 4 2 lower 0.127 0.159 -1.06 1.40 0.857 #> 5 3 upper 0.0427 0.0250 2.00 0.568 0.0226 #> 6 3 lower 0.200 0.266 -0.738 1.23 0.770 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 5.893949 245.5812 29.99999 0.9636346 0.03704308 3.683799 3.684201 #> 2 2 6.900922 287.5384 40.00003 0.9373448 0.06470405 5.749119 5.750793 #> 3 3 7.808453 325.3522 50.00000 0.9155821 0.08819527 8.132495 8.136743 #> info_frac info_frac0 #> 1 0.4529728 0.4527857 #> 2 0.7069318 0.7067685 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 5 ---- # spending bounds and calculate the power for targeted analysis time # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = NULL, analysis_time = target_analysisTime, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) 
== 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 
#> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000207 0.00000163 4.65 0.348 0.00000163 #> 2 1 lower 0.00659 0.0269 -1.93 1.55 0.973 #> 3 2 upper 0.663 0.0142 2.19 0.756 0.0142 #> 4 2 lower 0.162 0.947 1.62 0.814 0.0527 #> 5 3 upper 0.811 0.0225 2.04 0.789 0.0209 #> 6 3 lower 0.200 0.980 2.13 0.780 0.0165 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] 
\"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 6 ---- # spending bounds and calculate the power for targeted analysis time & number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = target_analysisTime, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if 
(abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } 
#> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000207 0.00000163 4.65 0.348 0.00000163 #> 2 1 lower 0.00659 0.0269 -1.93 1.55 0.973 #> 3 2 upper 0.663 0.0142 2.19 0.756 0.0142 #> 4 2 lower 0.162 0.947 1.62 0.814 0.0527 #> 5 3 upper 0.811 0.0225 2.04 0.789 0.0209 #> 6 3 lower 0.200 0.980 2.13 0.780 0.0165 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":null,"dir":"Reference","previous_headings":"","what":"Derive spending bound for group sequential boundary — gs_spending_bound","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Computes one bound time based spending given distributional assumptions. user specifies gs_spending_bound() use functions, intended use . important user specifications made list provided functions using gs_spending_bound(). Function uses numerical integration Newton-Raphson iteration derive individual bound group sequential design satisfies targeted boundary crossing probability. 
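# Minimal sketch of a direct call to gs_spending_bound(); the spending
# function, total_spend, timing, and info values below are illustrative
# assumptions, not defaults taken from this reference page.
library(gsDesign2)
spend_par <- list(
  sf = gsDesign::sfLDOF,    # Lan-DeMets O'Brien-Fleming spending
  total_spend = 0.025,      # one-sided Type I error to spend
  param = NULL,
  timing = c(0.5, 0.75, 1)  # assumed information fractions
)
b1 <- gs_spending_bound(k = 1, par = spend_par, theta = 0, info = c(10, 15, 20))
# At the first analysis the bound simply inverts the cumulative alpha spent,
# so the null crossing probability reproduces the spending function value:
pnorm(b1, lower.tail = FALSE)
gsDesign::sfLDOF(alpha = 0.025, t = 0.5)$spend
# For k > 1, the bound is instead found by the numerical integration and
# Newton-Raphson iteration described above.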
The algorithm is a simple extension of the one presented in Chapter 19 of Jennison and Turnbull (2000).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"","code":"gs_spending_bound( k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"k Analysis for which the bound is to be computed. par A list with the following items: sf (class spending function), total_spend (total spend), param (any parameters needed by the spending function sf()), timing (a vector of values at which the spending function is evaluated, or NULL if information-based spending is used), max_info (when timing is NULL, this can be input as a positive number to be used with info to compute the information fraction at each analysis). hgm1 Subdensity grid from h1() (k=2) or hupdate() (k>2) for analysis k-1; if k=1, this is not used and may be NULL. theta Natural parameter used for lower bound spending only; represents the average drift at each analysis time at least up to analysis k; upper bound spending is always set under the null hypothesis (theta = 0). info Statistical information at all analyses, at least up to analysis k. efficacy TRUE (default) for an efficacy bound, FALSE otherwise. test_bound A logical vector of the same length as info indicating which analyses have a bound. r Integer value controlling the grid for numerical integration as in Jennison and Turnbull (2000); default is 18, range is 1 to 80. Larger values provide a larger number of grid points and greater accuracy. Normally r will not be changed by the user. tol Tolerance parameter for convergence (on the Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Returns a numeric bound (possibly infinite) or, upon failure, generates an error message.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"The contents of this section are shown in the PDF user manual only.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Jennison C and Turnbull BW (2000), Group Sequential Methods with Applications to Clinical Trials.
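# Sketch of how the `par` list drives spending (timing values assumed for
# illustration): the spending function returns cumulative alpha at each
# timing value, and gs_spending_bound() targets the incremental spend at
# analysis k.
timing <- c(0.45, 0.74, 1)                           # assumed information fractions
cum_spend <- gsDesign::sfLDOF(alpha = 0.025, t = timing)$spend
incr_spend <- diff(c(0, cum_spend))                  # incremental alpha per analysis
# When `timing` is NULL, the information fraction defaults to info / max(info),
# or to info / max_info if `par$max_info` is supplied.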
Boca Raton: Chapman Hall.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Keaven Anderson keaven_anderson@merck.com","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"","code":"gs_power_ahr( analysis_time = c(12, 24, 36), event = c(30, 40, 50), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- 
hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, 
length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00706 0.000867 3.13 0.316 0.000867 #> 2 1 lower 0.000935 0.00658 -2.48 2.49 0.993 #> 3 2 upper 0.115 0.00921 2.37 0.505 0.00892 #> 4 2 lower 0.00912 0.113 -1.21 1.42 0.888 #> 5 3 upper 0.324 0.0250 2.01 0.607 0.0222 #> 6 3 lower 0.0251 0.323 -0.474 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 24.00000 108 49.06966 0.7151566 0.3352538 11.999266 12.26741 #> 3 3 36.00000 108 66.23948 0.6833395 0.3807634 16.267921 16.55987 #> info_frac info_frac0 #> 1 0.4532499 0.4529033 #> 2 0.7376029 0.7407917 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"Derive spending bound MaxCombo group sequential 
boundary","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"","code":"gs_spending_combo(par = NULL, info = NULL)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"par list following items: sf (class spending function). total_spend (total spend). param (parameters needed spending function sf()). timing (vector containing values spending function evaluated NULL information-based spending used). max_info (timing NULL, can input positive number used info information fraction analysis). info Statistical information analyses, least analysis k.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"vector alpha spending per analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"","code":"# alpha-spending par <- list(sf = gsDesign::sfLDOF, total_spend = 0.025) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0001035057 0.0060483891 0.0250000000 par <- list(sf = gsDesign::sfLDPocock, total_spend = 0.025) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.01132081 0.01908456 0.02500000 par <- list(sf = gsDesign::sfHSD, total_spend = 0.025, param = -40) gs_spending_combo(par, info = 1:3 / 3) #> [1] 6.557724e-14 4.048992e-08 2.500000e-02 # Kim-DeMets (power) Spending Function par <- list(sf = gsDesign::sfPower, total_spend = 0.025, param = 1.5) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.004811252 0.013608276 0.025000000 # Exponential Spending Function par <- list(sf = gsDesign::sfExponential, total_spend = 0.025, param = 1) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.000015625 0.003952847 0.025000000 # Two-parameter Spending Function Families par <- list(sf = gsDesign::sfLogistic, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001757277 0.008146545 0.025000000 par <- list(sf = gsDesign::sfBetaDist, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001818609 0.006568999 0.025000000 par <- list(sf = gsDesign::sfCauchy, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001378849 0.023755732 0.025000000 par <- list(sf = gsDesign::sfExtremeValue, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001785159 0.007184159 0.025000000 par <- list(sf = gsDesign::sfExtremeValue2, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001799588 0.007015878 0.025000000 par <- list(sf = gsDesign::sfNormal, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001797471 0.006969761 0.025000000 # t-distribution Spending Function par <- list(sf = gsDesign::sfTDist, total_spend = 0.025, param = c(-1, 1.5, 4)) 
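# Sketch (spending function and values assumed for illustration, separate from
# the examples on this page): gs_spending_combo() returns the cumulative alpha
# spent at each analysis, with the last element equal to total_spend, so the
# per-analysis increments can be recovered with diff().
combo_par <- list(sf = gsDesign::sfLDOF, total_spend = 0.025)
cum_spend <- gsDesign2::gs_spending_combo(combo_par, info = 1:3 / 3)
diff(c(0, cum_spend))  # increments summing to total_spend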
gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.002063494 0.009705759 0.025000000 # Piecewise Linear and Step Function Spending Functions par <- list(sf = gsDesign::sfLinear, total_spend = 0.025, param = c(.2, .4, .05, .2)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00375000 0.01388889 0.02500000 par <- list(sf = gsDesign::sfStep, total_spend = 0.025, param = c(1 / 3, 2 / 3, .1, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0025 0.0025 0.0250 # Pointwise Spending Function par <- list(sf = gsDesign::sfPoints, total_spend = 0.025, param = c(.25, .25)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00625 0.00625 0.02500 # Truncated, trimmed and gapped spending functions par <- list(sf = gsDesign::sfTruncated, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00788072 0.02137939 0.02500000 par <- list(sf = gsDesign::sfTrimmed, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.01121102 0.01924407 0.02500000 par <- list(sf = gsDesign::sfGapped, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.007169093 0.007169093 0.025000000 # Xi and Gallo conditional error spending functions par <- list(sf = gsDesign::sfXG1, total_spend = 0.025, param = 0.5) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0001035057 0.0060483891 0.0250000000 par <- list(sf = gsDesign::sfXG2, total_spend = 0.025, param = 0.14) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.008419231 0.021216583 0.025000000 par <- list(sf = gsDesign::sfXG3, total_spend = 0.025, param = 0.013) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.02428922 0.02477989 0.02500000 # beta-spending par <- list(sf = gsDesign::sfLDOF, total_spend = 0.2) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.02643829 0.11651432 0.20000000"},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"Group sequential design using average hazard ratio non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"","code":"gs_update_ahr( x = NULL, alpha = NULL, ustime = NULL, lstime = NULL, observed_data = NULL )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"x design created either gs_design_ahr gs_power_ahr. alpha Type error updated design. ustime Default NULL case upper bound spending time determined timing. Otherwise, vector length k (total number analyses) spending time analysis. lstime Default NULL case lower bound spending time determined timing. 
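# Sketch of how `ustime` and `lstime` are typically constructed (event counts
# below are assumed for illustration; the Examples that follow use the same
# pattern): interim spending time is observed events divided by the final
# planned events, and the final analysis is set to 1 so that full alpha is spent.
planned_event_fa <- 295    # assumed final planned event count
observed_event_ia <- 188   # assumed observed interim event count
ustime <- c(observed_event_ia / planned_event_fa, 1)
lstime <- ustime           # often the same spending time is used for both bounds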
Otherwise, vector length k (total number analyses) spending time analysis observed_data list observed datasets analyses.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"","code":"library(gsDesign) library(gsDesign2) library(dplyr) alpha <- 0.025 beta <- 0.1 ratio <- 1 # Enrollment enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = (1:3) / 3) # Failure and dropout fail_rate <- define_fail_rate( duration = c(3, Inf), fail_rate = log(2) / 9, hr = c(1, 0.6), dropout_rate = .0001) # IA and FA analysis time analysis_time <- c(20, 36) # Randomization ratio ratio <- 1 # ------------------------------------------------- # # Example A: one-sided design (efficacy only) # ------------------------------------------------- # # Original design upper <- gs_spending_bound upar <- list(sf = sfLDOF, total_spend = alpha) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = upar, lower = gs_b, lpar = rep(-Inf, 2), test_upper = TRUE, test_lower = FALSE) |> to_integer() # Observed dataset at IA and FA set.seed(123) observed_data <- simtrial::sim_pw_surv( n = x$analysis$n[x$analysis$analysis == 2], stratum = data.frame(stratum = \"All\", p = 1), block = c(rep(\"control\", 2), rep(\"experimental\", 2)), enroll_rate = x$enroll_rate, fail_rate = (fail_rate |> simtrial::to_sim_pw_surv())$fail_rate, dropout_rate = (fail_rate |> simtrial::to_sim_pw_surv())$dropout_rate) observed_data_ia <- observed_data |> simtrial::cut_data_by_date(x$analysis$time[1]) observed_data_fa <- observed_data |> simtrial::cut_data_by_date(x$analysis$time[2]) observed_event_ia <- sum(observed_data_ia$event) observed_event_fa <- sum(observed_data_fa$event) planned_event_ia <- x$analysis$event[1] planned_event_fa <- x$analysis$event[2] # Example A1 ---- # IA spending = observed events / final planned events # the remaining alpha will be allocated to FA. 
ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.901 0.0250 1.99 0.794 0.0235 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A2 ---- # IA, FA spending = observed events / final planned events ustime <- c(observed_event_ia, observed_event_fa) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.899 0.0245 2.00 0.793 0.0230 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A3 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia) / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.901 0.0250 1.99 0.794 0.0235 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A4 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia), min(observed_event_fa, planned_event_fa)) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, 
observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.899 0.0245 2.00 0.793 0.0230 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # alpha is upadted to 0.05 gs_update_ahr( x = x, alpha = 0.05, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.461 0.0138 2.20 0.725 0.0138 #> 2 2 upper 0.943 0.0492 1.69 0.821 0.0451 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # ------------------------------------------------- # # Example B: Two-sided asymmetric design, # beta-spending with non-binding lower bound # ------------------------------------------------- # # Original design x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = list(sf = sfLDOF, total_spend = alpha), test_upper = TRUE, lower = gs_spending_bound, lpar = list(sf = sfLDOF, total_spend = beta), test_lower = c(TRUE, FALSE), binding = FALSE) |> to_integer() # Example B1 ---- # IA spending = observed events / final planned events # the remaining alpha will be allocated to FA. 
ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.891 0.0248 1.99 0.794 0.0235 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B2 ---- # IA, FA spending = observed events / final planned events ustime <- c(observed_event_ia, observed_event_fa) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.890 0.0243 2.00 0.793 0.0230 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B3 ---- ustime <- c(min(observed_event_ia, planned_event_ia) / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.891 0.0248 1.99 0.794 0.0235 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B4 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia), min(observed_event_fa, planned_event_fa)) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, 
lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.890 0.0243 2.00 0.793 0.0230 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B5 ---- # alpha is updated to 0.05 ---- gs_update_ahr(x = x, alpha = 0.05) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.511 0.0144 2.19 0.735 0.0144 #> 2 2 upper 0.934 0.0487 1.69 0.826 0.0458 #> 3 1 lower 0.0401 0.679 0.464 0.937 0.321 #> 4 2 lower 0.0401 0.679 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 202 0.7322996 0.3115656 50.50 50.50 0.6495177 #> 2 2 36.06513 382 311 0.6829028 0.3814027 77.75 77.75 1.0000000 #> info_frac0 #> 1 0.6495177 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B6 ---- # updated boundaries only when IA data is observed ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, NULL)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.903 0.0247 1.99 0.798 0.0233 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 311 0.6829028 0.3814027 77.75 77.75 1.0000000 #> info_frac0 #> 1 0.6045016 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # ------------------------------------------------- # # Example C: Two-sided asymmetric design, # with calendar spending for efficacy and futility bounds # beta-spending with non-binding lower bound # ------------------------------------------------- # # Original design x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = list(sf = 
sfLDOF, total_spend = alpha, timing = c(20, 36) / 36), test_upper = TRUE, lower = gs_spending_bound, lpar = list(sf = sfLDOF, total_spend = beta, timing = c(20, 36) / 36), test_lower = c(TRUE, FALSE), binding = FALSE) |> to_integer() # Updated design due to potential change of multiplicity graph gs_update_ahr(x = x, alpha = 0.05) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.4 #> 3 All 10 30.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.418 0.00855 2.38 0.711 0.00855 #> 2 2 upper 0.940 0.0493 1.66 0.825 0.0483 #> 3 1 lower 0.0273 0.601 0.257 0.964 0.399 #> 4 2 lower 0.0273 0.601 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.95805 368 195 0.7319980 0.3119776 48.75 48.75 0.65 #> 2 2 36.16991 368 300 0.6827856 0.3815744 75.00 75.00 1.00 #> info_frac0 #> 1 0.65 #> 2 1.00 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":null,"dir":"Reference","previous_headings":"","what":"Piecewise exponential cumulative distribution function — ppwe","title":"Piecewise exponential cumulative distribution function — ppwe","text":"Computes cumulative distribution function (CDF) survival rate piecewise exponential distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Piecewise exponential cumulative distribution function — ppwe","text":"","code":"ppwe(x, duration, rate, lower_tail = FALSE)"},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Piecewise exponential cumulative distribution function — ppwe","text":"x Times distribution computed. duration numeric vector time duration. rate numeric vector event rate. 
lower_tail Indicator whether lower (TRUE) upper tail (FALSE; default) CDF computed.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Piecewise exponential cumulative distribution function — ppwe","text":"vector cumulative distribution function survival values.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Piecewise exponential cumulative distribution function — ppwe","text":"Suppose \\(\\lambda_i\\) failure rate interval \\((t_{-1},t_i], =1,2,\\ldots,M\\) \\(0=t_00\\) : $$\\Lambda(t)=\\sum_{=1}^M \\delta(t\\leq t_i)(\\min(t,t_i)-t_{-1})\\lambda_i.$$ survival time \\(t\\) $$S(t)=\\exp(-\\Lambda(t)).$$","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Piecewise exponential cumulative distribution function — ppwe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Piecewise exponential cumulative distribution function — ppwe","text":"","code":"# Plot a survival function with 2 different sets of time values # to demonstrate plot precision corresponding to input parameters. x1 <- seq(0, 10, 10 / pi) duration <- c(3, 3, 1) rate <- c(.2, .1, .005) survival <- ppwe( x = x1, duration = duration, rate = rate ) plot(x1, survival, type = \"l\", ylim = c(0, 1)) x2 <- seq(0, 10, .25) survival <- ppwe( x = x2, duration = duration, rate = rate ) lines(x2, survival, col = 2)"},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":null,"dir":"Reference","previous_headings":"","what":"Average hazard ratio under non-proportional hazards — pw_info","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"Provides geometric average hazard ratio various non-proportional hazards assumptions either single multiple strata studies. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"","code":"pw_info( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), total_duration = 30, ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-start enrollment data cutoff; can single value vector positive numbers. 
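To make the ppwe() relationship above concrete: with piecewise failure rates lambda_i on intervals (t_{i-1}, t_i], where 0 = t_0 < t_1 < ... < t_M, the cumulative hazard is Lambda(t) = sum_i (min(t, t_i) - t_{i-1})_+ * lambda_i and the survival is S(t) = exp(-Lambda(t)). The short sketch below recomputes S(t) by hand for the duration/rate inputs used in the ppwe() example and compares it with the package result; the helper name surv_by_hand is illustrative only and not part of gsDesign2.

library(gsDesign2)

# Piecewise model from the ppwe() example above
duration <- c(3, 3, 1)
rate <- c(.2, .1, .005)
x <- seq(0, sum(duration), by = 0.25)  # stay within the specified intervals

# Hand computation of S(t) = exp(-Lambda(t)); illustrative helper, not part of gsDesign2
surv_by_hand <- function(x, duration, rate) {
  t_end <- cumsum(duration)
  t_start <- c(0, head(t_end, -1))
  sapply(x, function(t) {
    exposure <- pmax(pmin(t, t_end) - t_start, 0)  # time at risk in each interval
    exp(-sum(exposure * rate))
  })
}

max(abs(surv_by_hand(x, duration, rate) - ppwe(x = x, duration = duration, rate = rate)))
# expected to be essentially 0 (floating point error only)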
ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"data frame time (total_duration), stratum, t, hr (hazard ratio), event (expected number events), info (information given scenarios), info0 (information related null hypothesis), n (sample size) value total_duration input","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"","code":"# Example: default pw_info() #> time stratum t hr n event info info0 #> 1 30 All 0 0.9 12 21.24782 5.300180 5.311956 #> 2 30 All 3 0.6 96 37.24314 9.027063 9.310786 # Example: default with multiple analysis times (varying total_duration) pw_info(total_duration = c(15, 30)) #> time stratum t hr n event info info0 #> 1 15 All 0 0.9 12 20.13991 5.023729 5.034979 #> 2 15 All 3 0.6 96 10.13850 2.417457 2.534625 #> 3 30 All 0 0.9 12 21.24782 5.300180 5.311956 #> 4 30 All 3 0.6 96 37.24314 9.027063 9.310786 # Stratified population enroll_rate <- define_enroll_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 3)), duration = c(2, 10, 4, 4, 8), rate = c(5, 10, 0, 3, 6) ) fail_rate <- define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = c(1, Inf, 1, Inf), fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) # Give results by change-points in the piecewise model ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7332218 164 113.2782 28.18130 28.31954 #> 2 30 0.7175169 170 166.1836 41.49942 41.54590 # Same example, give results by strata and time period pw_info(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time stratum t hr n event info info0 #> 1 15 High 0 0.80 0 12.076677 2.990626 3.019169 #> 2 15 High 1 0.60 54 23.118608 5.741884 5.779652 #> 3 15 Low 0 0.90 5 9.962824 2.484435 2.490706 #> 4 15 Low 1 0.75 105 68.120046 16.964361 17.030011 #> 5 30 High 0 0.80 0 14.169853 3.509171 3.542463 #> 6 30 High 1 0.60 60 45.213092 11.297986 11.303273 #> 7 30 Low 0 0.90 5 9.962824 2.484435 2.490706 #> 8 30 Low 1 0.75 105 96.837847 24.207826 24.209462"},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":null,"dir":"Reference","previous_headings":"","what":"Approximate survival distribution with piecewise exponential distribution — s2pwe","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"Converts discrete set points arbitrary survival distribution piecewise exponential approximation.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"","code":"s2pwe(times, survival)"},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"times Positive increasing times survival distribution provided. 
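Because the stratified pw_info() example above returns one row per stratum and piecewise period, summing the event and info columns within each analysis time should reproduce the per-analysis totals reported by ahr() for the same inputs (for example, about 113.3 expected events at month 15). A minimal sketch, assuming the enroll_rate and fail_rate objects defined in that stratified example are in scope:

library(dplyr)
library(gsDesign2)

# Aggregate the per-stratum, per-period rows from pw_info() ...
pw <- pw_info(
  enroll_rate = enroll_rate, fail_rate = fail_rate,
  total_duration = c(15, 30)
)
pw %>%
  group_by(time) %>%
  summarize(event = sum(event), info = sum(info), info0 = sum(info0))

# ... and compare with the per-analysis totals from ahr()
ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30))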
survival Survival (1 - cumulative distribution function) specified times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"tibble containing duration rate.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"","code":"# Example: arbitrary numbers s2pwe(1:9, (9:1) / 10) #> # A tibble: 9 × 2 #> duration rate #> #> 1 1 0.105 #> 2 1 0.118 #> 3 1 0.134 #> 4 1 0.154 #> 5 1 0.182 #> 6 1 0.223 #> 7 1 0.288 #> 8 1 0.405 #> 9 1 0.693 # Example: lognormal s2pwe(c(1:6, 9), plnorm(c(1:6, 9), meanlog = 0, sdlog = 2, lower.tail = FALSE)) #> # A tibble: 7 × 2 #> duration rate #> #> 1 1 0.693 #> 2 1 0.316 #> 3 1 0.224 #> 4 1 0.177 #> 5 1 0.148 #> 6 1 0.128 #> 7 3 0.103"},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":null,"dir":"Reference","previous_headings":"","what":"Summary for fixed design or group sequential design objects — summary.fixed_design","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"Summary fixed design group sequential design objects","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"","code":"# S3 method for class 'fixed_design' summary(object, ...) # S3 method for class 'gs_design' summary( object, analysis_vars = NULL, analysis_decimals = NULL, col_vars = NULL, col_decimals = NULL, bound_names = c(\"Efficacy\", \"Futility\"), ... )"},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"object design object returned fixed_design_xxx() gs_design_xxx(). ... Additional parameters (used). analysis_vars variables put summary header analysis. analysis_decimals displayed number digits analysis_vars. vector unnamed, must match length analysis_vars. vector named, specify number digits variables want displayed differently defaults. col_vars variables displayed. col_decimals decimals displayed displayed variables col_vars. vector unnamed, must match length col_vars. vector named, specify number digits columns want displayed differently defaults. 
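The rates returned by s2pwe() follow from the standard piecewise-exponential identity: over an interval (t_{i-1}, t_i] the constant hazard is lambda_i = log(S(t_{i-1}) / S(t_i)) / (t_i - t_{i-1}), with S(0) = 1. A quick hand check against the first s2pwe() example above (survival 0.9, 0.8, ..., 0.1 at times 1 to 9), which should reproduce the printed rates 0.105, 0.118, ..., 0.693; the object names s_prev and rate_by_hand are illustrative only.

library(gsDesign2)

times <- 1:9
survival <- (9:1) / 10

# Hand-computed piecewise hazards: lambda_i = log(S(t_{i-1}) / S(t_i)) / (t_i - t_{i-1})
s_prev <- c(1, head(survival, -1))
rate_by_hand <- log(s_prev / survival) / diff(c(0, times))
round(rate_by_hand, 3)

# Compare with
s2pwe(times, survival)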
bound_names Names bounds; default c(\"Efficacy\", \"Futility\").","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"summary table (data frame).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"","code":"library(dplyr) # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # AHR ---- # under fixed power fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 463. 325. 36 1.96 0.025 0.9 # FH ---- # under fixed power fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0, 0) (logrank) 458. 321. 36 1.96 0.025 0.9 # Design parameters ---- library(gsDesign) library(gsDesign2) library(dplyr) # enrollment/failure rates enroll_rate <- define_enroll_rate( stratum = \"All\", duration = 12, rate = 1 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ) # Information fraction info_frac <- (1:3) / 3 # Analysis times in months; first 2 will be ignored as info_frac will not be achieved analysis_time <- c(.01, .02, 36) # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- .1 # Upper bound upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) # Lower bound lower <- gs_spending_bound lpar <- list(sf = gsDesign::sfHSD, total_spend = 0.1, param = 0, timing = NULL) # weight function in WLR wgt00 <- function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) } wgt05 <- function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = .5) } # test in COMBO fh_test <- rbind( data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36) ) # Example 1 ---- # \\donttest{ x_ahr <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, info_frac = info_frac, # Information fraction analysis_time = analysis_time, ratio = ratio, alpha = alpha, beta = beta, upper = upper, upar = upar, lower = lower, lpar = lpar ) x_ahr %>% summary() #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… Effi… 2.51 
0.725 0.006 0.414 #> 5 Analysis: 3 Tim… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the digits to display x_ahr %>% summary(analysis_vars = c(\"time\", \"event\", \"info_frac\"), analysis_decimals = c(1, 0, 2)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Tim… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the labels of the crossing probability x_ahr %>% summary(bound_names = c(\"A is better\", \"B is better\")) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… B is… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… A is… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… B is… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… A is… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Tim… B is… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… A is… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the variables to be summarized for each analysis x_ahr %>% summary(analysis_vars = c(\"n\", \"event\"), analysis_decimals = c(1, 1)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 N: … Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 N: … Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 N: … Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 N: … Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 N: … Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 N: … Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the digits for the columns x_ahr %>% summary(col_decimals = c(z = 4)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Futi… -0.938 1.19 0.826 0.0338 #> 2 Analysis: 1 Ti… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Ti… Futi… 0.626 0.923 0.266 0.0666 #> 4 Analysis: 2 Ti… Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Ti… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Ti… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the columns to display x_ahr %>% summary(col_vars = c(\"z\", \"~hr at bound\", \"nominal p\")) #> Adding missing grouping variables: `Analysis` #> # A tibble: 6 × 5 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` #> #> 1 Analysis: 1 Time: 11.7 N: 479.6 Events… Futi… -0.94 1.19 0.826 #> 2 Analysis: 1 Time: 11.7 N: 479.6 Events… Effi… 3.71 0.510 0.0001 #> 3 Analysis: 2 Time: 20.3 N: 493.1 Events… Futi… 0.63 0.923 0.266 #> 4 Analysis: 2 Time: 20.3 N: 493.1 Events… Effi… 2.51 0.725 0.006 #> 5 Analysis: 3 Time: 36 N: 493.1 Events: … Futi… 1.99 0.812 0.0233 #> 6 Analysis: 3 Time: 36 N: 493.1 Events: … Effi… 1.99 0.812 0.0231 # Customize columns and digits x_ahr %>% summary(col_vars = c(\"z\", \"~hr at bound\", \"nominal p\"), col_decimals = c(4, 2, 2)) #> Adding missing grouping variables: `Analysis` #> # A tibble: 6 × 5 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` #> #> 1 Analysis: 1 Time: 11.7 N: 479.6 Event… Futi… -0.938 1.19 0.83 #> 2 Analysis: 
1 Time: 11.7 N: 479.6 Event… Effi… 3.71 0.51 0 #> 3 Analysis: 2 Time: 20.3 N: 493.1 Event… Futi… 0.626 0.92 0.27 #> 4 Analysis: 2 Time: 20.3 N: 493.1 Event… Effi… 2.51 0.72 0.01 #> 5 Analysis: 3 Time: 36 N: 493.1 Events:… Futi… 1.99 0.81 0.02 #> 6 Analysis: 3 Time: 36 N: 493.1 Events:… Effi… 1.99 0.81 0.02 # } # Example 2 ---- # \\donttest{ x_wlr <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, weight = wgt05, info_frac = NULL, analysis_time = sort(unique(x_ahr$analysis$time)), ratio = ratio, alpha = alpha, beta = beta, upper = upper, upar = upar, lower = lower, lpar = lpar ) x_wlr %>% summary() #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~wHR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Futi… -1.17 1.28 0.879 0.0141 #> 2 Analysis: 1 Ti… Effi… 6.02 0.284 0 0 #> 3 Analysis: 2 Ti… Futi… 0.57 0.919 0.283 0.0464 #> 4 Analysis: 2 Ti… Effi… 3.16 0.627 0.0008 0.214 #> 5 Analysis: 3 Ti… Futi… 1.96 0.789 0.0247 0.100 #> 6 Analysis: 3 Ti… Effi… 1.96 0.789 0.0247 0.9 #> # ℹ 1 more variable: `Null hypothesis` # } # Maxcombo ---- # \\donttest{ x_combo <- gs_design_combo( ratio = 1, alpha = 0.025, beta = 0.2, enroll_rate = define_enroll_rate(duration = 12, rate = 500 / 12), fail_rate = tibble::tibble( stratum = \"All\", duration = c(4, 100), fail_rate = log(2) / 15, hr = c(1, .6), dropout_rate = .001 ), fh_test = fh_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) x_combo %>% summary() #> # A tibble: 6 × 6 #> # Groups: Analysis [3] #> Analysis Bound Z `Nominal p` `Alternate hypothesis` `Null hypothesis` #> #> 1 Analysis: 1 … Futi… -2.72 0.997 0.0003 0.0033 #> 2 Analysis: 1 … Effi… 6.18 0 0 0 #> 3 Analysis: 2 … Futi… 0.65 0.257 0.0847 0.743 #> 4 Analysis: 2 … Effi… 2.8 0.0026 0.220 0.0026 #> 5 Analysis: 3 … Futi… 2.1 0.018 0.2 0.976 #> 6 Analysis: 3 … Effi… 2.1 0.018 0.8 0.0237 # } # Risk difference ---- # \\donttest{ gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign::gsDesign( k = 3, test.type = 1, sfu = gsDesign::sfLDOF, sfupar = NULL )$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) %>% summary() #> # A tibble: 3 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z ~Risk difference at …¹ `Nominal p` `Alternate hypothesis` #> #> 1 Analysi… Futi… -1.28 -0.0201 0.9 0 #> 2 Analysi… Effi… 3.71 0.0582 0.0001 0.298 #> 3 Analysi… Effi… 2.51 0.033 0.006 0.9 #> # ℹ abbreviated name: ¹​`~Risk difference at bound` #> # ℹ 1 more variable: `Null hypothesis` # }"},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":null,"dir":"Reference","previous_headings":"","what":"Rounds sample size to an even number for equal design — to_integer","title":"Rounds sample size to an even number for equal design — to_integer","text":"Rounds sample size even number equal design","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Rounds sample size to an even number for equal design — to_integer","text":"","code":"to_integer(x, ...) # S3 method for class 'fixed_design' to_integer(x, sample_size = TRUE, ...) 
# S3 method for class 'gs_design' to_integer(x, sample_size = TRUE, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rounds sample size to an even number for equal design — to_integer","text":"x object returned fixed_design_xxx() gs_design_xxx(). ... Additional parameters (used). sample_size Logical, indicting ceiling sample size even integer.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Rounds sample size to an even number for equal design — to_integer","text":"list similar output fixed_design_xxx() gs_design_xxx(), except sample size integer.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Rounds sample size to an even number for equal design — to_integer","text":"","code":"library(dplyr) library(gsDesign2) # Average hazard ratio # \\donttest{ x <- fixed_design_ahr( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 464 325 35.9 1.96 0.025 0.900 # FH x <- fixed_design_fh( alpha = 0.025, power = 0.9, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), rho = 0.5, gamma = 0.5, study_duration = 36, ratio = 1 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0.5, 0.5) 378 264 35.8 1.96 0.025 0.900 # MB x <- fixed_design_mb( alpha = 0.025, power = 0.9, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), tau = 4, study_duration = 36, ratio = 1 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 430 302 36.1 1.96 0.025 0.901 # } # \\donttest{ # Example 1: Information fraction based spending gs_design_ahr( analysis_time = c(18, 30), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Effi… 2.57 0.696 0.005 0.288 #> 2 Analysis: 2 Tim… Effi… 1.99 0.799 0.0234 0.901 #> # ℹ 1 more variable: `Null hypothesis` gs_design_wlr( analysis_time = c(18, 30), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z `~wHR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Effi… 2.57 0.700 0.0051 0.289 #> 2 Analysis: 2 Ti… Effi… 1.99 0.802 0.0234 0.900 #> # ℹ 1 more variable: `Null hypothesis` gs_design_rd( p_c = tibble::tibble(stratum = c(\"A\", \"B\"), rate = c(.2, .3)), p_e = tibble::tibble(stratum = c(\"A\", \"B\"), rate = c(.15, .27)), 
weight = \"ss\", stratum_prev = tibble::tibble(stratum = c(\"A\", \"B\"), prevalence = c(.4, .6)), info_frac = c(0.7, 1), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z ~Risk difference at …¹ `Nominal p` `Alternate hypothesis` #> #> 1 Analysi… Effi… 2.44 0.0339 0.0074 0.616 #> 2 Analysi… Effi… 2 0.0232 0.0228 0.9 #> # ℹ abbreviated name: ¹​`~Risk difference at bound` #> # ℹ 1 more variable: `Null hypothesis` # Example 2: Calendar based spending x <- gs_design_ahr( upper = gs_spending_bound, analysis_time = c(18, 30), upar = list( sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = c(18, 30) / 30 ), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() # The IA nominal p-value is the same as the IA alpha spending x$bound$`nominal p`[1] #> [1] 0.003808063 gsDesign::sfLDOF(alpha = 0.025, t = 18 / 30)$spend #> [1] 0.003808063 # }"},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":null,"dir":"Reference","previous_headings":"","what":"Weight functions for weighted log-rank test — wlr_weight","title":"Weight functions for weighted log-rank test — wlr_weight","text":"wlr_weight_fh Fleming-Harrington, FH(rho, gamma) weight function. wlr_weight_1 constant log rank test. wlr_weight_power Gehan-Breslow Tarone-Ware weight function. wlr_weight_mb Magirr (2021) weight function.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Weight functions for weighted log-rank test — wlr_weight","text":"","code":"wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) wlr_weight_1(x, arm0, arm1) wlr_weight_n(x, arm0, arm1, power = 1) wlr_weight_mb(x, arm0, arm1, tau = NULL, w_max = Inf)"},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Weight functions for weighted log-rank test — wlr_weight","text":"x vector numeric values. arm0 arm object defined npsurvSS package. arm1 arm object defined npsurvSS package. rho scalar parameter controls type test. gamma scalar parameter controls type test. tau scalar parameter cut-time modest weighted log rank test. power scalar parameter controls power weight function. w_max scalar parameter cut-weight modest weighted log rank test.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Weight functions for weighted log-rank test — wlr_weight","text":"vector weights. vector weights. vector weights. 
vector weights.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Weight functions for weighted log-rank test — wlr_weight","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Weight functions for weighted log-rank test — wlr_weight","text":"","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_fh(1:3, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> [1] 1 1 1 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_1(1:3, arm0, arm1) #> [1] 1 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_n(1:3, arm0, arm1, power = 2) #> [1] 3.448634 2.973357 2.563657 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_mb(1:3, arm0, arm1, tau = -1, w_max = 1.2) #> [1] 1.075901 1.157545 1.200000"},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-112","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.2","title":"gsDesign2 1.1.2","text":"CRAN release: 2024-04-09","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-2","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.2","text":"gs_update_ahr() function now available efficacy futility boundary update based blinded estimation treatment effect (#370).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-1-2","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.1.2","text":"Fix accrual parameters bugs gs_design_wlr() depending npsurvSS (#344, #356). Fix gs_design_ahr() incorporate information fraction driven design number analyses >= 4 (#358).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-2","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.2","text":"Zero failure rate intervals acceptable input (#360). Study duration > 100 units executable event accrual slow (#368).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"documentation-1-1-2","dir":"Changelog","previous_headings":"","what":"Documentation","title":"gsDesign2 1.1.2","text":"new vignette introducing boundary update available (#278, #364, #366). 
new vignette bridging gsDesign2 6 test types gsDesign available. pkgdown website re-organized providing better view users (#341).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"testing-1-1-2","dir":"Changelog","previous_headings":"","what":"Testing","title":"gsDesign2 1.1.2","text":"Independent testing as_gt() added (#337). Restructure tests make self-contained (#347).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-111","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.1","title":"gsDesign2 1.1.1","text":"CRAN release: 2024-02-09","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-1","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.1","text":"as_rtf() method now available fixed_design gs_design objects generating RTF table outputs (#278).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-1-1","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.1.1","text":"gs_power_wlr() to_integer() now check convert integer sample size rigorously (#322). gs_design_*() now handle exceptions explicitly hazard ratio set 1 throughout study (#301). fixed_design_rd() generate warnings due previous default value change h1_spending (#296).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-1","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.1","text":"gs_power_ahr() now runs twice fast using data.table performance optimizations (#295), enhanced similar improvements gs_info_ahr() pw_info() (#300). Enrollment failure rate input constructors validators refactored check format instead class. change reduces number warning messages catches real exceptions errors properly (#316). Nested functions refactored reusable internal functions, improve code rigor, avoid potential scoping pitfalls, facilitate debugging (#235). fixed designs, variable names table outputs to_integer() summary() updated (#292).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"documentation-1-1-1","dir":"Changelog","previous_headings":"","what":"Documentation","title":"gsDesign2 1.1.1","text":"Add new vignette statistical information null alternative hypothesis (#289). Improve define_enroll_rate() define_fail_rate() documentation adding detailed descriptions improving code examples (#302). function reference page now dedicated sections piecewise exponential distributions computing trial events (#258). Use four trailing dashes convention standardize code comment section format (#308).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"namespace-and-testing-1-1-1","dir":"Changelog","previous_headings":"","what":"Namespace and testing","title":"gsDesign2 1.1.1","text":"Tidy namespace removing rlang adding stats Imports (#307, #325). Qualify namespaces tests avoid library() calls (#332). Fortify GitHub Actions workflows limiting token usage necessary enabling manual trigger workflow runs (#326). 
Update GitHub Actions workflows latest versions upstream (#330).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-110","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.0","title":"gsDesign2 1.1.0","text":"CRAN release: 2023-08-23","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-1-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.1.0","text":"Split fixed_design() group fixed_design_*() functions enhanced modularity (#263). gs_design_rd() gs_power_rd() now updated options weighting stratified design (#276). ppwe() now accepts two arguments duration rate instead data frame fail_rate (#254). Unexport helper functions gridpts(), h1(), hupdate() (#253).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-0","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.0","text":"Introduce define_enroll_rate() define_fail_rate() new input constructor functions replace tibble inputs (#238). Add new function pw_info() calculates statistical information piecewise model (#262).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-0","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.0","text":"Add vignette showing canonical joint distribution Z-score B-values null alternative hypothesis AHR test (#246). Refactor expected_event() improve computational performance (@jdblischak, #250). Move source code legacy version inst/ tests/testthat/ developer tests (#269).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-109","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.9","title":"gsDesign2 1.0.9","text":"CRAN release: 2023-06-20","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-9","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.9","text":"Add CRAN download counts badge (#215). Update documentation gs_design_rd() (#220). Format footnote numbers using decimal notation (#222). Split C++ functions individual .cpp header files (#224).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-9","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.9","text":"Fix digits display summary() (#231).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-108","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.8","title":"gsDesign2 1.0.8","text":"CRAN release: 2023-05-01","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-8","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.8","text":"Update calculation upper/lower bounds final analysis MaxCombo tests (#217). Update fixed_design() function application stratified design using Lachin Foulkes method (#211). Correct fixed_design() function application rmst (#212). Rename info_scale argument options c(0, 1, 2) c(\"h0_h1_info\", \"h0_info\", \"h1_info\") informative make default value (\"h0_h1_info\") clear (#203). Add missing global functions/variables (#213). Fix outdated argument names use canonical style text elements README.md (#198). 
Add CRAN downloads badge README.md show monthly downloads (#216).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-8","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.8","text":"Fix calculation futility bounds gs_power_ahr() (#202).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-107","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.7","title":"gsDesign2 1.0.7","text":"CRAN release: 2023-03-20","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-7","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.7","text":"Move imported dependencies Suggests Imports. Remove redundant dependencies Suggests. Update GitHub Actions workflows latest versions upstream. Add rule .gitattributes GitHub Linguist keep repository’s language statistics accurate.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-6","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.6","text":"Export functions gridpts(), h1(), hupdate(), gs_create_arm() avoid use ::: code examples. Fix write path issue moving test fixture generation script data-raw/ included package.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-105","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.5","title":"gsDesign2 1.0.5","text":"First submission CRAN March 2023.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-0-5","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.0.5","text":"Passes lintr check entire package (#150, #151, #171). Improve documentation (#161, #163, #168, #176).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-5","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.5","text":"check_fail_rate() 1 number fail_rate > 0 (#132). gs_power_ahr() study duration > 48 months (#141). fixed_design() event-based design (#143). gs_design_combo() test applies part analysis (#148). gs_info_rd() variance calculation (#153). summary() capitalized first letter summary header (#164).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-100","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.0","title":"gsDesign2 1.0.0","text":"GitHub release December 2022.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-0-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.0.0","text":"Merges gsDesign2 v0.2.1 gsdmvn. Updates API follow new style guide vignette(\"style\"). See detailed mapping old API new API #84.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-0-0","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.0.0","text":"Supports organized summary tables gt tables. Power/sample size calculation risk difference. Integer sample size support (#116, #125). Adds fixed_design() implement different methods power/sample size calculation. Adds info_scale arguments gs_design_*() gs_power_*(). 
Adds RMST milestone methods fixed design.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-0","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.0","text":"expected_accrual() stratified population. gs_spending_bound() IA close FA (#40). gs_power_bound() applied MaxCombo test (#62). gs_design_npe() type error (#59).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"minor-improvements-1-0-0","dir":"Changelog","previous_headings":"","what":"Minor improvements","title":"gsDesign2 1.0.0","text":"Adds re-organizes vignettes.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-021","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.2.1","title":"gsDesign2 0.2.1","text":"GitHub release August 2022. release merging Merck/gsdmvn.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-020","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.2.0","title":"gsDesign2 0.2.0","text":"GitHub release May 2022. Supports Biometrical Journal paper “unified framework weighted parametric group sequential design” Keaven M. Anderson, Zifang Guo, Jing Zhao, Linda Z. Sun.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-010","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.1.0","title":"gsDesign2 0.1.0","text":"GitHub release May 2021. Updated AHR vignette introduce average hazard ratio concept properly. Added arbitrary distribution vignette demonstrate s2pwe(). Corrected calculations AHR() using stratified population. Release Regulatory/Industry Symposium training.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009006","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9006","title":"gsDesign2 0.0.0.9006","text":"GitHub release December 2019. Added vignette eEvents_df() explaining methods thoroughly. Updated eEvents_df() simplify output option simple = FALSE.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009005","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9005","title":"gsDesign2 0.0.0.9005","text":"GitHub release December 2019. Updated docs/ directory correct reference materials website. Minor fixes eAccrual().","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009004","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9004","title":"gsDesign2 0.0.0.9004","text":"GitHub release November 2019. Moved new simulation functions simtrial package (simfix(), simfix2simPWSurv(), pMaxCombo()).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009003","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9003","title":"gsDesign2 0.0.0.9003","text":"GitHub release November 2019. Tried make AHR() simfix() compatible . Improved vignette group sequential design. Added pkgdown website documentation vignettes. Added support functions support approximation using visualization piecewise model.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0002","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.2","title":"gsDesign2 0.0.0.2","text":"GitHub release October 2019. Update AHR() output trial duration, expected events average hazard ratio tibble. 
Vignette AHRvignette demonstrating sample size computations fixed design non-proportional hazards assumptions. Vignette gsNPH demonstrating sample size computations group sequential design non-proportional hazards assumptions. Initial implementation pMaxCombo() compute p-value MaxCombo test; pMaxComboVignette demonstrates capability.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0001","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.1","title":"gsDesign2 0.0.0.1","text":"GitHub release September 2019. Computations based piecewise constant enrollment piecewise exponential failure rate. Expected event count calculation different hazard ratios eEvents_df(). Average hazard ratio computation based expected event counts AHR(). Vignette demonstrating fixed sample size computation simulation verify power.","code":""}] +[{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Quick start for NPH sample size and power","text":"provide simple examples use gsDesign2 package deriving fixed group sequential designs non-proportional hazards. piecewise model enrollment, failure rates, dropout rates changing hazard ratio time allow great flexibility design assumptions. Users encouraged suggest features immediate long-term interest add. Topics included : Packages required used. Specifying enrollment rates. Specifying failure dropout rates possibly changing hazard ratio time. Deriving fixed design interim analysis. Simple boundary specification group sequential design. Deriving group sequential design non-proportional hazards. Displaying design properties. Design properties alternate assumptions. Differences gsDesign. Future enhancement priorities. items discussed briefly enable quick start early adopters also suggesting ultimate possibilities software enables. Finally, final section provides current enhancement priorities, potential topic-related enhancements discussed throughout document.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"packages-used","dir":"Articles","previous_headings":"","what":"Packages used","title":"Quick start for NPH sample size and power","text":"gsDesign package used check results proportional hazards well source deriving bounds using spending functions. computations compute expected event accumulation average hazard ratio time; key inputs group sequential distribution parameters. implement group sequential distribution theory non-proportional hazards derive wide variety boundary types group sequential designs. simtrial package used verify design properties using simulation.","code":"library(gsDesign) library(gsDesign2) library(knitr) library(dplyr) library(gt) library(ggplot2)"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"enrollment-rates","dir":"Articles","previous_headings":"","what":"Enrollment rates","title":"Quick start for NPH sample size and power","text":"Piecewise constant enrollment rates input tabular format. assume enrollment ramp-25\\%, 50\\%, 75\\% final enrollment rate 2 months followed steady state 100\\% enrollment another 6 months. rates increased later power design appropriately. 
However, fixed enrollment rate periods remain unchanged.","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 2, 6), rate = (1:4) / 4 ) enroll_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"failure-and-dropout-rates","dir":"Articles","previous_headings":"","what":"Failure and dropout rates","title":"Quick start for NPH sample size and power","text":"Constant failure dropout rates specified study period stratum; consider single stratum . hazard ratio provided treatment/control hazard rate period stratum. dropout rate period assumed treatment group; restriction eliminated future version, needed. Generally, take advantage identity exponential distribution median m, corresponding failure rate \\lambda \\lambda = \\log(2) / m. consider control group exponential time--event 12 month median. assume hazard ratio 1 4 months, followed hazard ratio 0.6 thereafter. Finally, assume low 0.001 exponential dropout rate per month treatment groups.","code":"median_surv <- 12 fail_rate <- define_fail_rate( duration = c(4, Inf), fail_rate = log(2) / median_surv, hr = c(1, .6), dropout_rate = .001 ) fail_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"fixed-design","dir":"Articles","previous_headings":"","what":"Fixed design","title":"Quick start for NPH sample size and power","text":"enrollment, failure dropout rate assumptions now derive sample size trial targeted complete 36 months interim analysis, 90\\% power 2.5\\% Type error. quick summary targeted sample size obtained . Note normally round N even number Events next integer. enrollment rates period increased proportionately size trial desired properties; duration enrollment rate changed.","code":"alpha <- .025 beta <- .1 # 1 - targeted power d <- fixed_design_ahr( enroll_rate = enroll_rate, # Relative enrollment rates fail_rate = fail_rate, # Failure rates from above alpha = alpha, # Type I error power = 1 - beta, # Type II error = 1 - power study_duration = 36 # Planned trial duration ) d %>% summary() %>% as_gt() d$enroll_rate %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"group-sequential-design","dir":"Articles","previous_headings":"","what":"Group sequential design","title":"Quick start for NPH sample size and power","text":"go detail group sequential designs . brief, however, sequence tests Z_1, Z_2,\\ldots, Z_K follow multivariate normal distribution performed test new treatment better control (Jennison Turnbull (1999)). assume Z_k > 0 favorable experimental treatment. Generally Type error set tests controlled null hypothesis treatment difference sequence bounds b_1, b_2,\\ldots,b_K chosen Type error \\alpha > 0 \\alpha = 1 - P_0(\\cap_{k=1}^K Z_k < b_k) P_0() refers probability null hypothesis. referred non-binding bound since assumed trial stopped early futility Z_k small.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"simple-efficacy-bound-definition","dir":"Articles","previous_headings":"Group sequential design","what":"Simple efficacy bound definition","title":"Quick start for NPH sample size and power","text":"Lan DeMets (1983) developed spending function method deriving group sequential bounds. involves use non-decreasing spending function f(t) t \\geq 0 f(0)=0 f(t)=\\alpha t \\geq 1. 
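As a numeric illustration of the Type I error identity above: under the canonical group sequential joint distribution (Jennison and Turnbull (1999)), Corr(Z_j, Z_k) = sqrt(t_j / t_k) for j <= k, so the cumulative crossing probability under the null can be checked directly with the mvtnorm package (used here purely for illustration; it is not how gsDesign2 computes bounds). The sketch plugs in the efficacy bounds and information fractions from the gsDesign comparison shown just below and should return approximately 0.025.

library(mvtnorm)

# Canonical group sequential correlation: Corr(Z_j, Z_k) = sqrt(t_j / t_k), j <= k
t <- c(0.4850799, 0.7993622, 1)        # information fractions reported below
b <- c(3.013804, 2.264946, 2.027236)   # Lan-DeMets O'Brien-Fleming-like bounds from gsDesign below
corr <- outer(t, t, function(ti, tj) sqrt(pmin(ti, tj) / pmax(ti, tj)))

# Type I error = 1 - P0(all Z_k below their bounds)
1 - pmvnorm(lower = rep(-Inf, 3), upper = b, corr = corr)[1]
# ~0.025 by construction of the spending-function bounds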
Suppose K>0 analyses performed proportion t_1< t_2 <\\ldots t_K=1 planned statistical information (e.g., proportion planned events time--event endpoint trial proportion observations binomial normal endpoint). Bounds first k analyses 1\\leq k\\leq K recursively defined spending function multivariate normal distribution satisfy f(t_k) = 1 - P_0(\\cap_{j=1}^k Z_j < b_j). quick start, illustrate type efficacy bound. Perhaps common spending function approach Lan DeMets (1983) approximation O’Brien-Fleming bound f(t) = 2-2\\Phi\\left(\\frac{\\Phi^{-1}(1-\\alpha/2)}{t^{1/2}}\\right). Suppose K=3 t_1=0.5, t_2 = 0.75, t_3 = 1. can use assumptions group sequential design efficacy bound using Lan-DeMets O’Brien-Fleming spending function \\alpha = 0.025 Bounds 3 analyses follows. Note expected sample size time data cutoff analysis also N. filter upper bound lower bounds Z = -Inf shown. gsDesign replicate bounds (replicate sample size).","code":"design1s <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(16, 26, 36), # Calendar time of planned analyses upper = gs_spending_bound, # Spending function bound for efficacy upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # Specify spending function and total Type I error lower = gs_b, lpar = rep(-Inf, 3), # No futility bound info_scale = \"h0_h1_info\" ) design1s %>% summary() %>% as_gt( title = \"1-sided group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound\" ) x <- gsDesign(k = 3, test.type = 1, timing = design1s$analysis$info_frac, sfu = sfLDOF) cat( \"gsDesign\\n Upper bound: \", x$upper$bound, \"\\n Cumulative boundary crossing probability (H0): \", cumsum(x$upper$prob[, 1]), \"\\n Timing (IF): \", x$timing, \"\\ngs_design_ahr\\n Upper bound: \", design1s$bound$z, \"\\n Cumulative boundary crossing probability (H0): \", design1s$bound$probability0, \"\\n Timinng (IF): \", design1s$analysis$info_frac, \"\\n\" ) #> gsDesign #> Upper bound: 3.013804 2.264946 2.027236 #> Cumulative boundary crossing probability (H0): 0.00128997 0.01217731 0.025 #> Timing (IF): 0.4850799 0.7993622 1 #> gs_design_ahr #> Upper bound: 3.003506 2.256138 2.028823 #> Cumulative boundary crossing probability (H0): 0.001334442 0.01246455 0.025 #> Timinng (IF): 0.4850799 0.7993622 1"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"two-sided-testing","dir":"Articles","previous_headings":"Group sequential design","what":"Two-sided testing","title":"Quick start for NPH sample size and power","text":"consider symmetric asymmetric 2-sided designs.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"symmetric-2-sided-bounds","dir":"Articles","previous_headings":"Group sequential design > Two-sided testing","what":"Symmetric 2-sided bounds","title":"Quick start for NPH sample size and power","text":"first 2-sided design symmetric design. 
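As a quick numeric check of the Lan-DeMets O'Brien-Fleming spending function used for the efficacy bound above, the cumulative alpha spent at the planned information fractions can be computed directly. A minimal sketch: gsDesign::sfLDOF() is the same spending function passed to upar above, and 0.5, 0.75, 1 are the planned fractions from the text, not the event-based fractions the design actually reports.

library(gsDesign)
# Cumulative Type I error spent at information fractions 0.5, 0.75, 1
sfLDOF(alpha = 0.025, t = c(0.5, 0.75, 1))$spend
# Same values from the spending function formula f(t) = 2 - 2 * Phi(Phi^{-1}(1 - alpha / 2) / sqrt(t))
2 - 2 * pnorm(qnorm(1 - 0.025 / 2) / sqrt(c(0.5, 0.75, 1)))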
Design bounds confirmed : bounds can plotted easily:","code":"design2ss <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(16, 26, 36), # Calendar analysis times upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), h1_spending = FALSE # This specifies futility testing with spending under NULL ) design2ss %>% summary() %>% as_gt( title = \"2-sided symmetric group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound\" ) ggplot( data = design2ss$analysis %>% left_join(design2ss$bound, by = \"analysis\"), aes(x = event, y = z, group = bound) ) + geom_line(aes(linetype = bound)) + geom_point() + ggtitle(\"2-sided symmetric bounds with O'Brien-Fleming-like spending\")"},{"path":"https://merck.github.io/gsDesign2/articles/gsDesign2.html","id":"asymmetric-2-sided-bounds","dir":"Articles","previous_headings":"Group sequential design > Two-sided testing","what":"Asymmetric 2-sided bounds","title":"Quick start for NPH sample size and power","text":"Asymmetric 2-sided designs common symmetric since objectives two bounds tend different. often caution analyze early efficacy use conservative bound; principles used example designs far. Stopping lack benefit experimental treatment control overt indication unfavorable trend generally might examined early bounds less stringent. add early futility analysis nominal 1-sided p-value 0.05 wrong direction (Z=\\Phi^{-1}(0.05) 30% 50\\% events accrued. might considered disaster check. point time, may perceived need futility analysis. efficacy, add infinite bound first interim analysis. now slightly larger sample size account possibility early futility stop. Bounds now:","code":"design2sa <- gs_design_ahr( alpha = alpha, beta = beta, enroll_rate = enroll_rate, fail_rate = fail_rate, analysis_time = c(12, 16, 26, 36), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # Same efficacy bound as before test_lower = c(FALSE, TRUE, TRUE, TRUE), # Only test efficacy after IA1 lower = gs_b, lpar = c(rep(qnorm(.05), 2), -Inf, -Inf) # Fixed lower bound at first 2 analyses ) design2sa %>% summary() %>% as_gt( title = \"2-sided asymmetric group sequential bound using AHR method\", subtitle = \"Lan-DeMets spending to approximate O'Brien-Fleming bound for efficacy, futility disaster check at IA1, IA2 only\" )"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Average hazard ratio and sample size under non-proportional hazards","text":"document demonstrates applications average hazard ratio concept design fixed designs without interim analysis. Throughout consider 2-arm trial experimental control group time--event endpoint. Testing differences treatment groups performed using stratified logrank test. setting, gsDesign2::ahr() routine provides average hazard ratio can used sample size using function gsDesign::nSurv(). approach assumes piecewise constant enrollment rates piecewise exponential failure rates option including multiple strata. approach allows flexibility approximate wide variety scenarios. evaluate approximations used via simulation using simtrial package; specifically provide simulation routine changes specified user easily incorporated. 
consider non-proportional hazards single stratum multiple strata different underlying proportional hazards assumptions. two things note regarding differences simtrial::simfix() gsDesign2::ahr(): simtrial::simfix() less flexible requires strata enrolled relative rates throughout trial whereas gsDesign2::ahr() allows, example, enrollment start stop different times different strata. document, use restrictive parameterization simtrial::simfix() can confirm asymptotic sample size approximation based gsDesign2::ahr() simulation. simtrial::simfix() provides flexibility test statistics used gsDesign2::ahr() documented pMaxCombo vignette demonstrating use Fleming-Harrington weighted logrank tests combinations tests.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"document-organization","dir":"Articles","previous_headings":"Introduction","what":"Document organization","title":"Average hazard ratio and sample size under non-proportional hazards","text":"vignette organized follows: single stratum design assumes delayed treatment benefit. stratified example assumes different proportional hazards 3 strata. Description design scenario. Deriving average hazard ratio. Deriving sample size based average hazard ratio. Computing plotting average hazard ratio function time. Simulation verify sample size approximation provides targeted power. simulation done data cutoff performed 5 different ways: Based targeted trial duration Based targeted minimum follow-duration Based targeted event count Based maximum targeted event count targeted trial duration Based maximum targeted event count targeted minimum follow-method based waiting achieve targeted event count targeted minimum follow-appears practical provide targeted power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"initial-setup","dir":"Articles","previous_headings":"Introduction > Document organization","what":"Initial setup","title":"Average hazard ratio and sample size under non-proportional hazards","text":"begin setting two parameters used throughout simulations used verify accuracy power approximations; either customized simulation. First, set number simulations performed. can increase improve accuracy simulation estimates power. Simulations using simtrial::simfix() routine use blocked randomization. set change individual simulations. Based balanced randomization block set randomization ratio experimental control 1. load packages needed . gsDesign used implementation Schoenfeld (1981) approximation compute number events required power trial proportional hazards assumption. dplyr tibble work tabular data ‘data wrangling’ approach coding. simtrial enable simulations. survival enable Cox proportional hazards estimation (average) hazard ratio simulation compare approximation provided gsDesign2::ahr() routine computes expected average hazard ratio trial (Kalbfleisch Prentice (1981), Schemper, Wakounig, Heinze (2009)). Hidden underneath gsDesign2::eEvents_df() routine provides expected event counts period stratum hazard ratio differs. 
basic calculation used gsDesign2::ahr() routine.","code":"nsim <- 2000 block <- rep(c(\"Control\", \"Experimental\"), 2) ratio <- 1 library(gsDesign) library(gsDesign2) library(ggplot2) library(dplyr) library(tibble) library(survival) library(gt)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"design-scenario","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Design scenario","title":"Average hazard ratio and sample size under non-proportional hazards","text":"set first scenario design parameters. Enrollment ramps course first 4 months follow-steady state enrollment thereafter. adjusted proportionately power trial later. control group piecewise exponential distribution median 9 first 3 months 18 thereafter. hazard ratio experimental group versus control 1 first 3 months followed 0.55 thereafter. Since single stratum, set strata default:","code":"# Note: this is done differently for multiple strata; see below! enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), dropout_rate = .001, hr = c(1, .55) ) total_duration <- 30 strata <- tibble::tibble(stratum = \"All\", p = 1)"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"computing-average-hazard-ratio","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Computing average hazard ratio","title":"Average hazard ratio and sample size under non-proportional hazards","text":"compute average hazard ratio using gsDesign2::ahr() (average hazard ratio) routine. modify enrollment rates proportionately sample size computed. result given enrollment rates adjusted next step. However, since adjusted proportionately relative enrollment timing changing, average hazard ratio change. Approximations statistical information null (info0) alternate (info) hypotheses provided . Recall parameterization terms \\log(HR), , thus information intended approximate 1 variance Cox regression coefficient treatment effect; checked simulation later. result can explained number events observed first 3 months treatment treatment group. Now can replicate geometric average hazard ratio (AHR) computed using ahr() routine . compute logarithm HR computed weighted average weighting expected number events hazard ratio. Exponentiating resulting weighted average gives geometric mean hazard ratio, label AHR.","code":"avehr <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) avehr %>% gt() xx <- pw_info( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) xx %>% gt() xx %>% summarize(AHR = exp(sum(event * log(hr) / sum(event)))) %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"deriving-the-design","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Deriving the design","title":"Average hazard ratio and sample size under non-proportional hazards","text":"average hazard ratio, use call gsDesign::nEvents() uses Schoenfeld (1981) approximation derive targeted number events. need average hazard ratio , randomization ratio (experimental/control), Type error Type II error (1 - power). also compute proportionately increase enrollment rates achieve targeted number events; round number events required next higher integer. 
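The Schoenfeld (1981) approximation invoked through gsDesign::nEvents() can also be written out by hand. A minimal sketch; the hazard ratio 0.7 below is an arbitrary illustrative value, not the average hazard ratio computed in this article.

hr <- 0.7      # illustrative value only
alpha <- 0.025 # 1-sided Type I error
beta <- 0.1    # Type II error (1 - power)
# Schoenfeld approximation for 1:1 randomization: 4 * (z_alpha + z_beta)^2 / log(hr)^2
ceiling(4 * (qnorm(1 - alpha) + qnorm(1 - beta))^2 / log(hr)^2)
# Should match the event count returned by gsDesign::nEvents() with ratio = 1
ceiling(gsDesign::nEvents(hr = hr, alpha = alpha, beta = beta, ratio = 1))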
also compute sample size, rounding nearest even integer.","code":"target_event <- gsDesign::nEvents( hr = avehr$ahr, # average hazard ratio computed above ratio = 1, # randomization ratio alpha = .025, # 1-sided Type I error beta = .1 # Type II error (1-power) ) target_event <- ceiling(target_event) target_event #> [1] 309 # Update enroll_rate to obtain targeted events enroll_rate$rate <- ceiling(target_event) / avehr$event * enroll_rate$rate avehr <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = as.numeric(total_duration) ) avehr %>% gt() # round up sample size in both treatment groups sample_size <- ceiling(sum(enroll_rate$rate * enroll_rate$duration) / 2) * 2 sample_size #> [1] 576"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"average-hazard-ratio-and-expected-event-accumulation-over-time","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Average hazard ratio and expected event accumulation over time","title":"Average hazard ratio and sample size under non-proportional hazards","text":"examine average hazard ratio function trial duration modified enrollment required power trial. also plot expected event accrual time; although graphs go 40 months, recall targeted trial duration 30 months. key design consideration selecting trial duration based things like degree ahr improvement time versus urgency completing trial quickly possible, noting required sample size decrease longer follow-.","code":"avehrtbl <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = 1:(total_duration + 10) ) ggplot(avehrtbl, aes(x = time, y = ahr)) + geom_line() + ylab(\"Average HR\") + ggtitle(\"Average HR as a function of study duration\") ggplot(avehrtbl, aes(x = time, y = event)) + geom_line() + ylab(\"Expected events\") + ggtitle(\"Expected event accumulation as a function of study duration\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"simulation-to-verify-power","dir":"Articles","previous_headings":"Single stratum non-proportional hazards example","what":"Simulation to verify power","title":"Average hazard ratio and sample size under non-proportional hazards","text":"use function simtrial::simfix() simplify setting executing simulation evaluate sample size derivation . Arguments simtrial::simfix() slightly different set-used gsDesign2::ahr() function used . Thus, reformatting input parameters involved. One difference gsDesign2::ahr() parameterization simtrial::simfix() block provided specify fixed block randomization opposed ratio gsDesign2::ahr(). following summarizes outcomes data cutoff chosen. Regardless cutoff chosen, see power approximates targeted 90% quite well. statistical information computed simulation computed one simulation variance Cox regression coefficient treatment (.e., log hazard ratio). column HR exponentiated mean Cox regression coefficients (geometric mean HR). see HR estimate matches simulations quite well. column info estimated statistical information alternate hypothesis, info0 estimate null hypothesis. value info0 1/4 expected events calculated . case, information approximation alternate hypothesis appears slightly small, meaning asymptotic approximation used overpower trial. 
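The statement that info0 is one quarter of the expected events under 1:1 randomization can be checked directly from the ahr() output created above. A minimal sketch; avehr is the object computed earlier and event/info0 are columns of that output.

# Null hypothesis information is approximately expected events / 4 under 1:1 randomization
avehr$info0
avehr$event / 4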
Nonetheless, approximation power appear quite good noted .","code":"# Do simulations # Cut at targeted study duration results1 <- simtrial::simfix( nsim = nsim, block = block, sampleSize = sample_size, strata = strata, enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration, target_event = ceiling(target_event), timingType = 1:5 ) # Loading the data saved previously results1 <- readRDS(\"fixtures/results1.rds\") results1$Positive <- results1$Z <= qnorm(.025) results1 %>% group_by(cut) %>% summarise( Simulations = n(), Power = mean(Positive), sdDur = sd(Duration), Duration = mean(Duration), sdEvents = sd(Events), Events = mean(Events), HR = exp(mean(lnhr)), sdlnhr = sd(lnhr), info = 1 / sdlnhr^2 ) %>% gt() %>% fmt_number(column = 2:9, decimals = 3) avehr %>% gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"design-scenario-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Design scenario","title":"Average hazard ratio and sample size under non-proportional hazards","text":"set design scenario parameter. limited simultaneous enrollment strata since simtrial::simfix() routine uses simtrial::simPWSurv() limited scenario. specify three strata: High risk: 1/3 population median time--event 6 months treatment effect hazard ratio 1.2. Moderate risk: 1/2 population median time--event 9 months hazard ratio 0.2. Low risk: 1/6 population essentially cured arms (median 100, HR = 1).","code":"strata <- tibble::tibble(stratum = c(\"High\", \"Moderate\", \"Low\"), p = c(1 / 3, 1 / 2, 1 / 6)) enroll_rate <- define_enroll_rate( stratum = c(array(\"High\", 4), array(\"Moderate\", 4), array(\"Low\", 4)), duration = rep(c(2, 2, 2, 18), 3), rate = c((1:4) / 3, (1:4) / 2, (1:4) / 6) ) fail_rate <- define_fail_rate( stratum = c(\"High\", \"Moderate\", \"Low\"), duration = 100, fail_rate = log(2) / c(6, 9, 100), dropout_rate = .001, hr = c(1.2, 1 / 3, 1) ) total_duration <- 36"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"computing-average-hazard-ratio-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Computing average hazard ratio","title":"Average hazard ratio and sample size under non-proportional hazards","text":"Now transform enrollment rates account stratified population. examine expected events stratum. Getting average log(HR) weighted Events exponentiating, get overall AHR just derived.","code":"ahr2 <- ahr(enroll_rate, fail_rate, total_duration) ahr2 %>% gt() xx <- pw_info(enroll_rate, fail_rate, total_duration) xx %>% gt() xx %>% ungroup() %>% summarise(lnhr = sum(event * log(hr)) / sum(event), AHR = exp(lnhr)) %>% gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"deriving-the-design-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Deriving the design","title":"Average hazard ratio and sample size under non-proportional hazards","text":"derive sample size . plan sample size based average hazard ratio overall population use across strata. First, derive targeted events: Next, adapt enrollment rates proportionately trial powered targeted failure rates follow-duration. 
targeted sample size, rounding even integer, :","code":"target_event <- gsDesign::nEvents( hr = ahr2$ahr, ratio = 1, alpha = .025, beta = .1 ) target_event <- ceiling(target_event) target_event #> [1] 216 enroll_rate <- enroll_rate %>% mutate(rate = target_event / ahr2$event * rate) ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration ) %>% gt() sample_size <- ceiling(sum(enroll_rate$rate * enroll_rate$duration) / 2) * 2 sample_size #> [1] 340"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"average-hr-and-expected-event-accumulation-over-time","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Average HR and expected event accumulation over time","title":"Average hazard ratio and sample size under non-proportional hazards","text":"Plotting average hazard ratio function study duration, see improves considerably course study. also plot expected event accumulation. , plot 10 months planned study duration 36 months allow evaluation event accumulation versus treatment effect different trial durations.","code":"avehrtbl <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = 1:(total_duration + 10) ) ggplot(avehrtbl, aes(x = time, y = ahr)) + geom_line() + ylab(\"Average HR\") + ggtitle(\"Average HR as a function of study duration\") ggplot(avehrtbl, aes(x = time, y = event)) + geom_line() + ylab(\"Expected events\") + ggtitle(\"Expected event accumulation as a function of study duration\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-ahr-under-nph.html","id":"simulation-to-verify-power-1","dir":"Articles","previous_headings":"Different proportional hazards by strata","what":"Simulation to verify power","title":"Average hazard ratio and sample size under non-proportional hazards","text":"change enrollment rates stratum produced gsDesign::nSurv() overall enrollment rates needed simtrial::simfix(). Now simulate summarize results. , see expected statistical information simulation greater expected Schoenfeld approximation expected events divided 4. Finally, compare simulation results asymptotic approximation . achieved power simulation just targeted 90%; noting simulation standard error 0.006, asymptotic approximation quite good. 
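The simulation standard error quoted for the estimated power follows from the binomial variance of a proportion. A minimal check using the nsim = 2000 replications set at the start of this article; the value cited above (about 0.006) is of this order.

# Standard error of an estimated power near 0.9 based on 2000 simulations
sqrt(0.9 * (1 - 0.9) / 2000) # about 0.0067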
Using final cutoff requires targeted events minimum follow-seems reasonable convention preserved targeted design power.","code":"er <- enroll_rate %>% group_by(stratum) %>% mutate(period = seq_len(n())) %>% group_by(period) %>% summarise(rate = sum(rate), duration = last(duration)) er %>% gt() results2 <- simtrial::simfix( nsim = nsim, block = block, sampleSize = sample_size, strata = strata, enroll_rate = er, fail_rate = fail_rate, total_duration = as.numeric(total_duration), target_event = as.numeric(target_event), timingType = 1:5 ) results2 <- readRDS(\"fixtures/results2.rds\") results2$Positive <- (pnorm(results2$Z) <= .025) results2 %>% group_by(cut) %>% summarize( Simulations = n(), Power = mean(Positive), sdDur = sd(Duration), Duration = mean(Duration), sdEvents = sd(Events), Events = mean(Events), HR = exp(mean(lnhr)), sdlnhr = sd(lnhr), info = 1 / sdlnhr^2 ) %>% gt() %>% fmt_number(column = 2:9, decimals = 3) ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = total_duration ) %>% gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Approximating an arbitrary survival distribution","text":"demonstrate approximate arbitrary continuous survival distributions piecewise exponential approximations. enables sample size computations arbitrary survival models using software designed piecewise exponential distribution. Three functions particular demonstrated: s2pwe() translates arbitrary survival distribution piecewise exponential. ppwe() computes cumulative survival distribution upper tail distribution form generated s2pwe(). p_pm() provides cumulative survival distribution Poisson mixture distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"lognormal-approximation","dir":"Articles","previous_headings":"","what":"Lognormal approximation","title":"Approximating an arbitrary survival distribution","text":"demonstrate s2pwe() approximating lognormal distribution piecewise exponential failure rates. Note resulting log_normal_rate used, final piecewise exponential duration extended. , arbitrarily approximated 6 piecewise exponential rates duration 1 unit time (say, month) followed final rate extends infinity. compare resulting approximation actual lognormal survival using ppwe() compute survival probabilities P\\{T>t\\}. better approximation, use larger number points. plot log scale y-axis since piecewise exponential survival ppwe() piecewise linear scale. note beginning rate period approximation actual survival distribution approximation match exactly indicated circles graph. considered lognormal distribution due flexibility allows hazard rates time; see, example, Wikipedia.","code":"log_normal_rate <- s2pwe( times = c(1:6, 9), survival = plnorm(c(1:6, 9), meanlog = 0, sdlog = 2, lower.tail = FALSE) ) log_normal_rate ## # A tibble: 7 × 2 ## duration rate ## ## 1 1 0.693 ## 2 1 0.316 ## 3 1 0.224 ## 4 1 0.177 ## 5 1 0.148 ## 6 1 0.128 ## 7 3 0.103 # Use a large number of points to plot lognormal survival times <- seq(0, 12, .025) plot(times, plnorm(times, meanlog = 0, sdlog = 2, lower.tail = FALSE), log = \"y\", type = \"l\", main = \"Lognormal Distribution vs. 
Piecewise Approximation\", yaxt = \"n\", ylab = \"log(Survival)\", col = 1 ) # Now plot the pieceise approximation using the 7-point approximation from above lines( times, ppwe(x = times, duration = log_normal_rate$duration, rate = log_normal_rate$rate), col = 2 ) # Finally, add point markers at the points used in the approximation points(x = c(0:6), plnorm(c(0:6), meanlog = 0, sdlog = 2, lower.tail = FALSE), col = 1) text(x = c(5, 5), y = c(.5, .4), labels = c(\"Log-normal\", \"Piecewise Approximation (7 pts)\"), col = 1:2, pos = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-arbitrary-distribution.html","id":"poisson-mixture-model","dir":"Articles","previous_headings":"","what":"Poisson mixture model","title":"Approximating an arbitrary survival distribution","text":"consider Poisson mixture model incorporate cure model sample size planning. form survival function S(t)=\\exp(-\\theta F_0(t)) t \\geq 0 F_0(t) continuous cumulative distribution function non-negative random variable F_0(0)=0 F_0(t)\\uparrow 1 t\\uparrow \\infty. note t\\uparrow \\infty, S(t)\\downarrow \\exp(-\\theta)=c refer c cure rate. function p_pm() assumes F_0(t)=1-\\exp(-\\lambda t) exponential cumulative distribution function resulting survival distribution t \\geq 0: S(t; \\theta, \\lambda) = \\exp(-\\theta(1-\\exp(-\\lambda t))). Note set default lower.tail=FALSE survival function computation default: plot \\lambda = \\log(2) / 10 make F_0(t) exponential distribution median 10. set \\theta = -\\log(.4) obtain cure rate 0.4. overlay piecewise exponential approximation. note two different \\theta values provide proportional hazards model ratio cumulative hazard function H(t; \\theta, \\lambda) = \\theta\\exp(-\\lambda t) constant: \\frac{\\log(S(t; \\theta_1, \\lambda))}{\\log(S(t; \\theta_2, \\lambda))} = \\theta_1/\\theta_2. given \\theta value can compute \\lambda provide survival rate c_1 > \\exp(-\\theta) arbitrary time t_1>0 setting: \\lambda = -\\log\\left(\\frac{\\theta - \\log(c_1)}{\\theta}\\right)/t_1. compute \\theta \\lambda values cure rate 0.4 survival rate 0.6 30 months: confirm survival time 30:","code":"p_pm <- function(x, theta, lambda, lower_tail = FALSE) { exp(-theta * (1 - exp(-lambda * x))) } lambda <- log(2) / 10 theta <- -log(.4) times <- 0:40 plot(times, p_pm(times, theta, lambda), type = \"l\", ylab = \"Survival\", xlab = \"Time\", log = \"y\") # Now compute piecewise exponential approximation x <- seq(8, 40, 8) pm_rate <- s2pwe( times = x, survival = p_pm(x, theta = theta, lambda = lambda) ) # Now plot the piecewise approximation using the 7-point approximation from above lines( c(0, x), ppwe(x = c(0, x), duration = pm_rate$duration, rate = pm_rate$rate), col = 2 ) points(c(0, x), p_pm(c(0, x), theta, lambda)) theta <- -log(0.4) lambda <- -log((theta + log(.6)) / theta) / 30 p_pm(30, theta, lambda) ## [1] 0.6"},{"path":"https://merck.github.io/gsDesign2/articles/story-canonical-h0-h1.html","id":"null-hypothesis","dir":"Articles","previous_headings":"","what":"Null hypothesis","title":"Canonical joint distribution of Z-score and B-values under null and alternative hypothesis","text":"distribution \\{B_k\\}_{k = 1, \\ldots, K} following structure: B_1, B_2, \\ldots, B_K multivariate normal distribution. E(B_k \\;|\\; H_0) = 0 k = 1, \\ldots, K. \\text{Var}(B_k \\;|\\; H_0) = t_k. \\text{Cov}(B_i, B_j \\;|\\; H_0) = t_i 1 \\leq \\leq j \\leq K. 
derivation last 2 statement \\begin{eqnarray} \\text{Var}(B_k\\;|\\; H_0) & = & \\frac{ \\text{Var}(\\sum_{=1}^{d_k} \\Delta_i | H_0) }{ \\text{Var}(\\sum_{=1}^{d_K} \\Delta_i | H_0) } = t_k\\\\ \\text{Cov}(B_i, B_j \\;|\\; H_0) & = & \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s\\;|\\; H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s\\;|\\; H_0 \\right) = t_i \\end{eqnarray} Accordingly, \\{Z_k\\}_{k = 1, \\ldots, K} canonical joint distribution following properties: Z_1, Z_2, \\ldots, Z_K multivariate normal distribution. E(Z_k \\;|\\; H_0) = 0. \\text{Var}(Z_k \\;|\\; H_0) = 1. \\text{Cov}(Z_i, Z_j \\;|\\; H_0) = \\sqrt{t_i/t_j} 1 \\leq \\leq j \\leq K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-canonical-h0-h1.html","id":"alternative-hypothesis","dir":"Articles","previous_headings":"","what":"Alternative hypothesis","title":"Canonical joint distribution of Z-score and B-values under null and alternative hypothesis","text":"alternative hypothesis, 2 B-values (B_i, B_j \\leq j), distribution \\{B_k\\}_{k = 1, \\ldots, K} following structure: B_1, B_2, \\ldots, B_K multivariate normal distribution. E(B_k \\;|\\; H_1) = \\theta_k t_k \\sqrt{\\mathcal I_{k, H_0}} k = 1, \\ldots, K. \\text{Var}(B_k \\;|\\; H_1) = t_k \\mathcal I_{k, H_0} / \\mathcal I_{k, H_1}. \\text{Cov}(B_i, B_j \\;|\\; H_1) = t_i \\; \\mathcal I_{, H_0}/\\mathcal I_{, H_1} 1 \\leq \\leq j \\leq K. last statement derived \\begin{eqnarray} \\text{Cov}(B_i, B_j \\;|\\; H_1) & = & \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s | H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_1 \\right) \\\\ & = & \\underbrace{ \\frac{1}{\\text{Var}(\\sum_{s=1}^{d_K} \\Delta_s | H_0)} \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_0 \\right) }_{t_i} \\underbrace{ \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_1 \\right) }_{1/\\mathcal I_{, H_1}} \\bigg/ \\underbrace{ \\text{Var} \\left( \\sum_{s=1}^{d_i} \\Delta_s | H_0 \\right) }_{1/\\mathcal I_{, H_0}} \\\\ & = & t_i\\; \\mathcal I_{, H_0}/\\mathcal I_{, H_1}. \\end{eqnarray} Accordingly, Z_k canonical joint distribution following properties: Z_1, Z_2, \\ldots, Z_K multivariate normal distribution. E(Z_k \\;|\\; H_1) = \\theta_k \\sqrt{\\mathcal I_{k, H_0}} treatment effect \\theta_k k-th analysis. \\text{Var}(Z_k \\;|\\; H_1) = \\mathcal I_{k, H_0} / \\mathcal I_{k, H_1}. \\text{Cov}(Z_i, Z_j \\;|\\; H_1) = \\sqrt{\\frac{t_i}{t_j}} \\frac{\\mathcal I_{, H_0}}{\\mathcal I_{, H_1}} 1 \\leq \\leq j \\leq K. last statement \\begin{eqnarray} \\text{Cov}(Z_i, Z_j \\;|\\; H_1) & = & \\text{Cov}(B_i/\\sqrt{t_i}, B_j/\\sqrt{t_j}) \\\\ & = & \\frac{1}{\\sqrt{t_i t_j}} \\text{Cov}(B_i, B_j) \\\\ & = & \\frac{1}{\\sqrt{t_i t_j}} \\text{Var}(B_i) \\\\ & = & \\sqrt{\\frac{t_i}{t_j}} \\frac{\\mathcal I_{, H_0}}{\\mathcal I_{, H_1}} \\end{eqnarray} local alternative assumption holds, \\text{Cov}(Z_i, Z_j) \\approx \\sqrt{\\frac{t_i}{t_j}}, format canonical joint distribution introduced Chapter 3 Proschan, Lan, Wittes (2006).","code":""},{"path":[]},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-compare-power-delay-effect.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Power for delayed effect scenarios","text":"consider delayed effect scenario control group time--event distribution exponential median 15 months. experimental group hazard ratio vs. control 1 6 months 0.6 thereafter. Enrollment constant rate 12 months. Total study duration 20 48 months. 
Exponential dropout rate 0.001 per month. scenarios, investigate power, sample size events 6 tests: fh_05: Fleming-Harrington \\rho=0, \\gamma=0.5 test obtain power 85% given 1-sided Type error 0.025. fh_00: regular logrank test \\rho=0, \\gamma=0 fixed study duration \\\\{20, 24, 28, \\ldots, 60\\}. mc2_test: MaxCombo test including 2 WLR tests, .e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1)\\}. mc2_test: MaxCombo test including 3 WLR tests, .e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0.5, \\tau = -1)\\}. mc4_test: MaxCombo test including 4 WLR tests, .e., \\{(\\rho=0, \\gamma=0, \\tau = -1), (\\rho=0, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0.5, \\tau = -1), (\\rho=0.5, \\gamma=0, \\tau = -1)\\}. mb_6: Magirr-Burman \\rho=-1, \\gamma=0, \\tau = 6 test fixed study duration \\\\{20, 24, 28, \\ldots, 60\\}. compute power logrank test. general summary Fleming-Harrington test meaningful power gain relative logrank regardless study durations evaluated.","code":"enroll_rate <- define_enroll_rate(duration = 12, rate = 1) fail_rate <- define_fail_rate( duration = c(6, 100), fail_rate = log(2) / 15, hr = c(1, .6), dropout_rate = 0.001 ) enroll_rate %>% gt() %>% tab_header(title = \"Enrollment Table of Scenario 1\") fail_rate %>% gt() %>% tab_header(title = \"Failure Table of Scenario 1\") tab <- NULL for (trial_duration in seq(24, 60, 4)) { # Fleming-Harrington rho=0, gamma=0.5 test fh_05 <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.15, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration ) |> to_integer() # Regular logrank test fh_00 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) # MaxCombo test 1 mc2_test <- data.frame( rho = 0, gamma = c(0, .5), tau = -1, test = 1:2, analysis = 1, analysis_time = trial_duration ) mc_2 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc2_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test 2 mc3_test <- data.frame( rho = c(0, 0, .5), gamma = c(0, .5, .5), tau = -1, test = 1:3, analysis = 1, analysis_time = trial_duration ) mc_3 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc3_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc4_test <- data.frame( rho = c(0, 0, .5, .5), gamma = c(0, .5, .5, 0), tau = -1, test = 1:4, analysis = 1, analysis_time = trial_duration ) mc_4 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc4_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # Magirr-Burman rho=-1, gamma=0, tau = 6 test mb_6 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, 
arm1, rho = -1, gamma = 0, tau = 15) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) tab_new <- tibble( `Study duration` = trial_duration, N = fh_05$analysis$n[1], Events = fh_05$analysi$event[1], `Events/N` = Events / N, # We use the AHR from regular WLR as the AHR of different MaxCombo test AHR = as.numeric(fh_00$analysis$ahr[1]), `FH(0, 0.5) power` = fh_05$bound$probability[1], `FH(0, 0) power` = fh_00$bound$probability[1], `MC2 power` = mc_2$bound$probability[1], `MC4 power` = mc_4$bound$probability[1], `MC3 power` = mc_3$bound$probability[1], `MB6 power` = mb_6$bound$probability[1] ) tab <- rbind(tab, tab_new) } tab %>% gt() %>% fmt_number(columns = c(2, 3), decimals = 1) %>% fmt_number(columns = 4, decimals = 2) %>% fmt_number(columns = 5, decimals = 4) %>% fmt_number(columns = 6:11, decimals = 2)"},{"path":"https://merck.github.io/gsDesign2/articles/story-compare-power-delay-effect.html","id":"an-alternative-scenario","dir":"Articles","previous_headings":"","what":"An Alternative Scenario","title":"Power for delayed effect scenarios","text":"Now consider alternate scenario placebo group starts median, piecewise change median 30 16 months hazard ratio 0.85 late period.","code":"enroll_rate <- define_enroll_rate(duration = 12, rate = 1) fail_rate <- define_fail_rate( duration = c(6, 10, 100), # In Scenario 1: fail_rate = log(2) / 15, fail_rate = log(2) / c(15, 15, 30), dropout_rate = 0.001, # In Scenario 1: hr = c(1, .6) hr = c(1, .6, .85) ) enroll_rate %>% gt() %>% tab_header(title = \"Enrollment Table of Scenario 2\") fail_rate %>% gt() %>% tab_header(title = \"Failure Table of Scenario 2\") tab <- NULL for (trial_duration in seq(20, 60, 4)) { # Fleming-Harrington rho=0, gamma=0.5 test fh_05 <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.15, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, analysis_time = trial_duration ) |> to_integer() # Regular logrank test fh_00 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) }, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, analysis_time = trial_duration, event = .1 ) # MaxCombo test mc2_test <- data.frame( rho = 0, gamma = c(0, .5), tau = -1, test = 1:2, analysis = 1, analysis_time = trial_duration ) mc_2 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc2_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc3_test <- data.frame( rho = c(0, 0, .5), gamma = c(0, .5, .5), tau = -1, test = 1:3, analysis = 1, analysis_time = trial_duration ) mc_3 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc3_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # MaxCombo test mc4_test <- data.frame( rho = c(0, 0, .5, .5), gamma = c(0, .5, .5, 0), tau = -1, test = 1:4, analysis = 1, analysis_time = trial_duration ) mc_4 <- gs_power_combo( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, fh_test = mc4_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend 
= 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.01) ) # Magirr-Burman rho=-1, gamma=0, tau = 6 test mb_6 <- gs_power_wlr( enroll_rate = fh_05$enroll_rate, fail_rate = fail_rate, ratio = 1, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = -1, gamma = 0, tau = 15) }, upper = gs_b, lower = gs_b, upar = qnorm(.975), lpar = -Inf, analysis_time = trial_duration, event = .1 ) tab_new <- tibble( `Study duration` = trial_duration, N = fh_05$analysis$n[1], Events = fh_05$analysi$event[1], `Events/N` = Events / N, # We use the AHR from regular WLR as the AHR of different MaxCombo test AHR = as.numeric(fh_00$analysis$ahr[1]), `FH(0, 0.5) power` = fh_05$bound$probability[1], `FH(0, 0) power` = fh_00$bound$probability[1], `MC2 power` = mc_2$bound$probability[1], `MC4 power` = mc_4$bound$probability[1], `MC3 power` = mc_3$bound$probability[1], `MB6 power` = mb_6$bound$probability[1] ) tab <- rbind(tab, tab_new) } tab %>% gt() %>% fmt_number(columns = c(2, 3), decimals = 1) %>% fmt_number(columns = 4, decimals = 2) %>% fmt_number(columns = 5, decimals = 4) %>% fmt_number(columns = 6:11, decimals = 2)"},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Computing expected events by interval at risk","text":"document derives algorithm computing expected events observed model piecewise constant enrollment, failure dropout rates similar Lachin Foulkes (1986). Specifically, design enable computation average hazard ratio use elsewhere approximate sample size fixed group sequential designs non-proportional hazards assumption (Kalbfleisch Prentice (1981), Schemper, Wakounig, Heinze (2009)). expected events calculation outlined implemented function expected_event().","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"general-formulation-and-notation","dir":"Articles","previous_headings":"","what":"General formulation and notation","title":"Computing expected events by interval at risk","text":"notation, study time scale denoted \\omega study start first opening enrollment \\omega=0. use variable t indicate patient time t=0 representing time patient enrolled. assume patient time enrollment event independent identically distributed subjects enrolled. also assume patient time censoring independent identically distributed subjects enrolled. individual, let X>0 denote patient time event Y>0 denote patient time loss--follow-. also let U denote (independent) study time entry patient. assume triplet X, Y, U independent. consider single treatment group stratum assume subjects enroll according Poisson process entry rate g(\\omega)\\geq 0 0 \\leq \\omega. expected number subjects enrolled study time \\omega simply \\begin{equation} G(\\omega)=\\int_0^\\omega g(u)du. \\end{equation} Analysis time--event data done using time enrollment patient event, drops , censored prior event time data cutoff; consider data cutoff fixed time \\Omega. key counts consider : \\bar{N}(t) : number patients events study least duration 00, Y_m>0 random variables independent study entry time U. let X_m Y_m define X Y, respectively, interval (t_{m-1},t_m], m=1,2,\\ldots,M, follows: \\begin{align} X&=\\sum_{m=1}^M \\min(X_m,t_m-t_{m-1}) \\prod_{j=1}^{m-1}\\{X_j>t_j-t_{j-1}\\}\\label{eq:Xdef}\\\\ Y&=\\sum_{m=1}^M \\min(Y_m,t_m-t_{m-1})\\prod_{j=1}^{m-1}\\{Y_j>t_j-t_{j-1}\\}\\label{eq:Ydef}. 
\\end{align} assume X_m Y_m independent exponentially distributed failure rates \\lambda_m \\eta_m, respectively, m=1,2,\\ldots,M. now assume subjects enroll constant rate J intervals defined 0=\\omega_0<\\omega_1<\\ldots<\\omega_J<\\infty. denote enrollment rates \\begin{equation}g(\\omega)=\\gamma_j\\geq 0\\label{eq:gj}\\end{equation} \\omega interval (\\omega_{j-1},\\omega_j], j=0,1,2,\\ldots,J. assume \\gamma_1>0, j>1 assume \\gamma_j \\geq 0. Letting G_0=0 recursively define j=1,\\ldots,J \\begin{equation}G_j=G(\\omega_j)=G_{j-1}+\\gamma_j(\\omega_j-\\omega_{j-1})\\label{eq:Gj}\\end{equation} thus \\omega\\[\\omega_{j-1},\\omega_j] expected enrollment study time \\omega \\begin{equation}G(\\omega)=G_{j-1}+\\gamma_j(\\omega-\\omega_{j-1}).\\label{eq:ENpw}\\end{equation}","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"an-example-under-the-piecewise-model","dir":"Articles","previous_headings":"The piecewise model","what":"An example under the piecewise model","title":"Computing expected events by interval at risk","text":"consider example piecewise model assuming J=3, \\omega_j=1,2,7 \\gamma_j=3,2,0 j=1,2,3. assume M=2 t_m=4,\\infty, failure rates \\lambda_m=.03,.06, dropout rates \\eta_m=0.001,.002. plot following plot enrollment rate axis right failure dropout rate axis left. plot \\omega reverse order related integration equation E\\{\\bar{n}(t_1,t_2)\\} . also plotted vertical dot-dashed line point either enrollment rate failure (dropout) rate changes.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"organizing-calculations-under-the-piecewise-model","dir":"Articles","previous_headings":"The piecewise model","what":"Organizing calculations under the piecewise model","title":"Computing expected events by interval at risk","text":"now proceed define algorithms computing expected events observed interval model piecewise constant enrollment, failure rates, dropout rates. assume study duration \\Omega=t_M. assume without loss generality sequence t_m, m=1,2,\\ldots, M constant failure rate \\lambda_m dropout rate \\eta_m interval (t_{m-1},t_m] well constant enrollment rate \\gamma_m interval (t_M-t_m,t_M-t_{m-1}]. Deriving intervals relatively straightforward exercise shown example . example, example , change points vertical lines drawn following scenario calculation purposes. define m=1,\\ldots,M intermediate probability calculations use calculating \\bar n(t_{m-1},t_m) follows: \\begin{align} q_m&=P\\{\\min(X_m,Y_m)>t_m-t_{m-1}\\}=\\exp^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})} \\label{eq:qm}\\\\ Q_m&=P\\{\\min(X,Y)>t_m\\}=\\prod_{j=1}^m q_j\\label{eq:Qm}\\\\ d_m&=P\\{t_{m-1}t_{m-1}\\}\\cdot P\\{0<\\min (X_m,Y_m)\\leq t_m-t_{m-1},X_m\\leq Y_m\\}\\\\ &=P\\{\\min(X,Y)>t_{m-1}\\}\\cdot P\\{0<\\min (X_m,Y_m)\\leq t_m-t_{m-1}\\}\\cdot P\\{X_m\\leq Y_m|0<\\min (X_m,Y_m)\\leq t_m-t_{m-1}\\}\\\\ &=Q_{m-1}(1-e^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})}) \\frac{\\lambda_m}{\\lambda_m+\\eta_m}\\\\ \\bar n_m&=E\\{\\bar n(t_{m-1},t_m)\\} \\end{align} Note \\lambda_m+\\eta_m=0, d_m=0. 
, \\begin{align} \\bar n_m&=G(t_M-t_m)P\\{t_{m-1}t_{m-1}\\} \\int_0^{t_m-t_{m-1}}g_{M+1-m}P\\{X_m\\leq v, X_m\\leq Y_m\\}dv\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\int_0^{t_m-t_{m-1}}\\left(1-\\exp^{-(\\lambda_m+\\eta_m)v}\\right)dv\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\left(t_m-t_{m-1}-\\frac{1-\\exp^{-(\\lambda_m+\\eta_m)(t_m-t_{m-1})}}{\\lambda_m+\\eta_m}\\right)\\\\ &=G_{M+1-m}d_m + \\frac{Q_{m-1}g_{M+1-m}\\lambda_m}{\\lambda_m+\\eta_m} \\left(t_m-t_{m-1}-\\frac{1-q_m}{\\lambda_m+\\eta_m}\\right) \\end{align} now add q_m, Q_m, d_m calculations enable computation \\bar n_m, expected events time interval.","code":"name_tem <- names(x) names(x) <- c(\"m\", \"tm\", \"lambda\", \"eta\", \"j\", \"omega\", \"gamma\") y <- x %>% mutate( tdel = tm - lag(tm, default = 0), q = exp(-(lambda + eta) * tdel), Q = lag(cumprod(q), default = 1), d = Q * (1 - q) * lambda / (lambda + eta), G = c(5, 5, 3, 0), nbar = G * d + (lambda * Q * gamma) / (lambda + eta) * (tdel - (1 - q) / (lambda + eta)) ) yy <- y names(yy) <- c( \"$m$\", \"$t_m$\", \"$\\\\lambda_m$\", \"$\\\\eta_m$\", \"$j$\", \"$\\\\omega_j=t_M-t_{m-1}$\", \"$\\\\gamma_j$\", \"$t_m-t_{m-1}$\", \"$q_m$\", \"$Q_{m-1}$\", \"$d_m$\", \"$G_{j-1}$\", \"$\\\\bar{n}_m$\" ) yy <- yy %>% select(c(1:7, 12, 8:11, 13)) yy %>% kable(digits = 4) %>% kable_styling(c(\"striped\", \"bordered\")) %>% add_header_above(c( \"Failure and dropout rates\" = 4, \"Enrollment\" = 4, \"Events by time period\" = 5 ))"},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-expected-events.html","id":"verifying-calculations","dir":"Articles","previous_headings":"The piecewise model","what":"Verifying calculations","title":"Computing expected events by interval at risk","text":"check total number events using gsDesign function eEvents(). First, sum \\bar{n}_m values sum(y$nbar) get 1.083773 compare : Next, examine periods defined fail_rate: Now group rows y intervals. Finally, approximate specific numbers using simulation. First, simulate large dataset confirm simulation targeted enrollment pattern. 
Now confirm expected events follow-interval given targeted enrollment.","code":"event <- gsDesign::eEvents( lambda = y$lambda, eta = y$eta, gamma = y$gamma[rev(seq_along(y$gamma))], S = y$tdel[seq_len(length(y$tdel) - 1)], R = y$tdel[rev(seq_along(y$tdel))], T = max(y$tm) )$d event #> [1] 1.083773 expected_event( enroll_rate = define_enroll_rate(duration = c(1, 1), rate = c(3, 2)), fail_rate = define_fail_rate(duration = c(4, 3), fail_rate = c(.03, .06), dropout_rate = c(.001, .002)), total_duration = 7, simple = FALSE ) #> t fail_rate event #> 1 0 0.03 0.5642911 #> 2 4 0.06 0.5194821 y %>% mutate(t = c(0, 4, 4, 4)) %>% group_by(t) %>% summarise( fail_rate = first(lambda), Events = sum(nbar) ) #> # A tibble: 2 × 3 #> t fail_rate Events #> #> 1 0 0.03 0.564 #> 2 4 0.06 0.519 nsim <- 1e6 xx <- simtrial::simPWSurv( n = nsim, block = (rep(\"xx\", 4)), enroll_rate = define_enroll_rate(rate = c(3, 2) * nsim / 5, duration = c(1, 1)), fail_rate = tibble( stratum = \"All\", period = 1:2, Treatment = \"xx\", rate = c(.03, .06), duration = c(4, Inf) ), dropout_rate = tibble( stratum = \"All\", period = 1:2, Treatment = \"xx\", rate = c(.001, .002), duration = c(4, Inf) ) ) saveRDS(xx, file = \"fixtures/compute_expected_events.rds\", compress = \"xz\") xx <- readRDS(\"fixtures/compute_expected_events.rds\") ecat <- 1 + (xx$enrollTime > 1) + (xx$enrollTime > 2) cat(\"Enrollment pattern: \", table(ecat) / nsim) #> Enrollment pattern: 0.599697 0.399995 0.000308 #' This function is borrowed from Merck/simtrial. #' We copy it here to make gsDesign2 self-contained. #' #' Cut a Dataset for Analysis at a Specified Date #' #' @param x a time-to-event dataset, e.g., generated by \\code{simPWSurv} #' @param cut_date date relative to start of randomization (\\code{cte} from input dataset) #' at which dataset is to be cut off for analysis #' @return A dataset ready for survival analysis #' @examples #' # Use default enrollment and event rates and cut at calendar time 5 after start #' # of randomization #' library(dplyr) #' simPWSurv(n = 20) %>% cut_data(5) cut_data <- function(x, cut_date) { x %>% filter(enrollTime <= cut_date) %>% mutate( tte = pmin(cte, cut_date) - enrollTime, event = fail * (cte <= cut_date) ) %>% select(tte, event, Stratum, Treatment) } yy <- xx %>% cut_data(7) %>% filter(event == 1) %>% mutate(tcat = 4 + (tte > 4) + (tte > 5) + (tte > 6)) cat(\"Event by interval: \", table(yy$tcat) / nsim * 5, \"\\n\") #> Event by interval: 0.56421 0.2591 0.19403 0.067865 cat(\"Total events: \", sum(yy$event) / nsim * 5) #> Total events: 1.085205"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Computing bounds under non-constant treatment effect","text":"consider group sequential designs possibly non-constant treatment effects time. can useful situations assumed non-proportional hazards model laid vignettes/articles/story-npe-background.Rmd. general, assume K \\geq 1 analyses statistical information \\mathcal{}_k information fraction t_k=\\mathcal{}_k/\\mathcal{}_k analysis k, 1\\leq k\\leq K. denote null hypothesis H_{0}: \\theta(t)=0 alternate hypothesis H_1: \\theta(t)=\\theta_1(t) t> 0 t represents information fraction study. study planned stop information fraction t=1, define \\theta(t) t>0 since trial can overrun planned statistical information final analysis. 
, use shorthand notation \\theta represent \\theta(), \\theta=0 represent \\theta(t)\\equiv 0 t \\theta_1 represent \\theta_i(t_k), effect size analysis k, 1\\leq k\\leq K. purposes, H_0 represent treatment difference, represent non-inferiority hypothesis. Recall assume K analyses bounds -\\infty \\leq a_k< b_k<\\leq \\infty 1\\leq k < K -\\infty \\leq a_K\\leq b_K<\\infty. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{k-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. total probability crossing upper bound prior crossing lower bound denoted \\alpha(\\theta)\\equiv\\sum_{k=1}^K\\alpha_k(\\theta). non-binding bounds, define probability \\alpha_{k}^{+}(\\theta)=P_{\\theta}\\{\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{k-1} \\{Z_{j}< b_{j}\\}\\} ignores lower bounds computing upper boundary crossing probabilities. non-binding Type error probability ever crossing upper bound \\theta=0. value \\alpha^+_{k}(0) commonly referred amount Type error spent analysis k, 1\\leq k\\leq K. total upper boundary crossing probability trial denoted one-sided scenario \\alpha^+(\\theta) \\equiv\\sum_{k=1}^{K}\\alpha^+_{k}(\\theta). primarily interested \\alpha(\\theta) compute power \\theta > 0. Type error, may interested \\alpha(0) binding lower bounds, often consider non-binding Type error calculations, \\alpha^{+}(0). denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). Efficacy bounds b_k, 1\\leq k\\leq K, group sequential design derived control Type level \\alpha=\\alpha(0). Lower bounds a_k, 1\\leq k\\leq K may used control boundary crossing probabilities either null hypothesis (2-sided testing), alternate hypothesis hypothesis (futility testing). Thus, may consider 3 values \\theta(t): null hypothesis \\theta_0(t)=0 computing efficacy bounds, value \\theta_1(t) computing lower bounds, value \\theta_a(t) computing sample size power. refer information 3 assumptions \\mathcal{}^{(0)}(t), \\mathcal{}^{(1)}(t), \\mathcal{}^{()}(t), respectively. Often assume \\mathcal{}(t)=\\mathcal{}^{(0)}(t)=\\mathcal{}^{(1)}(t)=\\mathcal{}^{()}(t). note information may differ different values \\theta(t). fixed designs, Lachin (2009) computes sample size based different variances null alternate hypothesis.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"spending-bounds","dir":"Articles","previous_headings":"","what":"Spending bounds","title":"Computing bounds under non-constant treatment effect","text":"consider different boundary types gsDesign package simplify two types according whether lower bounds binding non-binding. concept implicitly derive Z-value bounds a_k, b_k, k=1,\\cdots,K based probabilities specified following table. include test.type argument gsDesign::gsDesign() function reference. Boundary crossing probabilities used set Z-value boundaries can reduced just two types distinguishing whether lower bounds binding non-binding: Reduced options boundary crossing probabilities used set Z-value boundaries second table used \\theta=0 derive upper bound control Type error cases. chosen arbitrary \\theta 0 test.type, \\theta_a \\beta-spending arbitrary \\theta_1 otherwise. note one-sided design let \\beta_k(\\theta)=0 a_k=-\\infty, k=1,\\cdots,K. test.type=3, 4 let \\theta=\\theta_a, test.type=5, 6 \\theta \\geq 0 arbitrary. 
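These boundary crossing probabilities can be evaluated numerically for any fixed set of bounds. A minimal sketch using gsDesign::gsProbability() with rounded values similar to the efficacy bounds shown in the quick-start example; the bounds and information fractions below are illustrative, and a deeply negative lower bound mimics the non-binding, no-futility case.

x <- gsDesign::gsProbability(
  k = 3, theta = 0,
  n.I = c(0.5, 0.75, 1),  # only the ratios matter under H0
  a = rep(-20, 3),        # effectively no lower bound
  b = c(3.01, 2.27, 2.03) # illustrative efficacy bounds
)
cumsum(x$upper$prob) # cumulative upper boundary crossing probability, close to 0.025 overall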
note asymmetric \\alpha-spending bounds can derived using test.type > 2 \\theta=0.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"two-sided-testing-and-design","dir":"Articles","previous_headings":"","what":"Two-sided testing and design","title":"Computing bounds under non-constant treatment effect","text":"denote alternative H_{}: \\theta(t)=\\theta_a(t); always assume H_a power calculations; using \\beta-spending also use H_a controlling lower boundary a_k crossing probabilities letting \\theta=\\theta_a lower bound spending. value \\theta(t)>0 reflect positive benefit. restrict alternate hypothesis \\theta_a(t)>0 t. value \\theta(t) referred (standardized) treatment effect information fraction t. assume interest stopping early good evidence reject one hypothesis favor . a_k= -\\infty analysis k 1\\leq k\\leq K alternate hypothesis rejected analysis k; .e., futility bound analysis k. k=1,2,\\ldots,K, trial stopped analysis k reject H_0 a_j0 \\epsilon= 0.001 yields b_k=3.09. original proposal use b_K=\\Phi^{-1}(1-\\alpha) final analysis, fully control one-sided Type error level \\alpha suggest computing final bound b_K using algorithm \\alpha(0)=\\alpha. Bounds computed spending \\alpha_k(0) analysis k can computed using equation (9) b_1. k=2,\\ldots,K algorithm previous section used. noted Jennison Turnbull (1999), b_1,\\ldots,b_K determined null hypothesis depend t_k \\alpha_k(0) dependence \\mathcal{}_k, k=1,\\ldots,K. computing bounds based \\beta_k(\\theta), k=1,\\ldots,K, \\theta(t_k)\\neq 0 additional dependency a_k depending t_k b_k, k=1,\\ldots,K, also final total information \\mathcal{}_K. Thus, spending bound something null hypothesis needs recomputed time \\mathcal{}_K changes, whereas needs computed \\theta(t_k)=0, k=1,\\ldots,K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"bounds-based-on-boundary-families","dir":"Articles","previous_headings":"Two-sided testing and design","what":"Bounds based on boundary families","title":"Computing bounds under non-constant treatment effect","text":"Assume constants b_1^*,\\ldots,b_K^* total targeted one-sided Type error \\alpha. wish find C_u function t_1,\\ldots t_K b_k=C_ub_k^* \\alpha(0)=\\alpha. Thus, problem solve C_u. a_k, k=1,2,\\ldots,K fixed simple root finding problem. Since one normally normally uses non-binding efficacy bounds, normally case a_k=-\\infty, k=1,\\ldots,K problem. Now assume constants a_k^* wish find C_l a_k=C_la_k^*+\\theta(t_k)\\sqrt{\\mathcal{}_k} k=1,\\ldots,K \\beta(\\theta)=\\beta. use constant upper bounds previous paragraph, finding C_l simple root-finding problem. 2-sided symmetric bounds a_k=-b_k, k=1,\\ldots,K, need solve C_u use simple root finding. point, solve type bound asymmetric upper lower bounds.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-compute-npe-bound.html","id":"sample-size","dir":"Articles","previous_headings":"","what":"Sample size","title":"Computing bounds under non-constant treatment effect","text":"sample size, assume t_k, \\theta(t_k) 1,\\ldots,K fixed. assume \\beta(\\theta) decreasing \\mathcal{} decreasing. automatically case \\theta(t_k)>0, k=1,\\ldots,K many cases. 
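The search over total information implied here (and stated in the next sentence) reduces, for a single analysis, to a closed form that a numerical search must reproduce. A minimal sketch with an assumed standardized effect; theta = 0.2 is illustrative only.

theta <- 0.2        # illustrative standardized effect
alpha <- 0.025      # 1-sided Type I error
power_target <- 0.9
# Power of a single fixed analysis as a function of statistical information
power_fn <- function(info) pnorm(theta * sqrt(info) - qnorm(1 - alpha))
# Numerical search for the information giving the targeted power
uniroot(function(info) power_fn(info) - power_target, interval = c(1, 1e4))$root
# Closed form for a single analysis; should agree with the search result
((qnorm(1 - alpha) + qnorm(power_target)) / theta)^2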
","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Design using average hazard ratio","text":"We consider fixed and group sequential design under non-proportional hazards when testing with the logrank test. We focus primarily on the average hazard ratio approach, expanding the asymptotic approach of Mukhopadhyay et al. (2020) to group sequential design and complex enrollment assumptions. The theoretical background is provided in other vignettes of this package. We provide basic examples along the lines of Lin et al. (2020) to illustrate design considerations under the following assumptions: proportional hazards, a short delayed effect, a longer delayed effect, and crossing survival. Illustrations include: expected average hazard ratio (AHR) over time; expected event accumulation over time; the impact of planned study duration on the required number of events; power across scenarios when the trial is designed under the assumption of a short delayed effect; timing of interim analyses; and \\alpha-spending considerations. We focus on results rather than code, but hidden code can be revealed in the examples.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"packages-used","dir":"Articles","previous_headings":"","what":"Packages used","title":"Design using average hazard ratio","text":"The primary package needed is gsDesign2. The other packages loaded are supportive.","code":"library(gsDesign) library(gsDesign2) library(ggplot2) library(dplyr) library(gt) library(tidyr) library(tibble)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"scenarios","dir":"Articles","previous_headings":"","what":"Scenarios","title":"Design using average hazard ratio","text":"Expected enrollment duration is 18 months with piecewise constant enrollment rates escalating every 2 months until month 6, after which enrollment is assumed to have reached steady state. We later also assume a similar ramp-up period with 24 months of expected enrollment duration. We consider the following failure rate assumptions: control group with exponential failure rate and a median of 14 months, constant hazard ratio of 0.7 (experimental/control); control group with exponential failure rate and a median of 10 months, hazard ratio of 1 for 4 months followed by a hazard ratio of 0.6; control group with exponential failure rate and a median of 10 months, hazard ratio of 1 for 6 months followed by a hazard ratio of 0.6; control group with exponential failure rate and a median of 10 months, hazard ratio of 1.5 for 4 months followed by a hazard ratio of 0.5. The survival curves for the 4 scenarios are shown below. The average hazard ratio for the 4 scenarios is also shown; we note that for the Shorter delayed effect scenario, the average hazard ratio approaches that of the PH scenario by a study duration of 36 months. The number of events for the 4 scenarios is shown as well. For the 3 NPH scenarios, events accumulate faster than in the PH scenario due to the lower control median and/or the delayed effect. Here we see that slight variations in control failure rates and a potential delayed effect can substantially accelerate the accumulation of events. With an event-based cutoff for analysis, such slight variations can lead to earlier analyses than anticipated, where the average hazard ratio expected with longer follow-up is never achieved. 
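As a quick illustration of how modest changes in failure rates shift event accumulation, expected events per 100 enrolled at a candidate analysis time can be compared under two hypothetical control medians with ahr(); the enrollment ramp and rates below are illustrative only, not the scenario table used later in this article.

library(dplyr)
library(gsDesign2)

# Illustrative 18-month enrollment with a 6-month ramp-up, scaled to 100 subjects
enroll <- define_enroll_rate(duration = c(2, 2, 2, 12), rate = c(1, 2, 3, 4))
enroll$rate <- enroll$rate * 100 / sum(enroll$duration * enroll$rate)

events_at_24 <- function(control_median) {
  fail <- define_fail_rate(
    duration = c(4, 100), fail_rate = log(2) / control_median,
    hr = c(1, .6), dropout_rate = .001
  )
  # expected events per 100 enrolled at month 24
  ahr(enroll_rate = enroll, fail_rate = fail, total_duration = 24)$event
}

events_at_24(control_median = 12)
events_at_24(control_median = 14)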
examine implications .","code":"# Set the enrollment table of totally 24 month enroll24 <- define_enroll_rate( duration = c(rep(2, 3), 18), # 6 month ramp-up of enrollment, 24 months enrollment time target rate = 1:4 # ratio of the enrollment rate ) # Adjust enrollment rates to enroll 100 subjects enroll24$rate <- enroll24$rate * 100 / sum(enroll24$duration * enroll24$rate) # Set the enrollment table for 18 month expected enrollment enroll18 <- define_enroll_rate( duration = c(rep(2, 3), 12), # 6 month ramp-up of enrollment, 18 months enrollment time target rate = 1:4 # ratio of the enrollment rate ) # Adjust enrollment rates to enroll 100 subjects enroll18$rate <- enroll18$rate * 100 / sum(enroll18$duration * enroll18$rate) # Put these in a single tibble by scenario # We will use 18 month enrollment for delayed effect and crossing hazards scenarios enroll_rate <- rbind( enroll18 %>% mutate(Scenario = \"PH\"), enroll18 %>% mutate(Scenario = \"Shorter delayed effect\"), enroll18 %>% mutate(Scenario = \"Longer delayed effect\"), enroll18 %>% mutate(Scenario = \"Crossing\") ) month <- c(0, 4, 6, 44) duration <- month - c(0, month[1:3]) control_rate <- log(2) / c(rep(16, 4), rep(14, 4), rep(14, 4)) s <- tibble( Scenario = c(rep(\"PH\", 4), rep(\"Delayed effect\", 4), rep(\"Crossing\", 4)), Treatment = rep(\"Control\", 12), Month = rep(month, 3), duration = rep(duration, 3), rate = control_rate, hr = c(rep(.7, 4), c(1, 1, 1, .575), c(1.5, 1.5, .5, .5)) ) s <- rbind( s, s %>% mutate(Treatment = \"Experimental\", rate = rate * hr) ) %>% group_by(Scenario, Treatment) %>% mutate(Survival = exp(-cumsum(duration * rate))) ggplot(s, aes(x = Month, y = Survival, col = Scenario, lty = Treatment)) + geom_line() + scale_y_log10(breaks = (1:10) / 10, lim = c(.1, 1)) + scale_x_continuous(breaks = seq(0, 42, 6)) # get 4 scenarios control_median <- c(14, 12, 12, 12) month <- c(0, 4, 6, 44) duration <- month - c(0, month[1:3]) # HR by time period for each scenario hr <- c( rep(.7, 4), # constant hazard ratio of 0.7 1, 1, .6, .6, # hazard ratio of 1 for 4 months followed by a hazard ratio of 0.6. 1, 1, 1, .6, # hr = 1 for 6 months followed by hr = .6 1.5, 1.5, .5, .5 ) # hazard ratio of 1.5 for 4 months followed by a hazard ratio of 0.5. 
# Put parameters together in a tibble s <- tibble( Scenario = c(rep(\"PH\", 4), rep(\"Shorter delayed effect\", 4), rep(\"Longer delayed effect\", 4), rep(\"Crossing\", 4)), Treatment = rep(\"Control\", 16), Month = rep(month, 4), # Periods for constant HR duration = rep(duration, 4), rate = log(2) / c( rep(control_median[1], 4), rep(control_median[2], 4), rep(control_median[3], 4), rep(control_median[4], 4) ), hr = hr ) # calculate the survival at each change point for each scenario s <- rbind( s, s %>% mutate(Treatment = \"Experimental\", rate = rate * hr) ) %>% group_by(Scenario, Treatment) %>% mutate(Survival = exp(-cumsum(duration * rate))) # plot the survival curve ggplot(s, aes(x = Month, y = Survival, col = Scenario, lty = Treatment, shape = Treatment)) + geom_line() + annotate(\"text\", x = 18, y = .1, label = \"Control for scenarios other than PH have same survival\") + scale_y_log10(breaks = (1:10) / 10, lim = c(.07, 1)) + scale_x_continuous(breaks = seq(0, 42, 6)) + ggtitle(\"Survival over time for 4 scenarios studied\") # Durations to be used in common for all failure rate scenarios dur <- month[2:4] - month[1:3] # Set the failure table # We use exponential failure, proportional hazards fail_rate <- rbind( tibble( Scenario = \"PH\", stratum = \"All\", duration = dur, fail_rate = log(2) / 14, hr = hr[1], dropout_rate = .001 ), tibble( Scenario = \"Shorter delayed effect\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[6:8], dropout_rate = .001 ), tibble( Scenario = \"Longer delayed effect\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[10:12], dropout_rate = .001 ), tibble( Scenario = \"Crossing\", stratum = \"All\", duration = dur, fail_rate = log(2) / 11, hr = hr[14:16], dropout_rate = .001 ) ) hr <- do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), total_duration = c(.001, seq(4, 44, 4)) ) %>% mutate(Scenario = x) } ) ) ggplot(hr, aes(x = time, y = ahr, col = Scenario)) + geom_line() + scale_x_continuous(breaks = seq(0, 42, 6)) + ggtitle(\"Average hazard ratio (AHR) by study duration\", subtitle = \"Under the 4 scenarios examined\" ) ggplot(hr, aes(x = time, y = event, col = Scenario)) + geom_line() + scale_x_continuous(breaks = seq(0, 42, 6)) + ylab(\"Expected events per 100 enrolled\") + ggtitle(\"Expected event accumulation under the 4 scenarios studied\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"fixed-design-using-ahr-and-logrank","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Fixed Design using AHR and Logrank","title":"Design using average hazard ratio","text":"power fixed design 90% 2.5% one-sided Type error different scenarios consideration. now assume 18 month enrollment pattern scenarios. PH Shorter delayed effect scenarios need similar AHR, number events sample size 36 month study. two scenarios crossing survival curves large effect delay require substantially larger sample sizes due achieving similar AHR month 36. Assuming shorter delayed effect primary scenario wish protect power, long trial optimize tradeoffs sample size, AHR events required? inform tradeoff looking sizing trial different assumed trial durations failure rates assumed relative enrollment rates. 
counts events required perhaps interesting 24 month trial requires almost twice events powered 90% compared trial 42 months duration. study, consider 36 month trial duration reasonable tradeoff time, sample size power presumed delayed effect 4 months followed hazard ratio 0.6 thereafter.","code":"ss_ahr_fixed <- do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { xx <- gs_design_ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), analysis_time = 36, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, alpha = .025, beta = .1 ) ans <- xx$analysis %>% select(time, n, event, ahr) %>% mutate(Scenario = x) return(ans) } ) ) ss_ahr_fixed %>% gt() %>% fmt_number(columns = 1:3, decimals = 0) %>% fmt_number(columns = 4, decimals = 3) %>% tab_header( title = \"Sample Size and Events Required by Scenario\", subtitle = \"36 Month Trial duration, 2.5% One-sided Type 1 Error, 90% Power\" ) do.call( rbind, lapply( c(24, 30, 36, 42), function(x) { ans <- gs_design_ahr( enroll_rate = enroll_rate %>% filter(Scenario == \"Shorter delayed effect\"), fail_rate = fail_rate %>% filter(Scenario == \"Shorter delayed effect\"), analysis_time = x, upper = gs_b, upar = qnorm(.975), lower = gs_b, lpar = -Inf, alpha = .025, beta = .1 )$analysis %>% select(time, n, event, ahr) %>% mutate(Scenario = \"Shorter delayed effect\") return(ans) } ) ) %>% gt() %>% fmt_number(columns = 1:3, decimals = 0) %>% fmt_number(columns = 4, decimals = 3) %>% tab_header( title = \"Sample Size and Events Required by Trial Duration\", subtitle = \"Delayed Effect of 4 Months, HR = 0.6 Thereafter; 90% Power\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"alternate-hypothesis-mapping","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Alternate Hypothesis Mapping","title":"Design using average hazard ratio","text":"different scenarios interest, can examine expected number events time periods interest. Recall alternate hypothesis assumes treatment effect (HR=1) 4 months HR = 0.6 thereafter. scenarios, wish base futility bound assumption plus number events first 4 months 4 months, can compute average hazard ratio alternate hazard ratio scenario 20 months follows. can see interim futility spending bound based alternate hypothesis can depend fairly heavily enrollment control failure rate. Note also time interim analysis, alternate hypothesis AHR can computed fashion based observed events time period. 
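The alternate hypothesis AHR used here is simply an event-weighted geometric mean of the period-specific hazard ratios assumed under H1; with made-up event counts the calculation is:

# Toy numbers (not from the scenarios above): events by time period and the
# H1 hazard ratio assumed for each period (months 0-4, 4-6, 6+)
event <- c(30, 25, 80)
hr1 <- c(1, 0.6, 0.6)

# Event-weighted geometric mean of the period hazard ratios
exp(sum(event * log(hr1)) / sum(event))   # approximately 0.672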
Note can quite different scenario HR; e.g., PH, assume HR=0.7 throughout, futility bound comparison, compute blinded AHR decreases analysis alternate hypothesis.","code":"events_by_time_period <- NULL for (g in c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\")) { events_by_time_period <- rbind( events_by_time_period, pw_info( enroll_rate = enroll_rate %>% filter(Scenario == g), fail_rate = fail_rate %>% filter(Scenario == g), total_duration = c(12, 20, 28, 36) ) %>% mutate(Scenario = g) ) } # Time periods for each scenario were 0-4, 4-6, and 6+ # Thus H1 has HR as follows hr1 <- tibble(t = c(0, 4, 6), hr1 = c(1, .6, .6)) ahr_by_analysis <- events_by_time_period %>% full_join(hr1) %>% group_by(Scenario, time) %>% summarize(AHR1 = exp(sum(event * log(hr1)) / sum(event))) ahr_by_analysis %>% pivot_wider(names_from = Scenario, values_from = AHR1) %>% gt() %>% fmt_number(columns = 2:5, decimals = 3)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"group-sequential-design","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios","what":"Group Sequential Design","title":"Design using average hazard ratio","text":"assume design delayed effect model delay long long-term average hazard ratio benefit strong. proportional hazards scenario, look power alternate scenarios. plan 36 month group sequential design Shorter delayed effect scenario. Interim analyses planned 12, 20, 28 months.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"ahr-method","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios > Group Sequential Design","what":"AHR method","title":"Design using average hazard ratio","text":"scenario, now wish compute adjusted expected futility bounds power implied.","code":"analysis_time <- c(12, 20, 28, 36) upar <- list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL, theta = 0) lpar <- list(sf = gsDesign::sfHSD, total_spend = .1, param = -2, timing = NULL, theta = NULL) nph_asymmetric <- gs_design_ahr( enroll_rate = enroll_rate |> filter(Scenario == \"Shorter delayed effect\"), fail_rate = fail_rate |> filter(Scenario == \"Shorter delayed effect\"), ratio = 1, alpha = .025, beta = 0.1, # Information fraction not required (but available!) analysis_time = analysis_time, # Function to enable spending bound upper = gs_spending_bound, lower = gs_spending_bound, # Spending function and parameters used upar = upar, lpar = lpar ) summary(nph_asymmetric) %>% as_gt() do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { ahr1 <- (ahr_by_analysis %>% filter(Scenario == x))$AHR1 lparx <- lpar lparx$theta1 <- -log(ahr1) yy <- gs_power_ahr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), event = NULL, analysis_time = c(12, 20, 28, 36), upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lparx )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = \"event\", decimals = 1) %>% fmt_number(columns = 5:10, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-ahr.html","id":"weighted-logrank-method","dir":"Articles","previous_headings":"Sample Size and Events by Scenarios > Group Sequential Design","what":"Weighted Logrank Method","title":"Design using average hazard ratio","text":"investigate two types weighting scheme weight logrank method. 
fixed design first weighting scheme four scenario summarized follows. fixed design second weighting scheme four scenario summarized follows.","code":"do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { gs_design_wlr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5, tau = 4) }, alpha = .025, beta = .1, upar = qnorm(.975), lpar = -Inf, analysis_time = 44 )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) # Ignore tau or (tau can be -1) do.call( rbind, lapply( c(\"PH\", \"Shorter delayed effect\", \"Longer delayed effect\", \"Crossing\"), function(x) { gs_design_wlr( enroll_rate = enroll_rate %>% filter(Scenario == x), fail_rate = fail_rate %>% filter(Scenario == x), weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, alpha = .025, beta = .1, upar = qnorm(.975), lpar = -Inf, analysis_time = 44 )$analysis %>% mutate(Scenario = x) } ) ) %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Trial design with spending under NPH","text":"vignette covers implement designs trials spending assuming non-proportional hazards. primarily concerned practical issues implementation rather design strategies, ignore design strategy.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"scenario-for-consideration","dir":"Articles","previous_headings":"","what":"Scenario for consideration","title":"Trial design with spending under NPH","text":"set enrollment, failure dropout rates along assumptions enrollment duration times analyses. assume 4 analysis (3 interim analyses + 1 final analysis) conducted 18, 24, 30, 36 months trial enrollment opened. assume single stratum enrollment targeted last 12 months. first 2 months, second 2 months, third 2 months remaining months, relative enrollment rates 8:12:16:24. rates updated constant multiple time design note . assume hazard ratio (HR) 0.9 first 3 months 0.6 thereafter. 
also assume control time--event follows piecewise exponential distribution median 8 month first 3 months 14 months thereafter.","code":"n_analysis <- 4 analysis_time <- c(18, 24, 30, 36) enroll_rate <- define_enroll_rate( duration = c(2, 2, 2, 6), rate = c(8, 12, 16, 24) ) enroll_rate |> gt::gt() |> gt::tab_header(title = \"Planned Relative Enrollment Rates\") fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(8, 14), hr = c(.9, .6), dropout_rate = .001 ) fail_rate |> gt::gt() |> gt::tab_header(title = \"Table of Failure Rate Assumptions\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"fixed-design-with-no-interim-analysis","dir":"Articles","previous_headings":"","what":"Fixed design with no interim analysis","title":"Trial design with spending under NPH","text":"can derive power enrollment rates failure rates follows: now compute sample size translate continuous sample size integer sample size.","code":"fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, power = NULL, ratio = 1, study_duration = 36, event = NULL ) |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 216 151. 36 1.96 0.025 0.656 fixed_design <- fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, power = .9, ratio = 1, study_duration = 36, event = NULL ) |> to_integer() fixed_design$analysis #> # A tibble: 1 × 7 #> design n event time bound alpha power #> #> 1 ahr 410 287 36.0 1.96 0.025 0.901"},{"path":"https://merck.github.io/gsDesign2/articles/story-design-with-spending.html","id":"group-sequential-design","dir":"Articles","previous_headings":"Fixed design with no interim analysis","what":"Group sequential design","title":"Trial design with spending under NPH","text":"now consider group sequential design bounds derived using spending functions. target interim analysis 24 months final analysis 36 months. Spending efficacy futility based proportion events expected analysis divided total expected events final analysis.","code":"gs <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, info_frac = NULL, analysis_time = c(24, 36), upper = gs_spending_bound, lower = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1, param = NULL, timing = NULL), h1_spending = TRUE ) |> to_integer() gs |> summary() |> gt::gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"continuous-outcomes","dir":"Articles","previous_headings":"","what":"Continuous outcomes","title":"Statistical information under null and alternative hypothesis","text":"Imagine trial continuous outcome. Let X_{0, } \\sim N(\\mu_0, \\sigma^2) subjects = 1, \\ldots, n_0 control arm X_{1,} \\sim N(\\mu_1, \\sigma^2) patient (= 1, \\ldots, n_1) experimental arm. superiority design, tested hypothesis H_0: \\; \\mu_0 = \\mu_1 \\;\\;\\; \\text{vs.} \\;\\;\\; H_1:\\; \\mu_1 > \\mu_0. Suppose k-th analysis, n_{0k} subjects control arm, n_{1k} subjects experimental arm. \\delta_k difference group means, .e., \\delta_k = \\frac{\\sum_{=1}^{n_{1k}} X_{,1}}{n_{1k}} - \\frac{\\sum_{=1}^{n_{0k}} X_{,0}}{n_{0k}}. can estimated \\widehat\\delta_k = \\frac{\\sum_{=1}^{n_{1k}} x_{,1}}{n_{1k}} - \\frac{\\sum_{=1}^{n_{0k}} x_{,0}}{n_{0k}}, x_{,j} observation X_{,j} subject arm j. 
The statistical information \\mathcal I_k is given by \\mathcal I_k^{-1} = \\text{Var}(\\delta_k | H_0) = \\sigma^2 (1 / n_{1k} + 1 / n_{0k}), which is the same under H_0 and H_1, and it can be estimated by \\widehat{\\mathcal I}_k^{-1} = \\widehat\\sigma^2 (1 / n_{1k} + 1 / n_{0k}).","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"binary-outcomes","dir":"Articles","previous_headings":"","what":"Binary outcomes","title":"Statistical information under null and alternative hypothesis","text":"Imagine a trial with a binary outcome. Let X_{0,i} \\sim B(p_0) for patient i = 1, \\ldots, n_0 in the control arm and X_{1,i} \\sim B(p_1) for patient i = 1, \\ldots, n_1 in the experimental arm, where p_0 and p_1 are the failure probabilities. Suppose at the k-th analysis there are n_{0k} subjects in the control arm and n_{1k} subjects in the experimental arm. For a superiority design, the null and alternative hypotheses are H_0: \\; p_0 = p_1 = p \\;\\;\\; \\text{vs.} \\;\\;\\; H_1:\\; p_0 > p_1. The natural-scale treatment effect is \\delta_k = \\frac{\\sum_{i=1}^{n_{1k}} X_{i,1}}{n_{1k}} - \\frac{\\sum_{i=1}^{n_{0k}} X_{i,0}}{n_{0k}}, which can be estimated by \\widehat\\delta_k = \\frac{\\sum_{i=1}^{n_{1k}} x_{i,1}}{n_{1k}} - \\frac{\\sum_{i=1}^{n_{0k}} x_{i,0}}{n_{0k}}, where x_{i,j} is the observation of X_{i,j} for subject i in arm j. The statistical information is \\mathcal I_k^{-1} = \\text{Var}(\\delta_k) = \\left\\{ \\begin{array}{ll} p(1-p)/n_{1k} + p(1-p)/n_{0k} & \\text{under } H_0\\\\ p_1(1-p_1)/n_{1k} + p_0(1-p_0)/n_{0k} & \\text{under } H_1\\\\ \\end{array} \\right.. Its estimate is \\widehat{\\mathcal I}_k^{-1} = \\left\\{ \\begin{array}{ll} \\bar p(1 - \\bar p) / n_{1k} + \\bar p(1 - \\bar p) / n_{0k} & \\text{under } H_0\\\\ \\widehat p_1(1-\\widehat p_1) / n_{1k} + \\widehat p_0(1 - \\widehat p_0)/n_{0k} & \\text{under } H_1\\\\ \\end{array} \\right., where \\bar p = \\frac{\\sum_{i=1}^{n_{1k}}x_{i1} + \\sum_{i=1}^{n_{0k}}x_{i0}}{n_{1k} + n_{0k}} and \\widehat p_j = \\frac{\\sum_{i=1}^{n_{jk}}x_{ij}}{n_{jk}} for j = 0, 1.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-info-formula.html","id":"survival-outcome","dir":"Articles","previous_headings":"","what":"Survival outcome","title":"Statistical information under null and alternative hypothesis","text":"In many clinical trials, the outcome is time to an event. For simplicity, we assume the event is death and a person can have only one event; the same ideas apply to events that can recur, in which case we restrict attention to the first event per patient. We use logrank statistics to compare the treatment and control arms. Assume N_k is the total number of deaths at analysis k. The numerator of the logrank statistic at analysis k (Proschan, Lan, and Wittes 2006) is \\sum_{i=1}^{N_k} D_i, where D_i = O_i - E_i, O_i is the indicator that the ith death occurred in a treatment patient, and E_i = m_{1i} / (m_{0i} + m_{1i}) is the null expectation of O_i given the respective numbers, m_{0i} and m_{1i}, of control and treatment patients at risk just prior to the ith death. Conditioned on m_{0i} and m_{1i}, O_i has a Bernoulli distribution with parameter E_i. The null conditional mean and variance of D_i are 0 and V_i = E_i(1 - E_i), respectively. Unconditionally, the D_i are uncorrelated, mean 0 random variables with variance E(V_i) under the null hypothesis. Thus, conditional on N_k, \\begin{array}{ccl} \\mathcal I_k^{-1} & = & \\text{Var}(\\delta_k) = \\sum_{i=1}^{N_k} \\text{Var}(D_i) = \\sum_{i=1}^{N_k} E(V_i) = E \\left( \\sum_{i=1}^{N_k} V_i \\right) = E \\left( \\sum_{i=1}^{N_k} E_i(1 - E_i) \\right) \\\\ & = & \\left\\{ \\begin{array}{ll} E\\left(\\sum_{i=1}^{N_k} \\frac{r}{1+r} \\cdot \\frac{1}{1+r} \\right) & \\text{under } H_0\\\\ E\\left(\\sum_{i=1}^{N_k} \\frac{m_{1i}}{(m_{0i} + m_{1i})} \\cdot \\frac{m_{0i}}{(m_{0i} + m_{1i})}\\right) & \\text{under } H_1 \\end{array} \\right., \\end{array} where r is the randomization ratio. 
estimation \\begin{array}{ccl} \\widehat{\\mathcal }_k^{-1} & = & \\left\\{ \\begin{array}{ll} \\sum_{=1}^{N_k} \\frac{r}{1+r} \\frac{1}{1+r} & \\text{} H_0\\\\ \\sum_{=1}^{N_k} \\frac{m_{1i}}{(m_{0i} + m_{1i})} \\frac{m_{0i}}{(m_{0i} + m_{1i})} & \\text{} H_1 \\end{array} \\right.. \\end{array}","code":""},{"path":[]},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"binary-outcome","dir":"Articles","previous_headings":"Unstratified design","what":"Binary outcome","title":"Integer designs","text":"Note original design, sample size 1243.307021, 1989.2912336, 2486.614042, integer design, sample size updated 1242, 1988, 2488. 2 interim analysis, floor closet multiplier 2, since randomization ratio 1. final analysis, ceiling sample size 2486.614042 2488 also make sure integer sample size multiplier 2. Please also note , since sample size rounded, power new design also changes little bit, , 0.9 0.9001172.","code":"x <- gs_design_rd( p_c = tibble(stratum = \"All\", rate = 0.2), p_e = tibble(stratum = \"All\", rate = 0.15), info_frac = c(0.5, 0.8, 1), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, timing = c(0.5, 0.8, 1), total_spend = 0.025, param = NULL), lpar = rep(-Inf, 3) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 3), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on binary endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"survival-outcome","dir":"Articles","previous_headings":"Unstratified design","what":"Survival outcome","title":"Integer designs","text":"Notice integer design, () number events, (ii) sample size, (iii) power, (iv) information fraction different.","code":"x <- gs_design_ahr( analysis_time = c(12, 24, 36), upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, timing = 1:3 / 3, total_spend = 0.025, param = NULL), lpar = rep(-Inf, 3) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 3), Events = c(x$analysis$event, xi$analysis$event), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on survival endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-integer-design.html","id":"stratified-design","dir":"Articles","previous_headings":"","what":"Stratified design","title":"Integer designs","text":"Note original design, sample size 3426.1318268, 4894.4740382, integer design, sample 
size updated 3426, 4896. 2 interim analysis, floor closet multiplier 2, since randomization ratio 1. final analysis, ceiling sample size 4894.4740382 4896 also make sure integer sample size multiplier 2.","code":"x <- gs_design_rd( p_c = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(0.2, 0.25) ), p_e = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(0.15, 0.22) ), info_frac = c(0.7, 1), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), prevalence = c(0.4, 0.6) ), weight = \"ss\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = c(0.7, 1)), lpar = rep(-Inf, 2) ) xi <- x %>% to_integer() tibble( Design = rep(c(\"Original design\", \"Integer design\"), each = 2), `Sample size` = c(x$analysis$n, xi$analysis$n), Z = c( (x$bound %>% filter(bound == \"upper\"))$z, (xi$bound %>% filter(bound == \"upper\"))$z ), `Information fraction` = c(x$analysis$info_frac, xi$analysis$info_frac), Power = c( (x$bound %>% filter(bound == \"upper\"))$probability, (xi$bound %>% filter(bound == \"upper\"))$probability ) ) %>% group_by(Design) %>% gt() %>% tab_header( title = \"Comparison between the original/integer design\", subtitle = \"on binary endpoints (unstratified design)\" ) %>% fmt_number(columns = 2:5, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Non-proportional effect size in group sequential design","text":"acronym NPES short non-proportional effect size. motivated primarily use designing time--event trial non-proportional hazards (NPH), simplified generalized concept . model likely useful rank-based survival tests beyond logrank test considered initially Tsiatis (1982). also useful situations treatment effect may vary time trial reason. generalize framework Chapter 2 Proschan, Lan, Wittes (2006) incorporate possibility treatment effect changing course trial systematic way. vignettes addresses distribution theory initial technical issues around computing boundary crossing probabilities bounds satisfying targeted boundary crossing probabilities applied generalize computational algorithms provided Chapter 19 Jennison Turnbull (1999) used compute boundary crossing probabilities well boundaries group sequential designs. Additional specifics around boundary computation, power sample size provided separate vignette.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"the-continuous-model-and-e-process","dir":"Articles","previous_headings":"The probability model","what":"The continuous model and E-process","title":"Non-proportional effect size in group sequential design","text":"consider simple example motivate distribution theory quite general applies across many situations. instance, Proschan, Lan, Wittes (2006) immediately suggest paired observations, time--event binary outcomes endpoints theory applicable. assume given integer N>0 X_{} independent, =1,2,\\ldots. integer K\\leq N assume perform analysis K times 00 reflect positive benefit. k=1,2,\\ldots,K-1, interim cutoffs -\\infty \\leq a_k< b_k\\leq \\infty set; final cutoffs -\\infty \\leq a_K\\leq b_K <\\infty also set. infinite efficacy bound analysis means bound crossed analysis. 
Thus, 3K parameters define group sequential design: a_k, b_k, \\mathcal{}_k, k=1,2,\\ldots,K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-background.html","id":"notation-for-boundary-crossing-probabilities","dir":"Articles","previous_headings":"Test bounds and crossing probabilities","what":"Notation for boundary crossing probabilities","title":"Non-proportional effect size in group sequential design","text":"now apply distributional assumptions compute boundary crossing probabilities. use shorthand notation section \\theta represent \\theta() \\theta=0 represent \\theta(t)\\equiv 0 t. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. Next, consider analogous notation lower bound. k=1,2,\\ldots,K denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). symmetric testing analysis k a_k= - b_k, \\beta_k(0)=\\alpha_k(0), k=1,2,\\ldots,K. total lower boundary crossing probability trial denoted \\beta(\\theta)\\equiv\\sum_{k=1}^{K}\\beta_{k}(\\theta). Note can also set a_k= -\\infty analyses lower bound desired, k=1,2,\\ldots,K. k-\\infty b_k<\\infty.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Numerical integration non-proportional effect size in group sequential design","text":"provided asymptotic distribution theory notation group sequential boundaries vignettes/articles/story-npe-background.Rmd. vignettes generalize computational algorithms provided Chapter 19 Jennison Turnbull (1999) used compute boundary crossing probabilities well derive boundaries group sequential designs.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"asymptotic-normal-and-boundary-crossing-probabilities","dir":"Articles","previous_headings":"","what":"Asymptotic normal and boundary crossing probabilities","title":"Numerical integration non-proportional effect size in group sequential design","text":"assume Z_1,\\cdots,Z_K multivariate normal distribution variance 1\\leq k\\leq K \\text{Var}(Z_k) = 1 expected value E(Z_{k})= \\sqrt{\\mathcal{}_k}\\theta(t_{k})= \\sqrt{n_k}E(\\bar X_k) .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"notation-for-boundary-crossing-probabilities","dir":"Articles","previous_headings":"","what":"Notation for boundary crossing probabilities","title":"Numerical integration non-proportional effect size in group sequential design","text":"use shorthand notation section \\theta represent \\theta() \\theta=0 represent \\theta(t)\\equiv 0 t. denote probability crossing upper boundary analysis k without previously crossing bound \\alpha_{k}(\\theta)=P_{\\theta}(\\{Z_{k}\\geq b_{k}\\}\\cap_{j=1}^{-1}\\{a_{j}\\leq Z_{j}< b_{j}\\}), k=1,2,\\ldots,K. Next, consider analogous notation lower bound. k=1,2,\\ldots,K denote probability crossing lower bound analysis k without previously crossing bound \\beta_{k}(\\theta)=P_{\\theta}((Z_{k}< a_{k}\\}\\cap_{j=1}^{k-1}\\{ a_{j}\\leq Z_{j}< b_{j}\\}). symmetric testing analysis k a_k= - b_k, \\beta_k(0)=\\alpha_k(0), k=1,2,\\ldots,K. 
total lower boundary crossing probability trial denoted \\beta(\\theta)\\equiv\\sum_{k=1}^{K}\\beta_{k}(\\theta). Note can also set a_k= -\\infty analyses lower bound desired, k=1,2,\\ldots,K; thus, use \\alpha^+(\\theta) notation . k-\\infty b_k<\\infty.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"recursive-algorithms-for-numerical-integration","dir":"Articles","previous_headings":"","what":"Recursive algorithms for numerical integration","title":"Numerical integration non-proportional effect size in group sequential design","text":"now provide small update algorithm Chapter 19 Jennison Turnbull (1999) numerical integration required compute boundary crossing probabilities previous section also identifying group sequential boundaries satisfying desired characteristics. key calculations conditional power identity equation (1) allows building recursive numerical integration identities enable simple, efficient numerical integration. define g_1(z;\\theta) = \\frac{d}{dz}P(Z_1\\leq z) = \\phi\\left(z - \\sqrt{\\mathcal{}_1}\\theta(t_1)\\right)\\tag{2} k=2,3,\\ldots K recursively define subdensity function \\begin{align} g_k(z; \\theta) &= \\frac{d}{dz}P_\\theta(\\{Z_k\\leq z\\}\\cap_{j=1}^{k-1}\\{a_j\\leq Z_j0 \\pi_k(b^{(+1)};\\theta)-\\alpha_k(\\theta) suitably small. simple starting value k b^{(0)} = \\Phi^{-1}(1- \\alpha_k(\\theta)) + \\sqrt{\\mathcal{}_k}\\theta(t_k).\\tag{9} Normally, b_k calculated \\theta(t_k)=0 k=1,2,\\ldots,K simplifies . However, a_k computed analogously often use non-zero \\theta enable -called \\beta-spending.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"numerical-integration","dir":"Articles","previous_headings":"","what":"Numerical integration","title":"Numerical integration non-proportional effect size in group sequential design","text":"numerical integration required compute boundary probabilities derive boundaries defined section 19.3 Jennison Turnbull (1999). single change replacement non-proportional effect size assumption equation (3) replacing equivalent equation (4) used constant effect size Jennison Turnbull (1999).","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-npe-integration.html","id":"demonstrating-calculations","dir":"Articles","previous_headings":"Numerical integration","what":"Demonstrating calculations","title":"Numerical integration non-proportional effect size in group sequential design","text":"walk perform basic calculations . basic scenario one interim analysis addition final analysis. target Type error \\alpha=0.025 Type II error \\beta = 0.1, latter corresponding target 90% power. assume power spending function \\rho=2 bounds. , information fraction t, cumulative spending \\alpha \\times t^2 upper bound \\beta \\times t^2 lower bound. Statistical information 1 first analysis 4 final analysis, leading information fraction t_1= 1/4, t_2=1 interim final, respectively. assume \\theta_1 = .5, \\theta_3=1.5. Set overall study parameters Calculate interim bounds Set numerical integration grid next (final) analysis set table numerical integration continuation region can subsequently use compute boundary crossing probabilities bounds second interim analysis. begin null hypothesis. 
columns resulting table - z - Z-values grid; recall interim test statistic normally distributed variance 1 - w - weights numerical integration - h - weights w times normal density can used numerical integration; demonstrate use probability crossing bound null hypothesis computed follows: now set numerical integration grid alternate hypothesis compute continuation probability. Compute initial iteration analysis 2 bounds initial estimate second analysis bounds computed way actual first analysis bounds. Compute actual boundary crossing probabilities initial approximations get actual boundary crossing probabilities second analysis, update numerical integration grids. null hypothesis, need update interval b2_0. get first order Taylor’s series approximation update bound, need derivative probability respect Z-value cutoff. given subdensity computed grid. , grid contains numerical integration weight w weight times subdensity h. Thus, get subdensity bound, estimated derivative boundary crossing probability, compute: see Taylor’s series update gotten us substantially closer targeted boundary probability. now update lower bound analogous fashion. Confirm gs_power_npe()","code":"# Information for both null and alternative info <- c(1, 4) # information fraction timing <- info / max(info) # Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # Cumulative alpha-spending at IA, Final alphaspend <- alpha * timing^2 # Cumulative beta-spending at IA, Final betaspend <- beta * timing^2 # Average treatment effect at analyses theta <- c(1, 3) / 2 # Upper bound under null hypothesis b1 <- qnorm(alphaspend[1], lower.tail = FALSE) # Lower bound under alternate hypothesis a1 <- qnorm(betaspend[1], mean = sqrt(info[1]) * theta[1]) # Compare probability of crossing vs target for bounds: cat( \"Upper bound =\", b1, \"Target spend =\", alphaspend[1], \"Actual spend =\", pnorm(b1, lower.tail = FALSE) ) #> Upper bound = 2.955167 Target spend = 0.0015625 Actual spend = 0.0015625 # Lower bound under alternate hypothesis a1 <- qnorm(betaspend[1], mean = sqrt(info[1]) * theta[1]) # Compare probability of crossing vs target for bounds: cat( \"Lower bound =\", a1, \"Target spend =\", betaspend[1], \"Actual spend =\", pnorm(a1, mean = sqrt(info[1]) * theta[1]) ) #> Lower bound = -1.997705 Target spend = 0.00625 Actual spend = 0.00625 # Set up grid over continuation region # Null hypothesis grid1_0 <- gsDesign2:::h1(theta = 0, info = info[1], a = a1, b = b1) grid1_0 %>% head() #> $z #> [1] -1.99770547 -1.95718607 -1.91666667 -1.87500000 -1.83333333 -1.79166667 #> [7] -1.75000000 -1.70833333 -1.66666667 -1.62500000 -1.58333333 -1.54166667 #> [13] -1.50000000 -1.45833333 -1.41666667 -1.37500000 -1.33333333 -1.29166667 #> [19] -1.25000000 -1.20833333 -1.16666667 -1.12500000 -1.08333333 -1.04166667 #> [25] -1.00000000 -0.95833333 -0.91666667 -0.87500000 -0.83333333 -0.79166667 #> [31] -0.75000000 -0.70833333 -0.66666667 -0.62500000 -0.58333333 -0.54166667 #> [37] -0.50000000 -0.45833333 -0.41666667 -0.37500000 -0.33333333 -0.29166667 #> [43] -0.25000000 -0.20833333 -0.16666667 -0.12500000 -0.08333333 -0.04166667 #> [49] 0.00000000 0.04166667 0.08333333 0.12500000 0.16666667 0.20833333 #> [55] 0.25000000 0.29166667 0.33333333 0.37500000 0.41666667 0.45833333 #> [61] 0.50000000 0.54166667 0.58333333 0.62500000 0.66666667 0.70833333 #> [67] 0.75000000 0.79166667 0.83333333 0.87500000 0.91666667 0.95833333 #> [73] 1.00000000 1.04166667 1.08333333 1.12500000 1.16666667 1.20833333 #> [79] 1.25000000 1.29166667 1.33333333 
1.37500000 1.41666667 1.45833333 #> [85] 1.50000000 1.54166667 1.58333333 1.62500000 1.66666667 1.70833333 #> [91] 1.75000000 1.79166667 1.83333333 1.87500000 1.91666667 1.95833333 #> [97] 2.00000000 2.04166667 2.08333333 2.12500000 2.16666667 2.20833333 #> [103] 2.25000000 2.29166667 2.33333333 2.37500000 2.41666667 2.45833333 #> [109] 2.50000000 2.54166667 2.58333333 2.62500000 2.66666667 2.70833333 #> [115] 2.75000000 2.79166667 2.83333333 2.87500000 2.91666667 2.93591676 #> [121] 2.95516685 #> #> $w #> [1] 0.013506468 0.054025872 0.027395357 0.055555556 0.027777778 0.055555556 #> [7] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [13] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [19] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [25] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [31] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [37] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [43] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [49] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [55] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [61] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [67] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [73] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [79] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [85] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [91] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [97] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [103] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [109] 0.027777778 0.055555556 0.027777778 0.055555556 0.027777778 0.055555556 #> [115] 0.027777778 0.055555556 0.027777778 0.055555556 0.020305586 0.025666787 #> [121] 0.006416697 #> #> $h #> [1] 7.325795e-04 3.174772e-03 1.741296e-03 3.821460e-03 2.064199e-03 #> [6] 4.452253e-03 2.396592e-03 5.151272e-03 2.763254e-03 5.918793e-03 #> [11] 3.163964e-03 6.753608e-03 3.597711e-03 7.652841e-03 4.062610e-03 #> [16] 8.611793e-03 4.555835e-03 9.623842e-03 5.073586e-03 1.068040e-02 #> [21] 5.611075e-03 1.177092e-02 6.162560e-03 1.288302e-02 6.721409e-03 #> [26] 1.400261e-02 7.280204e-03 1.511417e-02 7.830885e-03 1.620106e-02 #> [31] 8.364929e-03 1.724594e-02 8.873556e-03 1.823116e-02 9.347967e-03 #> [36] 1.913930e-02 9.779592e-03 1.995361e-02 1.016034e-02 2.065862e-02 #> [41] 1.048287e-02 2.124051e-02 1.074078e-02 2.168766e-02 1.092888e-02 #> [46] 2.199098e-02 1.104332e-02 2.214423e-02 1.108173e-02 2.214423e-02 #> [51] 1.104332e-02 2.199098e-02 1.092888e-02 2.168766e-02 1.074078e-02 #> [56] 2.124051e-02 1.048287e-02 2.065862e-02 1.016034e-02 1.995361e-02 #> [61] 9.779592e-03 1.913930e-02 9.347967e-03 1.823116e-02 8.873556e-03 #> [66] 1.724594e-02 8.364929e-03 1.620106e-02 7.830885e-03 1.511417e-02 #> [71] 7.280204e-03 1.400261e-02 6.721409e-03 1.288302e-02 6.162560e-03 #> [76] 1.177092e-02 5.611075e-03 1.068040e-02 5.073586e-03 9.623842e-03 #> [81] 4.555835e-03 8.611793e-03 4.062610e-03 7.652841e-03 3.597711e-03 #> [86] 6.753608e-03 3.163964e-03 5.918793e-03 2.763254e-03 5.151272e-03 #> [91] 2.396592e-03 4.452253e-03 2.064199e-03 3.821460e-03 1.765603e-03 #> [96] 3.257338e-03 
1.499749e-03 2.757277e-03 1.265110e-03 2.317833e-03 #> [101] 1.059795e-03 1.934941e-03 8.816570e-04 1.604123e-03 7.283858e-04 #> [106] 1.320661e-03 5.975956e-04 1.079765e-03 4.868972e-04 8.767006e-04 #> [111] 3.939593e-04 7.068990e-04 3.165552e-04 5.660405e-04 2.525990e-04 #> [116] 4.501131e-04 2.001694e-04 3.554511e-04 1.151506e-04 1.375807e-04 #> [121] 3.249917e-05 prob_h0_continue <- sum(grid1_0$h) cat( \"Probability of continuing trial under null hypothesis\\n\", \" Using numerical integration:\", prob_h0_continue, \"\\n Using normal CDF:\", pnorm(b1) - pnorm(a1), \"\\n\" ) #> Probability of continuing trial under null hypothesis #> Using numerical integration: 0.9755632 #> Using normal CDF: 0.9755632 grid1_1 <- gsDesign2:::h1(theta = theta[1], info = info[1], a = a1, b = b1) prob_h1_continue <- sum(grid1_1$h) h1mean <- sqrt(info[1]) * theta[1] cat( \"Probability of continuing trial under alternate hypothesis\\n\", \" Using numerical integration:\", prob_h1_continue, \"\\n Using normal CDF:\", pnorm(b1, mean = h1mean) - pnorm(a1, h1mean), \"\\n\" ) #> Probability of continuing trial under alternate hypothesis #> Using numerical integration: 0.986709 #> Using normal CDF: 0.986709 # Upper bound under null hypothesis # incremental spend spend0 <- alphaspend[2] - alphaspend[1] # H0 bound at 2nd analysis; 1st approximation b2_0 <- qnorm(spend0, lower.tail = FALSE) # Lower bound under alternate hypothesis spend1 <- betaspend[2] - betaspend[1] a2_0 <- qnorm(spend1, mean = sqrt(info[2]) * theta[2]) cat(\"Initial bound approximation for 2nd analysis\\n (\", a2_0, \", \", b2_0, \")\\n\", sep = \"\" ) #> Initial bound approximation for 2nd analysis #> (1.681989, 1.987428) # Upper rejection region grid under H0 grid2_0 <- gsDesign2:::hupdate(theta = 0, info = info[2], a = b2_0, b = Inf, im1 = info[1], gm1 = grid1_0) pupper_0 <- sum(grid2_0$h) cat( \"Upper spending at analysis 2\\n Target:\", spend0, \"\\n Using initial bound approximation:\", pupper_0, \"\\n\" ) #> Upper spending at analysis 2 #> Target: 0.0234375 #> Using initial bound approximation: 0.02290683 # First point in grid is at bound # Compute derivative dpdb2 <- grid2_0$h[1] / grid2_0$w[1] # Compute difference between target and actual bound crossing probability pdiff <- spend0 - pupper_0 # Taylor's series update b2_1 <- b2_0 - pdiff / dpdb2 # Compute boundary crossing probability at updated bound cat( \"Original bound approximation:\", b2_0, \"\\nUpdated bound approximation:\", b2_1 ) #> Original bound approximation: 1.987428 #> Updated bound approximation: 1.977726 grid2_0 <- gsDesign2:::hupdate(theta = 0, info = info[2], a = b2_1, b = Inf, im1 = info[1], gm1 = grid1_0) pupper_1 <- sum(grid2_0$h) cat( \"\\nOriginal boundary crossing probability:\", pupper_0, \"\\nUpdated boundary crossing probability:\", pupper_1, \"\\nTarget:\", spend0, \"\\n\" ) #> #> Original boundary crossing probability: 0.02290683 #> Updated boundary crossing probability: 0.02344269 #> Target: 0.0234375 # Lower rejection region grid under H1 grid2_1 <- gsDesign2:::hupdate( theta = theta[2], info = info[2], a = -Inf, b = a2_0, thetam1 = theta[1], im1 = info[1], gm1 = grid1_1 ) plower_0 <- sum(grid2_1$h) # Last point in grid is at bound # Compute derivative indx <- length(grid2_1$h) dpda2 <- grid2_1$h[indx] / grid2_1$w[indx] # Compute difference between target and actual bound crossing probability pdiff <- spend1 - plower_0 # Taylor's series update a2_1 <- a2_0 + pdiff / dpda2 # Compute boundary crossing probability at updated bound cat( \"Original bound 
approximation:\", a2_0, \"\\nUpdated bound approximation:\", a2_1 ) #> Original bound approximation: 1.681989 #> Updated bound approximation: 1.702596 grid2_1 <- gsDesign2:::hupdate( theta = theta[2], info = info[2], a = -Inf, b = a2_1, thetam1 = theta[1], im1 = info[1], gm1 = grid1_1 ) plower_1 <- sum(grid2_1$h) cat( \"\\nOriginal boundary crossing probability:\", plower_0, \"\\nUpdated boundary crossing probability:\", plower_1, \"\\nTarget:\", spend1, \"\\n\" ) #> #> Original boundary crossing probability: 0.09035972 #> Updated boundary crossing probability: 0.09379707 #> Target: 0.09375 gs_power_npe( theta = theta, theta1 = theta, info = info, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfPower, total_spend = 0.025, param = 2), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfPower, total_spend = 0.1, param = 2) ) #> # A tibble: 4 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 2.96 0.00704 0.5 0.5 0.25 1 1 1 #> 2 2 upper 1.98 0.845 1.5 1.5 1 4 4 4 #> 3 1 lower -2.00 0.00625 0.5 0.5 0.25 1 1 1 #> 4 2 lower 1.70 0.100 1.5 1.5 1 4 4 4"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Futility bounds at design and analysis under non-proportional hazards","text":"set futility bounds non-proportional hazards assumption. consider methods presented Korn Freidlin (2018) setting bounds consider alternate futility bound based \\beta-spending delayed crossing treatment effect simplify implementation. Finally, show update \\beta-spending bound based blinded interim data. consider example reproduce line Korn Freidlin (2018) Table 1 alternative futility bounds considered.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"initial-design-set-up-for-fixed-analysis","dir":"Articles","previous_headings":"Overview","what":"Initial design set-up for fixed analysis","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Korn Freidlin (2018) considered delayed effect scenarios proposed futility bound modification earlier method proposed Wieand, Schroeder, O’Fallon (1994). begin enrollment failure rate assumptions Korn Freidlin (2018) based example Chen (2013). now derive fixed sample size based assumptions. 
Ideally, we would allow a targeted event count with variable follow-up in fixed_design_ahr() so that the study duration would be computed automatically.","code":"# Enrollment assumed to be 680 patients over 12 months with no ramp-up enroll_rate <- define_enroll_rate(duration = 12, rate = 680 / 12) # Failure rates ## Control exponential with median of 12 mos ## Delayed effect with HR = 1 for 3 months and HR = .693 thereafter ## Censoring rate is 0 fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = -log(.5) / 12, hr = c(1, .693), dropout_rate = 0 ) ## Study duration was 34.8 in Korn & Freidlin Table 1 ## We change to 34.86 here to obtain 512 expected events more precisely study_duration <- 34.86 fixedevents <- fixed_design_ahr( alpha = 0.025, power = NULL, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration ) fixedevents %>% summary() %>% select(-Bound) %>% as_gt(footnote = \"Power based on 512 events\") %>% fmt_number(columns = 3:4, decimals = 2) %>% fmt_number(columns = 5:6, decimals = 3)"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"modified-wieand-futility-bound","dir":"Articles","previous_headings":"","what":"Modified Wieand futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"The Wieand, Schroeder, and O'Fallon (1994) rule recommends stopping when 50% of planned events have accrued if the observed HR > 1. Korn and Freidlin (2018) modified this by adding a second interim analysis at 75% of planned events that also stops if the observed HR > 1. This is implemented here by requiring a trend in favor of control, i.e., a Z-bound of 0, resulting in a Nominal p bound of 0.5 at the interim analyses in the table below. This fixed bound is specified with the gs_b() function for upper and lower and the corresponding parameters upar for the upper (efficacy) bound and lpar for the lower (futility) bound. The final efficacy bound is at a 1-sided nominal p-value of 0.025; the futility bound lowers this to 0.0247, as noted in the lower-right-hand corner of the table below. This is < 0.025 since the probability is computed under a binding assumption. This is an arbitrary convention; if the futility bound is ignored, the computation yields 0.025. In the last row under Alternate hypothesis we see the power is 88.44%. 
Korn Freidlin (2018) computed 88.4% power design 100,000 simulations estimate standard error power calculation 0.1%.","code":"wieand <- gs_power_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, upper = gs_b, upar = c(rep(Inf, 2), qnorm(.975)), lower = gs_b, lpar = c(0, 0, -Inf), event = 512 * c(.5, .75, 1) ) wieand %>% summary() %>% as_gt( title = \"Group sequential design with futility only at interim analyses\", subtitle = \"Wieand futility rule stops if HR > 1\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"beta-spending-futility-bound-with-ahr","dir":"Articles","previous_headings":"","what":"Beta-spending futility bound with AHR","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Need summarize .","code":"betaspending <- gs_power_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, upper = gs_b, upar = c(rep(Inf, 2), qnorm(.975)), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), event = 512 * c(.5, .75, 1), test_lower = c(TRUE, TRUE, FALSE) ) betaspending %>% summary() %>% as_gt( title = \"Group sequential design with futility only\", subtitle = \"Beta-spending futility bound\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"classical-beta-spending-futility-bound","dir":"Articles","previous_headings":"","what":"Classical beta-spending futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"classical \\beta-spending bound assume constant treatment effect time using proportional hazards assumption. use average hazard ratio fixed design analysis purpose.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"korn-and-freidlin-futility-bound","dir":"Articles","previous_headings":"","what":"Korn and Freidlin futility bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"Korn Freidlin (2018) futility bound set least 50% expected events occurred least two thirds observed events occurred later 3 months randomization. expected timing demonstrated .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"accumulation-of-events-by-time-interval","dir":"Articles","previous_headings":"Korn and Freidlin futility bound","what":"Accumulation of events by time interval","title":"Futility bounds at design and analysis under non-proportional hazards","text":"consider accumulation events time occur -effect interval first 3 months randomization events time interval. done overall trial without dividing treatment group using gsDesign2::AHR() function. consider monthly accumulation events 34.86 months planned trial duration. note summary early expected events events first 3 months -study expected prior first interim analysis. can look proportion events first 3 months follows: Korn Freidlin (2018) bound targeted timing 50% events occurred least 2/3 3 months enrollment 3 months delayed effect period. 
We see that 1/3 of events are still within 3 months of enrollment at month 20.","code":"event_accumulation <- pw_info( enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(1:34, 34.86), ratio = 1 ) head(event_accumulation, n = 7) %>% gt() event_accumulation %>% group_by(time) %>% summarize(`Total events` = sum(event), \"Proportion early\" = first(event) / `Total events`) %>% ggplot(aes(x = time, y = `Proportion early`)) + geom_line()"},{"path":"https://merck.github.io/gsDesign2/articles/story-nph-futility.html","id":"korn-and-freidlin-bound","dir":"Articles","previous_headings":"Korn and Freidlin futility bound","what":"Korn and Freidlin bound","title":"Futility bounds at design and analysis under non-proportional hazards","text":"The bound proposed by Korn and Freidlin (2018).","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Power evaluation with spending bounds","text":"This vignette covers how to compute power and Type I error for a design derived with a spending bound. We write for a general non-constant treatment effect, using gs_design_npe() to derive a design under one parameter setting and then computing power under another setting. We use a trial with a binary endpoint to enable a full illustration.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"scenario-for-consideration","dir":"Articles","previous_headings":"","what":"Scenario for Consideration","title":"Power evaluation with spending bounds","text":"We consider a scenario largely based on the CAPTURE study (Capture Investigators et al. (1997)) where the primary endpoint was a composite of death, acute myocardial infarction, or need for recurrent percutaneous intervention within 30 days of randomization. A detailed introduction of the trial follows. We consider a 2-arm trial with an experimental arm and a control arm. We assume K=3 analyses after 350, 700, and 1400 patients have been observed with equal randomization between treatment groups. The primary endpoint of the trial is a binary indicator of whether a participant has a failed outcome. In this case, we consider the parameter \\theta = p_1 - p_2, where p_1 denotes the probability that a trial participant in the control group experiences a failure and p_2 represents the corresponding probability for a trial participant in the experimental group. The study is designed with approximately 80% power (Type II error \\beta = 1 - 0.8 = 0.2) and 2.5% one-sided Type I error (\\alpha = 0.025) to detect a reduction from a 15% event rate (p_1 = 0.15) in the control group to 10% (p_2 = 0.1) in the experimental group. In this example, the parameter of interest is \\theta = p_1 - p_2. We denote the alternate hypothesis by H_1: \\theta = \\theta_1= p_1^1 - p_2^1 = 0.15 - 0.10 = 0.05 and the null hypothesis by H_0: \\theta = \\theta_0 = 0 = p_1^0 - p_2^0, where p^0_1 = p^0_2= (p_1^1+p_2^1)/2 = 0.125 as laid out in Lachin (2009). We note that if we had considered a success outcome such as objective response in an oncology study, we would let p_1 denote the experimental group and p_2 the control group response rate. 
Thus, always set notation p_1>p_2 represents superiority experimental group.","code":"p0 <- 0.15 # assumed failure rate in control group p1 <- 0.10 # assumed failure rate in experimental group alpha <- 0.025 # type I error beta <- 0.2 # type II error for 80% power"},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"notations","dir":"Articles","previous_headings":"","what":"Notations","title":"Power evaluation with spending bounds","text":"assume k: index analysis, .e., k = 1, \\ldots, K; : index arm, .e., = 1 control group = 2 experimental group; n_{ik}: number subjects group analysis k; n_k: number subjects analysis k, .e., n_k = n_{1k} + n_{2k}; X_{ij}: independent random variable whether j-th subject group response, .e, X_{ij} \\sim \\text{Bernoulli}(p_i); Y_{ik}: number subject response group analysis k, .e., Y_{ik} = \\sum_{j = 1}^{n_{ik}} X_{ij};","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"statistical-testing","dir":"Articles","previous_headings":"","what":"Statistical Testing","title":"Power evaluation with spending bounds","text":"section, discuss estimation statistical information variance proportion null hypothesis H_0: p_1^0 = p_2^0 \\equiv p_0 alternative hypothesis H_1: \\theta = \\theta_1= p_1^1 - p_2^1. , introduce test statistics group sequential design.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"estimation-of-statistical-information-under-h1","dir":"Articles","previous_headings":"Statistical Testing","what":"Estimation of Statistical Information under H1","title":"Power evaluation with spending bounds","text":"alternative hypothesis, one can estimate proportion failures group analysis k \\hat{p}_{ik} = Y_{ik}/n_{ik}. note variance \\text{Var}(\\hat p_{ik})=\\frac{p_{}(1-p_i)}{n_{ik}}, consistent estimator \\widehat{\\text{Var}}(\\hat p_{ik})=\\frac{\\hat p_{ik}(1-\\hat p_{ik})}{n_{ik}}, = 1, 2 k = 1, 2, \\ldots, K. Letting \\hat\\theta_k = \\hat p_{1k} - \\hat p_{2k}, also \\sigma^2_k \\equiv \\text{Var}(\\hat\\theta_k) = \\frac{p_1(1-p_1)}{n_{1k}}+\\frac{p_2(1-p_2)}{n_{2k}}, consistent estimator \\hat\\sigma^2_k = \\frac{\\hat p_{1k}(1-\\hat p_{1k})}{n_{1k}}+\\frac{\\hat p_{2k}(1-\\hat p_{2k})}{n_{2k}}, Statistical information quantities corresponding estimators denoted \\left\\{ \\begin{align} \\mathcal{}_k = &1/\\sigma^2_k,\\\\ \\mathcal{\\hat }_k = &1/\\hat \\sigma^2_k, \\end{align} \\right.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"estimation-of-statistical-information-under-h0","dir":"Articles","previous_headings":"Statistical Testing","what":"Estimation of Statistical Information under H0","title":"Power evaluation with spending bounds","text":"null hypothesis, one can estimate proportion failures group analysis k estimate \\hat{p}_{0k} = \\frac{Y_{1k}+ Y_{2k}}{n_{1k}+ n_{2k}} = \\frac{n_{1k}\\hat p_{1k} + n_{2k}\\hat p_{2k}}{n_{1k} + n_{2k}}. corresponding null hypothesis estimator \\hat\\sigma^2_{0k} \\equiv \\widehat{\\text{Var}}(\\hat{p}_{0k}) = \\hat p_{0k}(1-\\hat p_{0k})\\left(\\frac{1}{n_{1k}}+ \\frac{1}{n_{2k}}\\right), k = 1,2, \\ldots, K. Statistical information quantities corresponding estimators denoted \\left\\{ \\begin{align} \\mathcal{}_{0k} =& 1/ \\sigma^2_{0k},\\\\ \\mathcal{\\hat }_{0k} =& 1/\\hat \\sigma^2_{0k}, \\end{align} \\right. 
k = 1, 2, \\ldots, K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"testing-statistics","dir":"Articles","previous_headings":"Statistical Testing","what":"Testing Statistics","title":"Power evaluation with spending bounds","text":"Testing, recommended Lachin (2009), done large sample test null hypothesis variance estimate without continuity correction: Z_k = \\hat\\theta_k/\\hat\\sigma_{0k}=\\frac{\\hat p_{1k} - \\hat p_{2k}}{\\sqrt{(1/n_{1k}+ 1/n_{2k})\\hat p_{0k}(1-\\hat p_{0k})} }, asymptotically \\text{Normal}(0,1) p_1 = p_2 \\text{Normal}(0, \\sigma_{0k}^2/\\sigma_k^2) generally p_1, p_2 k = 1, 2, \\ldots, K. assume constant proportion \\xi_i randomized group =1,2. Thus, Z_k \\approx \\frac{\\sqrt{n_k}(\\hat p_{1k} - \\hat p_{2k})}{\\sqrt{(1/\\xi_1+ 1/\\xi_2)p_{0}(1- p_0)} }. , asymptotic distribution Z_k \\sim \\text{Normal} \\left( \\sqrt{n_k}\\frac{p_1 - p_2}{\\sqrt{(1/\\xi_1+ 1/\\xi_2) p_0(1- p_0)} }, \\sigma^2_{0k}/\\sigma^2_{1k} \\right), note \\sigma^2_{0k}/\\sigma^2_{1k} = \\frac{ p_0(1-p_0)\\left(1/\\xi_1+ 1/\\xi_2\\right)}{p_1(1-p_1)/\\xi_1+p_2(1-p_2)/\\xi_2}. also note definition \\sigma^2_{0k}/\\sigma^2_{1k}=\\mathcal I_k/\\mathcal I_{0k}. Based input p_1, p_2, n_k, \\xi_1, \\xi_2 = 1-\\xi_1 compute \\theta, \\mathcal{}_k, \\mathcal{}_{0k} k = 1, 2, \\ldots, K. note \\chi^2=Z^2_k \\chi^2 test without continuity correction recommended Gordon Watson (1996). Note finally extends straightforward way non-inferiority test Farrington Manning (1990) null hypothesis \\theta = p_1 - p_2 - \\delta = 0 non-inferiority margin \\delta > 0; \\delta < 0 correspond referred super-superiority Chan (2002), requiring experimental therapy shown superior control least margin -\\delta>0.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-power-evaluation-with-spending-bound.html","id":"power-calculations","dir":"Articles","previous_headings":"","what":"Power Calculations","title":"Power evaluation with spending bounds","text":"begin developing function gs_info_binomial() calculate statistical information discussed . CAPTURE trial, can plug gs_power_npe() intended spending functions. 
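To make the test statistic concrete, here is a small numeric sketch with hypothetical interim counts; the per-arm sample sizes and failure counts below are invented for illustration and are not part of the CAPTURE example.

```r
# Hypothetical interim data: 175 subjects per arm (illustration only)
n1 <- 175; n2 <- 175
y1 <- 28; y2 <- 18                  # failures in control and experimental arms

p1_hat <- y1 / n1                   # control failure rate
p2_hat <- y2 / n2                   # experimental failure rate
p0_hat <- (y1 + y2) / (n1 + n2)     # pooled failure rate under H0

# Large-sample Z statistic using the H0 variance estimate, no continuity correction
se0 <- sqrt(p0_hat * (1 - p0_hat) * (1 / n1 + 1 / n2))
(p1_hat - p2_hat) / se0
```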
begin power alternate hypothesis Now examine information smaller assumed treatment difference alternative:","code":"gs_info_binomial <- function(p1, p2, xi1, n, delta = NULL) { if (is.null(delta)) delta <- p1 - p2 # Compute (constant) effect size at each analysis theta theta <- rep(p1 - p2, length(n)) # compute null hypothesis rate, p0 p0 <- xi1 * p1 + (1 - xi1) * p2 # compute information based on p1, p2 info <- n / (p1 * (1 - p1) / xi1 + p2 * (1 - p2) / (1 - xi1)) # compute information based on null hypothesis rate of p0 info0 <- n / (p0 * (1 - p0) * (1 / xi1 + 1 / (1 - xi1))) # compute information based on H1 rates of p1star, p2star p1star <- p0 + delta * xi1 p2star <- p0 - delta * (1 - xi1) info1 <- n / (p1star * (1 - p1star) / xi1 + p2star * (1 - p2star) / (1 - xi1)) out <- tibble( Analysis = seq_along(n), n = n, theta = theta, theta1 = rep(delta, length(n)), info = info, info0 = info0, info1 = info1 ) return(out) } h1 <- gs_info_binomial(p1 = .15, p2 = .1, xi1 = .5, n = c(350, 700, 1400)) h1 %>% gt() gs_power_npe( theta = h1$theta, theta1 = h1$theta, info = h1$info, info0 = h1$info0, info1 = h1$info1, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, param = -2, total_spend = 0.2) ) %>% gt() %>% fmt_number(columns = 3:10, decimals = 4) h <- gs_info_binomial(p1 = .15, p2 = .12, xi1 = .5, delta = .05, n = c(350, 700, 1400)) gs_power_npe( theta = h$theta, theta1 = h$theta1, info = h$info, info0 = h$info0, info1 = h$info1, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, param = -2, total_spend = 0.2) ) %>% gt() %>% fmt_number(columns = 3:10, decimals = 4)"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Group sequential design for binary outcomes","text":"consider group sequential design examining risk difference two treatment groups binary outcome. several issues consider: measure treatment difference natural parameter; focus risk difference. Incorporation null alternate hypothesis variances. Superiority, non-inferiority super-superiority designs. Stratified populations. Fixed group sequential designs. single stratum designs, focus sample size power using method Farrington Manning (1990) trial test difference two binomial event rates. routine can used test superiority, non-inferiority super-superiority. design tests superiority, methods consistent Fleiss, Tytun, Ury (1980), without continuity correction. Methods sample size power gsDesign::nBinomial() testing risk-difference scale single stratum. also consistent Hmisc R package routines bsamsize() bpower() superiority designs. trials multiple strata, testing risk difference often done weighting stratum according inverse variance (Mantel Haenszel (1959)). Since risk differences may also assumed different different strata, also explore weighting strata sample sizes Mehrotra Railkar (2000). focus sample sizes large enough asymptotic theory work well without continuity corrections. concepts incorporated following functions intended use fixed group sequential designs: gs_info_rd() support asymptotic variance statistical information calculation. gs_power_rd() support power calculations. gs_design_rd() support sample size calculations. 
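The helper above also accommodates unequal randomization through xi1. A brief illustration (not part of the vignette's scenario; it assumes gt is attached as in the other examples) computes the statistical information under 2:1 control-to-experimental allocation.

```r
# Information with 2:1 (control:experimental) allocation, i.e., xi1 = 2/3,
# reusing the gs_info_binomial() helper defined above
h_unequal <- gs_info_binomial(p1 = .15, p2 = .1, xi1 = 2 / 3, n = c(350, 700, 1400))
h_unequal %>% gt()
```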
Simulation used throughout check examples presented.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"notation","dir":"Articles","previous_headings":"","what":"Notation","title":"Group sequential design for binary outcomes","text":"K: total number analyses (including final analysis) group sequential design. fixed design, K= 1. S: total number strata. population un-stratified population, S=1. w_{s,k}: underlying weight assigned s-th strata k-th analysis. SWITCH ORDER s, k w? \\widehat w_{s,k}: estimated weight assigned s-th strata k-th analysis. N_{C,k,s}, N_{E,k,s}: planned sample size control/treatment group k-th analysis s-th strata. \\widehat N_{C,k,s}, \\widehat N_{E,k,s}: observed sample size control/treatment group k-th analysis s-th strata. r: planned randomization ratio, .e., r = N_{E,k,s} / N_{C,k,s} \\;\\; \\forall k = 1, \\ldots, K \\;\\; \\text{} \\;\\; s = 1, \\ldots, S. p_{C,s}, p_{E,s}: planned rate control/treatment arm, .e., independent observations control/treatment group binary outcome observed probability p_{C,s} k-th analysis s-th strata. d: indicator whether outcome failure (bad outcome) response (good outcome), .e., d = \\left\\{ \\begin{array}{lll} -1 & \\text{} p_{C,s} < p_{E,s} & \\text{control arm better}\\\\ 1 & \\text{} p_{C,s} > p_{E,s} & \\text{treatment arm better}\\\\ \\end{array} \\right. assume \\exists s^* \\\\{1, \\ldots, S\\}, s.t., p_{C,s^*} < p_{E,s^*}, p_{C,s} < p_{E,s}, \\forall s \\\\{1, \\ldots, S\\}, vice versa. X_{C,k,s}, X_{E,k,s}: random variables indicating number subjects failed control/treatment arm, .e., X_{C,k,s} \\sim \\text{Binomial}(N_{C,k,s}, p_{C,k,s}), X_{E,k,s} \\sim \\text{Binomial}(N_{E,k,s}, p_{E,k,s}) k-th analysis s-th strata. x_{C,k,s}, x_{E,k,s}: observed outcome X_{C, k, s}, X_{E, k, s} k-th analysis s-th strata, respectively. \\widehat p_{C,k,s}, \\widehat p_{E,k,s}: observed rates control/treatment group k-th analysis s-th strata, .e., \\widehat p_{C,k,s} = x_{C,k,s} / \\widehat N_{C,k,s}.\\\\ \\widehat p_{E,k,s} = x_{E,k,s} / \\widehat N_{E,k,s}. \\delta_{s}^{null}: planned risk difference H_0 k-th analysis s-th strata. \\delta_{s}: planned risk difference H_1 k-th analysis s-th strata denoted \\delta_{s} = |p_{C,s} - p_{E,s}|. \\hat\\delta_{s}: estimation risk difference \\widehat\\theta_{k,s} = |\\widehat p_{C,k,s} - \\widehat p_{E,k,s}| E(\\widehat\\theta_{k,s}) = \\theta_{s}, \\;\\forall k = 1, \\ldots, K.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"testing","dir":"Articles","previous_headings":"","what":"Testing","title":"Group sequential design for binary outcomes","text":"test statistics k-th analysis Z_{k} = \\frac{ \\sum_{s=1}^S \\widehat w_{s,k} \\; |\\widehat \\delta_{k,s} - \\delta_{s}^{null} | }{ \\sqrt{\\sum_{s=1}^S \\widehat w_{s,k}^2 \\widehat\\sigma_{H_0,k,s}^2} } \\widehat\\sigma^2_{k,s} = \\widehat{\\text{Var}}(\\widehat p_C -\\widehat p_E). value \\widehat\\sigma^2_{k,s} depends hypothesis design, .e., whether superiority design, non-inferiority design, super-superiority design. discuss \\widehat\\sigma^2_{k,s} following 3 subsections.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"superiority-design","dir":"Articles","previous_headings":"Testing","what":"Superiority Design","title":"Group sequential design for binary outcomes","text":"superiority design (\\delta_{s}^{null} = 0) show experimental group superior control group thresholds. 
hypothesis H_0: \\delta_{s} = 0 \\text{ vs. } H_1: \\delta_{s} > 0, \\; \\forall k = 1, \\ldots, K, s = 1, \\ldots, S Variance per strata per analysis: null hypothesis, \\begin{array}{ll} \\sigma^2_{H_0,k,s} & = \\text{Var}(p_C - p_E | H_0) = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right), \\\\ \\widehat\\sigma^2_{H_0,k,s} & = \\widehat{\\text{Var}}(\\hat p_C - \\hat p_E | H_0) = \\widehat p_{k,s}^{pool} \\left(1 - \\widehat p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right), \\end{array} p_{k,s}^{pool} = (p_{C,s} N_{C,k,s} + p_{E,s} N_{E,k,s}) / (N_{C,k,s} + N_{E,k,s}) \\widehat p_{k,s}^{pool} = (x_{C,k,s} + x_{E,k,s}) / (\\widehat N_{C,k,s} + \\widehat N_{E,k,s}). alternative hypothesis, \\begin{array}{ll} \\sigma_{H_1,k,s}^2 & = \\text{Var}(p_C - p_E | H_1) = \\frac{p_{C,s} (1- p_{C,s})}{N_{C,k,s}} + \\frac{p_{E,s} (1 - p_{E,s})}{N_{E,k,s}} \\\\ \\widehat\\sigma_{H_1,k,s}^2 & = \\widehat{\\text{Var}}(\\hat p_C - \\hat p_E | H_1) = \\frac{\\widehat p_{C,k,s} (1- \\widehat p_{C,k,s})}{N_{C,k,s}} + \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{N_{E,k,s}} \\end{array} \\widehat p_{C,k,s} = x_{C,k,s} / N_{C,k,s} \\text{ } \\widehat p_{E,k,s} = x_{E,k,s} / N_{E,k,s}. Testing one-sided level \\alpha \\(0, 1) null hypothesis rejected Z_k cross upper boundary. upper boundary can either fixed derived spending functions. Standardized treatment effect per analysis: null hypothesis, \\theta_{H_0,k} = 0 \\\\ \\widehat \\theta_{H_0,k} = 0 alternative hypothesis, \\begin{array}{ll} \\theta_{H_1,k} & = \\frac{\\sum_{s=1}^S w_{k,s} (p_{C,s} - p_{E,s})}{\\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\sigma_{H_1, k,s}^2}}\\\\ \\widehat\\theta_{H_1,k} & = \\frac{ \\sum_{s=1}^S \\widehat w_{k,s} (\\widehat p_C - \\widehat p_E) }{ \\sqrt{\\sum_{s=1}^S \\widehat w_{k,s}^2 \\widehat\\sigma_{H_1, k,s}^2} }. \\end{array} Standardized information per analysis: Lachin (2009) Lachin (1981) provide fixed sample size calculations based values \\psi_0 null hypothesis \\psi_1 alternate hypothesis. propose using variance calculations compute statistical information group sequential design apply formulation power sample size calculation vignette Computing Bounds Non-Constant Treatment Effect. 
null hypothesis, \\begin{array}{ll} \\mathcal I_{H0,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{C, k, s}} + w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{E, k, s}} \\right]^{-1} \\\\ \\widehat{\\mathcal }_{H0,k} & = \\left[ \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{k,s}^{pool} (1 - \\widehat p_{k,s}^{pool})}{\\widehat N_{C,k,s}} + \\widehat w_{k,s}^2 \\frac{\\widehat p_{k,s}^{pool} (1 - \\widehat p_{k,s}^{pool})}{\\widehat N_{C,k,s}} \\right]^{-1} \\end{array} alternative hypothesis, \\begin{array}{ll} \\mathcal I_{H1,k} = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{C,k,s} (1 - p_{C,k,s})}{N_{C,k,s}} + \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{E,k,s} (1 - p_{E,k,s})}{N_{E,k,s}} \\right]^{-1}\\\\ \\widehat{\\mathcal }_{H1,k} = \\left[ \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{C,k,s} (1 - \\widehat p_{C,k,s})}{\\widehat N_{C,k,s}} + \\sum_{s=1}^S \\widehat w_{k,s}^2 \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{\\widehat N_{E,k,s}} \\right]^{-1} \\end{array}","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"super-superiority-design","dir":"Articles","previous_headings":"Testing","what":"Super-Superiority Design","title":"Group sequential design for binary outcomes","text":"hypothesis super-superiority design H_0: \\delta_{k,s} = \\delta_{k,s}^{null} \\;\\; vs. \\;\\; H_1: \\delta > \\delta_{k,s}^{null} \\text{ } \\delta_{k,s}^{null} > 0. \\theta_{k,s_1}^{null} = \\theta_{k,s_2}^{null} \\theta_{k,s_1}^{null} \\neq \\theta_{k,s_2}^{null} s_1 \\neq s_2. null hypothesis \\theta_{0,k,s} \\neq 0, estimation rates \\widehat p_{C0,k,s}, \\widehat p_{E0,k,s} satisfy \\left\\{ \\begin{array}{l} \\widehat p_{C0,k,s} = \\widehat p_{E0,k,s} + d_{k,s} \\times \\delta_{k,s}^{null} \\\\ \\widehat p_{C0,k,s} + r\\widehat p_{E0,k,s} = \\widehat p_{C,k,s} + r\\widehat p_{E,k,s} . \\end{array} \\right. Solving 2 equations 2 unknowns yields \\left\\{ \\begin{array}{l} \\widehat p_{E0,k,s} & = (\\widehat p_{C,k,s} + r \\widehat p_{E,k,s} - d_{k,s} \\delta_{k,s}^{null}) / (r + 1)\\\\ \\widehat p_{C0,k,s} & = \\widehat p_{E0,k,s} + d_{k,s} \\delta_{k,s}^{null}. \\end{array} \\right. Variance per strata per analysis: H_0, \\hat\\sigma^2_{H_0,k,s} = \\frac{\\widehat p_{C0,k,s}(1- \\widehat p_{C0,k,s})}{N_{C,k,s}} + \\frac{ \\widehat p_{E0,k,s} (1 - \\widehat p_{E0,k,s})}{N_{E,k,s}}. H_1, \\widehat\\sigma_{H_1,k,s}^2 = \\frac{\\widehat p_{C,k,s} (1- \\widehat p_{C,k,s})}{N_{C,k,s}} + \\frac{\\widehat p_{E,k,s} (1 - \\widehat p_{E,k,s})}{N_{E,k,s}}. Standardized treatment effect per analysis: null hypothesis, \\widehat \\theta_{H_0,k} = \\frac{ \\sum_{s=1}^S w_{k,s} \\delta_{s,k}^{null} }{ \\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\widehat \\sigma_{H_0,k,s}}^2 }. alternative hypothesis, \\widehat \\theta_{H_1} = \\frac{ \\sum_{s=1}^S w_{k,s} d_{k,s} \\times (\\widehat p_{C,k,s} - \\widehat p_{E,k,s}) }{ \\sqrt{\\sum_{s=1}^S w_{k,s}^2 \\widehat \\sigma_{H_1,k,s}^2} }. Standardized information per analysis: null hypothesis, \\widehat{\\mathcal }_{H0,k} = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{\\bar p_{C0,s} (1 - \\bar p_{C0,s})}{N_{C,s}} + w_{k,s}^2\\frac{\\bar p_{E0,s} (1 - \\bar p_{E0,s})}{N_{E,s}} \\right]^{-1}. 
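A short numeric sketch of the two equations above, with hypothetical observed rates and a hypothetical margin, shows how the H0-constrained rates and the H0 variance are obtained for one stratum at one analysis.

```r
# Hypothetical inputs (illustration only)
p_c_hat <- 0.30; p_e_hat <- 0.20 # observed rates in control and experimental arms
r <- 1                           # randomization ratio
d <- 1                           # control rate is the larger one
delta0 <- 0.05                   # super-superiority margin under H0
n_c <- 200; n_e <- 200

# Solve the two equations for the H0-constrained rates
p_e0 <- (p_c_hat + r * p_e_hat - d * delta0) / (r + 1)
p_c0 <- p_e0 + d * delta0
c(p_c0 = p_c0, p_e0 = p_e0)      # 0.275 and 0.225; note p_c0 - p_e0 equals delta0

# H0 variance of the risk difference for this stratum and analysis
p_c0 * (1 - p_c0) / n_c + p_e0 * (1 - p_e0) / n_e
```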
alternative hypothesis, \\widehat{\\mathcal }_{H1,k} = \\left[ \\sum_{s=1}^S \\left( w_{k,s}^2 \\frac{\\bar p_{C,k,s} (1 - \\bar p_{C,k,s})}{N_{C,k,s}} + w_{k,s}^2 \\frac{\\bar p_{E,k,s} (1 - \\bar p_{E,k,s})}{N_{E,k,s}} \\right) \\right]^{-1}.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"non-inferiority-design","dir":"Articles","previous_headings":"Testing","what":"Non-inferiority Design","title":"Group sequential design for binary outcomes","text":"non-inferiority Design means , treatment group definitely better control group, unacceptably worse. hypothesis H_0: \\delta_{k,s} = \\delta_{k,s}^{null} \\;\\; vs. \\;\\; H_1: \\delta_{k,s} > \\delta_{k,s}^{null} \\delta_{k,s}^{null} <0. variance, standardized treatment effect statistical information super-superiority design setting \\delta_{k,s}^{null} negative numbers.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"weighting-options","dir":"Articles","previous_headings":"","what":"Weighting Options","title":"Group sequential design for binary outcomes","text":"previously noted, consider weighting based either inverse-variance weights (Mantel Haenszel (1959)) strata sample size weights (Mehrotra Railkar (2000)). Inverse-variance weights (INVAR): w_{s,k} = \\frac{1/\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\sigma^2_{s,k}}. \\\\ \\widehat w_{s,k} = \\frac{1/\\widehat\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\widehat\\sigma^2_{s,k}}. \\widehat\\sigma_{s,k}^2 \\\\{\\widehat\\sigma_{H_0, k,s}^2, \\widehat\\sigma_{H_1, k,s}^2 \\} depending information scale info_scale = ... gs_info_rd(), gs_power_rd() gs_design_rd(). Sample-Size Weights (SS): w_{s,k} = \\frac{ (N_{C, s, k} \\; N_{E, s, k}) / (N_{C, s, k} + N_{E, s, k}) }{ \\sum_{s=1}^S (N_{C, s, k} \\; N_{E, s, k}) / (N_{C, s, k} + N_{E, s, k}) },\\\\ \\widehat w_{s,k} = \\frac{ (\\widehat N_{C, s, k} \\; \\widehat N_{E, s, k}) / (\\widehat N_{C, s, k} + \\widehat N_{E, s, k}) }{ \\sum_{s=1}^S (\\widehat N_{C, s, k} \\; \\widehat N_{E, s, k}) / (\\widehat N_{C, s, k} + \\widehat N_{E, s, k}) }, N_{C,s,k}, N_{E,s,k} planned sample size s-th strata k-th analysis control group experimental group, respectively. \\widehat N_{C,s,k}, \\widehat N_{E,s,k} observed sample size s-th strata k-th analysis control group experimental group, respectively.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"simulations","dir":"Articles","previous_headings":"","what":"Simulations","title":"Group sequential design for binary outcomes","text":"quick 20,000 simulations compare density histogram outcomes standard normal density. Assume r=1, d = 1, p_C=p_E=0.125, N=200. compute \\sigma 0.047. 
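The quoted standard deviation of about 0.047 follows directly from the variance formula with p = 0.125 and 100 subjects per arm:

```r
# sigma = sqrt(p * (1 - p) * (1 / n_c + 1 / n_e)) with p = 0.125, n_c = n_e = 100
p <- 0.125
n_per_arm <- 100
sqrt(p * (1 - p) * (2 / n_per_arm)) # approximately 0.0468
```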
Even huge sample size normal density fits quite well flatness middle.","code":"# Hypothesized failure rate p <- .125 # Other parameters set.seed(123) r <- 1 n <- 200 n_c <- n / (r + 1) n_e <- r * n / (r + 1) library(ggplot2) # Generate random counts of events for each treatment x_c <- rbinom(n = 20000, size = n_c, prob = p) x_e <- rbinom(n = 20000, size = n_e, prob = p) # Treatment difference estimate thetahat <- x_c / n_c - x_e / n_e # Standard error under H0 pbar <- (x_c + x_e) / n se0 <- sqrt(pbar * (1 - pbar) * (1 / n_c + 1 / n_e)) # Z to test H0 z <- thetahat / se0 x <- seq(-4, 4, .1) se0a <- sqrt(p * (1 - p) * (1 / n_c + 1 / n_e)) y <- data.frame(z = x, Density = dnorm(x = x, mean = 0, sd = 1)) ggplot() + geom_histogram(data = data.frame(z), aes(x = z, y = ..density..), color = 1, fill = \"white\") + geom_line(data = y, aes(x = z, y = Density), linetype = 1) + ylab(\"Density\") + ggtitle(\"Binomial outcomes by simulation vs. asymptotic normal density\", subtitle = \"Histogram of 20,000 simulations\" ) #> Warning: The dot-dot notation (`..density..`) was deprecated in ggplot2 3.4.0. #> ℹ Please use `after_stat(density)` instead. #> This warning is displayed once every 8 hours. #> Call `lifecycle::last_lifecycle_warnings()` to see where this warning was #> generated."},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"unstratified-fixed-design","dir":"Articles","previous_headings":"Examples","what":"Unstratified Fixed Design","title":"Group sequential design for binary outcomes","text":"example discussed section unstratified fixed design equal sized groups detect 30% reduction mortality associated congestive heart failure, 1-year mortality control group assumed greater 0.4. p_C=0.4, p_E = .28. null hypothesis, assume p_C=p_E =0.34. desire 90% power two-sided test two proportions \\alpha = 0.05. like calculate sample size achieve 90% power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign2","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"gsDesign2","title":"Group sequential design for binary outcomes","text":"First, set parameters. calculate variance H_0 H_1. mathematical formulation shown follows. \\begin{array}{ll} \\sigma^2_{H_0} = p^{pool} \\left(1 - p^{pool} \\right) \\left(\\frac{1}{N_C} + \\frac{1}{N_{E}} \\right) = p^{pool} \\left(1 - p^{pool} \\right) \\left(\\frac{1}{N \\xi_C} + \\frac{1}{N \\xi_E} \\right) \\overset{r=1}{=} p^{pool} \\left(1 - p^{pool} \\right) \\frac{4}{N} \\\\ \\sigma^2_{H_1} = \\frac{p_C \\left(1 - p_C \\right)}{N_C} + \\frac{p_E \\left(1 - p_E \\right)}{N_E} = \\frac{p_C \\left(1 - p_C \\right)}{N \\xi_C} + \\frac{p_E \\left(1 - p_E \\right)}{N \\xi_E} \\overset{r=1}{=} \\left[ p_C \\left(1 - p_C \\right) + p_E \\left(1 - p_E \\right) \\right] \\frac{2}{N} \\end{array} calculation results Next, calculate standardized treatment effect H_0 H_1, whose mathematical formulation \\begin{array}{ll} \\theta_{H_0} = 0; \\\\ \\theta_{H_1} = \\frac{|p_c - p_e|}{\\sigma_{H_1}} \\end{array}. calculation results logic implemented function gs_info_rd(). plugging theta info gs_design_npe(), one can calculate sample size achieve 90% power. 
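As an added cross-check (not in the original vignette), the fixed-design sample size for these rates can also be computed with gsDesign::nBinomial(), the same function used later for the 0.15 versus 0.10 group sequential example.

```r
library(gsDesign)

# Fixed-design sample size for 90% power, one-sided alpha = 0.025,
# control rate 0.4 vs experimental rate 0.28, 1:1 randomization
nBinomial(
  p1 = .4,      # control event rate under H1
  p2 = .28,     # experimental event rate under H1
  delta0 = 0,   # superiority test
  alpha = .025, # one-sided Type I error
  beta = .1,    # Type II error (90% power)
  ratio = 1
)
```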
logic implement gs_design_rd() calculate sample size given fixed power one-step.","code":"p_c <- .28 p_e <- .4 p_pool <- (p_c + p_e) / 2 n <- 1 ratio <- 1 n_c <- n / (1 + ratio) n_e <- n_c * ratio sigma_h0 <- sqrt(p_pool * (1 - p_pool) * 4 / n) sigma_h1 <- sqrt((p_c * (1 - p_c) + p_e * (1 - p_e)) * 2 / n) info_h0 <- 1 / (sigma_h0^2) info_h1 <- 1 / (sigma_h1^2) theta_h0 <- 0 theta_h1 <- abs(p_c - p_e) / sigma_h1 tibble::tribble( ~n_c, ~n_e, ~p_c, ~p_e, ~theta_h1, ~theta_h0, ~info_h1, ~info_h0, n_c, n_e, p_c, p_e, theta_h1, theta_h0, info_h1, info_h0, ) %>% gt::gt() x <- gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), n = tibble::tibble(stratum = \"All\", n = 1, analysis = 1), rd0 = 0, ratio = 1, weight = \"unstratified\" ) x %>% gt::gt() %>% gt::fmt_number(columns = 5:8, decimals = 6) # under info_scale = \"h0_info\" y_0 <- gs_design_npe( theta = .4 - .28, info = x$info0, info0 = x$info0, info_scale = \"h0_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) # under info_scale = \"h1_info\" y_1 <- gs_design_npe( theta = .4 - .28, info = x$info1, info0 = x$info0, info_scale = \"h1_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) # under info_scale = \"h0_h1_info\" y_2 <- gs_design_npe( theta = .4 - .28, info = x$info1, info0 = x$info0, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, upper = gs_b, lower = gs_b, upar = list(par = -qnorm(.025)), lpar = list(par = -Inf) ) tibble( `info_scale = \"h0_info\"` = y_0$info0[1] / x$info0[1], `info_scale = \"h1_info\"` = y_1$info1[1] / x$info1[1], `info_scale = \"h0_h1_info\"` = y_2$info[1] / x$info1[1] ) %>% gt::gt() %>% gt::tab_header(title = \"The sample size calculated by gsDesign2 under 3 info_scale\") z_info_scale_0 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h0_info\" ) z_info_scale_1 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h1_info\" ) z_info_scale_2 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .28), p_e = tibble::tibble(stratum = \"All\", rate = .4), rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = -qnorm(.025), lpar = -Inf, info_scale = \"h0_h1_info\" )"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"east","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"EAST","title":"Group sequential design for binary outcomes","text":"Sample size calculated EAST","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary","dir":"Articles","previous_headings":"Examples > Unstratified Fixed Design","what":"Summary","title":"Group sequential design for binary outcomes","text":"","code":"tibble::tibble( gsDesign2_info_scale_0 = z_info_scale_0$analysis$n, gsDesign2_info_scale_1 = z_info_scale_1$analysis$n, gsDesign2_info_scale_2 = z_info_scale_2$analysis$n, gsDesign = x_gsdesign$n, EAST_unpool 
= 645, EAST_pool = 651 ) %>% gt::gt() %>% gt::tab_spanner( label = \"gsDesign2\", columns = c(gsDesign2_info_scale_0, gsDesign2_info_scale_1, gsDesign2_info_scale_2) ) %>% gt::tab_spanner( label = \"EAST\", columns = c(EAST_unpool, EAST_pool) ) %>% cols_label( gsDesign2_info_scale_0 = \"info_scale = \\\"h0_info\\\"\", gsDesign2_info_scale_1 = \"info_scale = \\\"h1_info\\\"\", gsDesign2_info_scale_2 = \"info_scale = \\\"h0_h1_info\\\"\", EAST_unpool = \"un-pooled\", EAST_pool = \"pooled\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"unstratified-group-sequential-design","dir":"Articles","previous_headings":"Examples","what":"Unstratified Group Sequential Design","title":"Group sequential design for binary outcomes","text":"example discussed section unstratified group sequential design equal sized groups detect p_C = 0.15, p_E = .1. null hypothesis, assume p_C = p_E = 0.125. desire 90% power two-sided test two proportions \\alpha = 0.05. like calculate sample size achieve 90% power.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign2-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"gsDesign2","title":"Group sequential design for binary outcomes","text":"calculate sample size, one can use gs_design_rd(). logic gs_design_rd() calculate sample size fixed design first. logic implemented gs_design_rd().","code":"x_gs <- gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), n = tibble::tibble(stratum = \"All\", n = 1:3 / 3, analysis = 1:3), rd0 = 0, ratio = 1, weight = \"unstratified\" ) x_gs %>% gt::gt() %>% gt::tab_header(title = \"The statistical information of the group sequential design\") y_gs0 <- gs_design_npe( theta = .05, info = x_gs$info0, info0 = x_gs$info0, info_scale = \"h0_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) y_gs1 <- gs_design_npe( theta = .05, info = x_gs$info1, info0 = x_gs$info1, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) y_gs2 <- gs_design_npe( theta = .05, info = x_gs$info1, info0 = x_gs$info0, info_scale = \"h0_h1_info\", alpha = .025, beta = .1, binding = FALSE, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE ) tibble( `info_scale = \"h0_info\"` = y_gs0$info0 / x_gs$info0[3], `info_scale = \"h1_info\"` = y_gs1$info1 / x_gs$info1[3], `info_scale = \"h0_h1_info\"` = y_gs2$info / x_gs$info1[3] ) %>% gt::gt() %>% gt::tab_header( title = \"The sample size calculated by `gsDesign2` under 3 info_scale\", subtitle = \"under group sequential design\" ) x_gsdesign2_info_scale_0 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h0_info\" ) x_gsdesign2_info_scale_1 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", 
rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h1_info\" ) x_gsdesign2_info_scale_2 <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .1, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, info_scale = \"h0_h1_info\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"gsdesign-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"gsDesign","title":"Group sequential design for binary outcomes","text":"","code":"n_fix <- nBinomial( # Control event rate p1 = .15, # Experimental event rate p2 = .1, # Null hypothesis event rate difference (control - experimental) delta0 = 0, # 1-sided Type I error alpha = .025, # Type II error (1 - Power) beta = .1, # Experimental/Control randomization ratio ratio = 1 ) cat(\"The sample size of fixed-design calculated by `gsDesign` is \", n_fix, \".\\n\") #> The sample size of fixed-design calculated by `gsDesign` is 1834.641 . x_gsdesign <- gsDesign( k = 3, test.type = 1, # 1-sided Type I error alpha = .025, # Type II error (1 - Power) beta = .1, # If test.type = 5 or 6, this sets maximum spending for futility # under the null hypothesis. Otherwise, this is ignored. astar = 0, timing = 1:3 / 3, sfu = sfLDOF, sfupar = NULL, sfl = sfLDOF, sflpar = NULL, # Difference in event rates under alternate hypothesis delta = 0, # Difference in rates under H1 delta1 = .05, # Difference in rates under H0 delta0 = 0, endpoint = \"Binomial\", # Fixed design sample size from nBinomial above n.fix = n_fix ) cat(\"The sample size calcuated by `gsDesign` is \", x_gsdesign$n.I, \".\\n\") #> The sample size calcuated by `gsDesign` is 618.7954 1237.591 1856.386 . 
gsBoundSummary(x_gsdesign, digits = 4, ddigits = 2, tdigits = 1) #> Analysis Value Efficacy #> IA 1: 33% Z 3.7103 #> N: 619 p (1-sided) 0.0001 #> ~delta at bound 0.0985 #> P(Cross) if delta=0 0.0001 #> P(Cross) if delta=0.05 0.0338 #> IA 2: 67% Z 2.5114 #> N: 1238 p (1-sided) 0.0060 #> ~delta at bound 0.0472 #> P(Cross) if delta=0 0.0060 #> P(Cross) if delta=0.05 0.5603 #> Final Z 1.9930 #> N: 1857 p (1-sided) 0.0231 #> ~delta at bound 0.0306 #> P(Cross) if delta=0 0.0250 #> P(Cross) if delta=0.05 0.9000"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"east-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"EAST","title":"Group sequential design for binary outcomes","text":"Sample size calculated EAST Sample size calculated EAST Sample size calculated EAST","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary-1","dir":"Articles","previous_headings":"Examples > Unstratified Group Sequential Design","what":"Summary","title":"Group sequential design for binary outcomes","text":"","code":"tibble::tibble( gsDesign2_info_scale_0 = x_gsdesign2_info_scale_0$analysis$n, gsDesign2_info_scale_1 = x_gsdesign2_info_scale_1$analysis$n, gsDesign2_info_scale_2 = x_gsdesign2_info_scale_2$analysis$n, gsDesign = x_gsdesign$n.I, EAST_unpool = c(617, 1233, 1850), EAST_pool = c(619, 1238, 1857) ) %>% gt::gt() %>% gt::tab_spanner( label = \"gsDesign2\", columns = c(gsDesign2_info_scale_0, gsDesign2_info_scale_1, gsDesign2_info_scale_2) ) %>% gt::tab_spanner( label = \"EAST\", columns = c(EAST_unpool, EAST_pool) ) %>% cols_label( gsDesign2_info_scale_0 = \"info_scale = \\\"h0_info\\\"\", gsDesign2_info_scale_1 = \"info_scale = \\\"h1_info\\\"\", gsDesign2_info_scale_2 = \"info_scale = \\\"h0_h1_info\\\"\", EAST_unpool = \"un-pooled\", EAST_pool = \"pooled\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"stratified-group-sequential-design","dir":"Articles","previous_headings":"Examples","what":"Stratified Group Sequential Design","title":"Group sequential design for binary outcomes","text":"example, consider 3 strata group sequential design 3 analyses. First, calculate variance \\left\\{ \\begin{array}{ll} \\sigma^2_{H_0,k,s} & = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right) = p_{k,s}^{pool} \\left(1 - p^{pool}_{k,s} \\right) \\left(\\frac{1}{ \\frac{\\xi_s}{1+r} N_{k}} + \\frac{1}{ \\frac{r \\xi_s}{1+r} N_{k}} \\right) \\\\ \\sigma_{H_1,k,s}^2 & = \\frac{p_{C,s} (1- p_{C,s})}{N_{C,k,s}} + \\frac{p_{E,s} (1 - p_{E,s})}{N_{E,k,s}} = \\frac{p_{C,s} (1- p_{C,s})}{\\frac{\\xi_s}{1+r} N_{k}} + \\frac{p_{E,s} (1 - p_{E,s})}{\\frac{r \\xi_s}{1+r} N_{k}} \\end{array} \\right. Second, calculate weight using inverse variance w_{s,k} = \\frac{1/\\sigma^2_{s,k}}{\\sum_{s=1}^S 1/\\sigma^2_{s,k}}. Third, calculate weighted risk difference weighted statistical information. \\left\\{ \\begin{array}{ll} \\delta_{H_0,k} & = 0\\\\ \\delta_{H_1,k} & = \\sum_{s=1}^S w_{k,s} |p_{C,s} - p_{E,s}| \\end{array} \\right. 
\\\\ \\left\\{ \\begin{array}{ll} \\mathcal I_{H_0,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{C, k, s}} + w_{k,s}^2 \\frac{p_{k,s}^{pool} (1 - p_{k,s}^{pool})}{N_{E, k, s}} \\right]^{-1}\\\\ \\mathcal I_{H_1,k} & = \\left[ \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{C,k,s} (1 - p_{C,k,s})}{N_{C,k,s}} + \\sum_{s=1}^S w_{k,s}^2 \\frac{p_{E,k,s} (1 - p_{E,k,s})}{N_{E,k,s}} \\right]^{-1} \\end{array} \\right. \\\\ logic implemented gs_design_rd().","code":"ratio <- 1 prevalence_ratio <- c(4, 5, 6) p_c_by_stratum <- c(.3, .37, .6) p_e_by_stratum <- c(.25, .3, .5) p_c <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = p_c_by_stratum) p_e <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = p_e_by_stratum) ratio_strata_c <- tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), ratio = prevalence_ratio) ratio_strata_e <- ratio_strata_c n <- 1 info_frac <- 1:3 / 3 n_c <- n / (1 + ratio) n_e <- ratio * n_c x <- p_c %>% rename(p_c = rate) %>% left_join(p_e) %>% rename(p_e = rate) %>% mutate(p_pool = (p_c + p_e) / 2) %>% mutate( xi_c = ( ratio_strata_c %>% mutate(prop = ratio / sum(ratio)) )$prop ) %>% mutate( xi_e = ( ratio_strata_e %>% mutate(prop = ratio / sum(ratio)) )$prop ) %>% mutate(n_c = n_c * xi_c, n_e = n_e * xi_e) x %>% gt::gt() %>% gt::fmt_number(columns = 4:8, decimals = 4) %>% gt::tab_footnote( footnote = \"p_pool = (p_c * n_c + p_e * n_e) / (n_c * n_e).\", locations = gt::cells_column_labels(columns = p_pool) ) %>% gt::tab_footnote( footnote = \"xi_c = sample size of a strata / sample size of the control arm.\", locations = gt::cells_column_labels(columns = xi_c) ) %>% gt::tab_footnote( footnote = \"xi_e = sample size of a strata / sample size of the experimental arm.\", locations = gt::cells_column_labels(columns = xi_e) ) %>% gt::tab_footnote( footnote = \"n_c = total sample size of the control arm.\", locations = gt::cells_column_labels(columns = n_c) ) %>% gt::tab_footnote( footnote = \"n_e = total size of the experimental arm.\", locations = gt::cells_column_labels(columns = n_e) ) %>% gt::tab_header(title = \"Stratified Example\") x <- x %>% union_all(x) %>% union_all(x) %>% mutate(Analysis = rep(1:3, each = 3)) %>% left_join(tibble(Analysis = 1:3, IF = info_frac)) %>% mutate(n_c = n_c * IF, n_e = n_e * IF) %>% select(Analysis, stratum, p_c, p_pool, p_e, n_c, n_e, xi_c, xi_e) %>% mutate( sigma_h0 = sqrt(p_pool * (1 - p_pool) * (1 / n_c + 1 / n_e)), sigma_h1 = sqrt(p_c * (1 - p_c) / n_c + p_e * (1 - p_e) / n_e) ) x %>% gt() %>% gt::fmt_number(6:11, decimals = 4) %>% gt::tab_footnote( footnote = \"sigma_h0 = the H0 sd per stratum per analysis.\", locations = gt::cells_column_labels(columns = sigma_h0) ) %>% gt::tab_footnote( footnote = \"sigma_h1 = the H0 sd per stratum per analysis.\", locations = gt::cells_column_labels(columns = sigma_h1) ) temp <- x %>% group_by(Analysis) %>% summarise( sum_invar_H0 = sum(1 / sigma_h0^2), sum_invar_H1 = sum(1 / sigma_h1^2), sum_ss = sum((n_c * n_e) / (n_c + n_e)) ) x <- x %>% left_join(temp) %>% mutate( weight_invar_H0 = 1 / sigma_h0^2 / sum_invar_H0, weight_invar_H1 = 1 / sigma_h1^2 / sum_invar_H1, weight_ss = (n_c * n_e) / (n_c + n_e) / sum_ss ) %>% select(-c(sum_invar_H0, sum_invar_H1, sum_ss)) x %>% gt() %>% fmt_number(6:14, decimals = 4) %>% gt::tab_footnote( footnote = \"weight_invar_H0 = the weight per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = weight_invar_H0) ) %>% gt::tab_footnote( footnote = \"weight_invar_H1 
= the weight per stratum per analysis calculated by INVAR by using variance under H1.\", locations = gt::cells_column_labels(columns = weight_invar_H1) ) %>% gt::tab_footnote( footnote = \"weight_ss = the weight per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = weight_ss) ) x <- x %>% group_by(Analysis) %>% summarise( rd_invar_H0 = sum(weight_invar_H0 * abs(p_c - p_e)), rd_invar_H1 = sum(weight_invar_H1 * abs(p_c - p_e)), rd_ss = sum(weight_ss * abs(p_c - p_e)), rd0 = 0, info_invar_H0 = 1 / sum( weight_invar_H0^2 * p_c * (1 - p_c) / n_c + weight_invar_H0^2 * p_e * (1 - p_e) / n_e ), info_invar_H1 = 1 / sum( weight_invar_H1^2 * p_c * (1 - p_c) / n_c + weight_invar_H1^2 * p_e * (1 - p_e) / n_e ), info_ss = 1 / sum( weight_ss^2 * p_c * (1 - p_c) / n_c + weight_ss^2 * p_e * (1 - p_e) / n_e ), info0_invar_H0 = 1 / sum( weight_invar_H0^2 * p_pool * (1 - p_pool) / n_c + weight_invar_H0^2 * p_pool * (1 - p_pool) / n_e ), info0_invar_H1 = 1 / sum( weight_invar_H1^2 * p_pool * (1 - p_pool) / n_c + weight_invar_H1^2 * p_pool * (1 - p_pool) / n_e ), info0_ss = 1 / sum( weight_ss^2 * p_pool * (1 - p_pool) / n_c + weight_ss^2 * p_pool * (1 - p_pool) / n_e ) ) x %>% gt::gt() %>% fmt_number(c(2:4, 6:11), decimals = 6) %>% gt::tab_footnote( footnote = \"info_invar_H0 = the statistical information under H1 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info_invar_H0) ) %>% gt::tab_footnote( footnote = \"info_invar_H1 = the statistical information under H1 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info_invar_H1) ) %>% gt::tab_footnote( footnote = \"info_ss = the statistical information under H1 per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = info_ss) ) %>% gt::tab_footnote( footnote = \"info0_invar_H0 = the statistical information under H0 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info0_invar_H0) ) %>% gt::tab_footnote( footnote = \"info0_invar_H1 = the statistical information under H0 per stratum per analysis calculated by INVAR by using variance under H0.\", locations = gt::cells_column_labels(columns = info0_invar_H1) ) %>% gt::tab_footnote( footnote = \"info0_ss = the statistical information under H0 per stratum per analysis calculated by SS.\", locations = gt::cells_column_labels(columns = info0_ss) ) # Sample size under H0 ---- y_invar_h0 <- gs_design_npe( theta = x$rd_invar_H0, info = x$info0_invar_H0, info0 = x$info0_invar_H0, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) y_invar_h1 <- gs_design_npe( theta = x$rd_invar_H1, info = x$info0_invar_H1, info0 = x$info0_invar_H1, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) y_ss <- gs_design_npe( theta = x$rd_ss, info = x$info0_ss, info0 = x$info0_ss, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) # Sample size under H1 ---- yy_invar_h0 <- gs_design_npe( theta = 
x$rd_invar_H0, info = x$info_invar_H0, info0 = x$info0_invar_H0, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) yy_invar_h1 <- gs_design_npe( theta = x$rd_invar_H1, info = x$info_invar_H1, info0 = x$info0_invar_H1, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) yy_ss <- gs_design_npe( theta = x$rd_ss, info = x$info_ss, info0 = x$info0_ss, info_scale = \"h0_h1_info\", alpha = 0.025, beta = 0.2, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = rep(-Inf, 3), test_lower = FALSE, ) ans_math <- tibble::tibble( `Weighting method` = rep(c(\"INVAR-H0\", \"INVAR-H1\", \"Sample Size\"), 2), `Calculated under` = c(rep(\"H0\", 3), rep(\"H1\", 3)), `Sample size` = c( y_invar_h0$info[3] / x$info0_invar_H0[3], y_invar_h1$info[3] / x$info0_invar_H1[3], y_ss$info[3] / x$info0_ss[3], yy_invar_h0$info[3] / x$info_invar_H0[3], yy_invar_h1$info[3] / x$info_invar_H1[3], yy_ss$info[3] / x$info_ss[3] ) ) ans_math %>% gt::gt() %>% gt::tab_header(title = \"Sample size calculated by INVAR and SS\") ## sample size weighting + information scale = \"h0_info\" x_ss0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## sample size weighting + information scale = \"h1_info\" x_ss1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## sample size weighting + information scale = \"h0_h1_info\" x_ss2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 4:6), weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_info\" x_invar0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h1_info\" x_invar1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = 
gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_h1_info\" x_invar2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_info\" x_invar_h1_0 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h1_info\" x_invar_h1_1 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h1_info\", binding = FALSE ) ## inverse variance weighting + information scale = \"h0_h1_info\" x_invar_h1_2 <- gs_design_rd( p_c = p_c, p_e = p_e, info_frac = 1:3 / 3, rd0 = 0, alpha = .025, beta = .2, ratio = 1, stratum_prev = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), prevalence = 1:3), weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)), info_scale = \"h0_h1_info\", binding = FALSE ) ans <- tibble::tibble( INVAR0 = x_invar0$analysis$n[1:3], INVAR1 = x_invar1$analysis$n[1:3], INVAR2 = x_invar2$analysis$n[1:3], SS0 = x_ss0$analysis$n[1:3], SS1 = x_ss1$analysis$n[1:3], SS2 = x_ss2$analysis$n[1:3] ) ans %>% gt::gt() %>% gt::tab_header(title = \"Sample size calculated by INVAR and SS\") %>% gt::tab_spanner( label = \"Inverse variance weighting \", columns = c( \"INVAR0\", \"INVAR1\", \"INVAR2\" ) ) %>% gt::tab_spanner( label = \"Sample size weighting\", columns = c(SS0, SS1, SS2) ) %>% cols_label( INVAR0 = \"info_scale = \\\"h0_info\\\"\", INVAR1 = \"info_scale = \\\"h1_info\\\"\", INVAR2 = \"info_scale = \\\"h0_h1_info\\\"\", SS0 = \"info_scale = \\\"h0_info\\\"\", SS1 = \"info_scale = \\\"h1_info\\\"\", SS2 = \"info_scale = \\\"h0_h1_info\\\"\" )"},{"path":"https://merck.github.io/gsDesign2/articles/story-risk-difference.html","id":"summary-2","dir":"Articles","previous_headings":"","what":"Summary","title":"Group sequential design for binary outcomes","text":"\\delta_{k,s}^{null} risk difference H_0. 0, positive, negative superiority, super-superiority non-inferiority design, respectively. 
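To inspect one of the derived designs directly, the sketch below displays the per-analysis results of the sample-size-weighted design with info_scale = "h0_info"; it assumes, as in the comparison table above, that the $analysis component of a gs_design_rd() result holds the per-analysis summary.

```r
# Per-analysis summary of the stratified design with sample size weighting
x_ss0$analysis %>% gt::gt()
```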
superiority design, \\widehat \\sigma^2_{H_0,k,s} = \\widehat p _{k,s}^{pool} \\left(1 - \\widehat p ^{pool}_{k,s} \\right) \\left( \\frac{1}{N_{C,k,s}} + \\frac{1}{N_{E,k,s}} \\right) super-superiority design non-inferiority design, \\hat \\sigma^2 _{H_0,k,s} = \\frac {\\widehat p _{C0,k,s}(1- \\widehat p_{C0,k,s})}{N_ {C,k,s}} + \\frac{ \\widehat p_{E0,k,s} (1 - \\widehat p_{E0,k,s})}{N_{E,k,s}}","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Computing spending boundaries in group sequential design","text":"compare derivation different spending bounds using gsDesign2 gsDesign packages. gsDesign, 6 types bounds. demonstrate replicate using gsDesign2. gsDesign2, gs_spending_bound() function can used derive spending boundaries group sequential design derivations power calculations. demonstrate gs_design_ahr() function , using designs proportional hazards assumptions compare gsDesign::gsSurv(). Since sample size methods differ gsDesign2::gs_design_ahr() gsDesign::gsSurv() functions, use continuous sample sizes spending bounds (Z-values, nominal p-values, spending) identical except noted. Indeed, able reproduce bounds high degree accuracy. Due different sample size methods, sample size boundary approximations vary slightly. also present seventh example implement futility bound based observed hazard ratio well Haybittle-Peto-like efficacy bound. particular, futility bound difficult implement using gsDesign package straightforward using gsDesign2. last two examples, implement integer sample size event counts using to_integer() function gsDesign2 package toInteger() function gsDesign package. generally used cases comparing package computations Examples 1–5. examples, use following design assumptions: choice Type II error 0.15 corresponding 85% power intentional. allows impactful futility bounds interim analyses. Many teams may decide typical 90% power (beta = .1), can make futility bounds less likely impact early decisions.","code":"trial_duration <- 36 # Planned trial duration info_frac <- c(.35, .7, 1) # Information fraction at analyses # 16 month planned enrollment with constant rate enroll_rate <- define_enroll_rate(duration = 16, rate = 1) # Minimum follow-up for gsSurv() (computed) minfup <- trial_duration - sum(enroll_rate$duration) # Failure rates fail_rate <- define_fail_rate( duration = Inf, # Single time period, exponential failure fail_rate = log(2) / 12, # Exponential time-to-event with 12 month median hr = .7, # Proportional hazards dropout_rate = -log(.99) / 12 # 1% dropout rate per year ) alpha <- 0.025 # Type I error (one-sided) beta <- 0.15 # 85% power = 15% Type II error ratio <- 1 # Randomization ratio (experimental / control)"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"examples","dir":"Articles","previous_headings":"","what":"Examples","title":"Computing spending boundaries in group sequential design","text":"Analogous gsDesign package, look 6 variations combinations efficacy futility bounds.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-1-efficacy-bound-only","dir":"Articles","previous_headings":"Examples","what":"Example 1: Efficacy bound only","title":"Computing spending boundaries in group sequential design","text":"One-sided design efficacy bound. 
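Before walking through the examples, it can help to see how much Type I error the Lan-DeMets O'Brien-Fleming spending function allocates at the planned information fractions; the sketch below reuses the alpha and info_frac values defined in the assumptions above.

```r
# Cumulative alpha-spending at information fractions 0.35, 0.70, and 1
gsDesign::sfLDOF(alpha = alpha, t = info_frac)$spend
```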
easy way use fixed bound (lower = gs_b) negative infinite bounds (lpar = rep(-Inf, 3)); summary table produced, infinite bounds appear. upper bound implements spending bound (upper = gs_spending_bound) list objects provided upar describe spending function associated parameters. parts upar list used sf = gsDesign::sfLDOF select Lan-DeMets spending function approximates O’Brien-Fleming bound. total_spend = alpha sets total spending targeted Type error study. upper bound provides Type error control design specified elsewhere. Now check gsDesign::gsSurv(). noted , sample size event counts vary slightly design derived using gs_design_ahr(). also results slightly different crossing probabilities alternate hypothesis interim analyses well slightly different approximate hazard ratios required cross bounds. Comparing Z-value bounds directly see approximately 6 digits precision parameters chosen (r=32, tol=1e-08):","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) one_sided <- gsDesign2::gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error (power) info_scale = \"h0_h1_info\", # Default # Upper spending bound and corresponding parameter(s) upper = gs_spending_bound, upar = upar, # No lower bound lower = gs_b, lpar = rep(-Inf, 3) ) one_sided |> summary() |> gsDesign2::as_gt(title = \"Efficacy bound only\", subtitle = \"alpha-spending\") oneSided <- gsSurv( alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, r = 32, tol = 1e-08, # Precision parameters for computations test.type = 1, # One-sided bound; efficacy only # Upper bound parameters sfu = upar$sf, sfupar = upar$param, ) oneSided |> gsBoundSummary() #> Analysis Value Efficacy #> IA 1: 35% Z 3.6128 #> N: 356 p (1-sided) 0.0002 #> Events: 100 ~HR at bound 0.4852 #> Month: 14 P(Cross) if HR=1 0.0002 #> P(Cross) if HR=0.7 0.0338 #> IA 2: 70% Z 2.4406 #> N: 394 p (1-sided) 0.0073 #> Events: 200 ~HR at bound 0.7079 #> Month: 23 P(Cross) if HR=1 0.0074 #> P(Cross) if HR=0.7 0.5341 #> Final Z 2.0002 #> N: 394 p (1-sided) 0.0227 #> Events: 286 ~HR at bound 0.7891 #> Month: 36 P(Cross) if HR=1 0.0250 #> P(Cross) if HR=0.7 0.8500 one_sided$bound$z - oneSided$upper$bound #> [1] -1.349247e-07 9.218765e-07 3.515345e-07"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-2-symmetric-2-sided-design","dir":"Articles","previous_headings":"Examples","what":"Example 2: Symmetric 2-sided design","title":"Computing spending boundaries in group sequential design","text":"now derive symmetric 2-sided design. requires use argument h1_spending = FALSE use \\alpha-spending upper lower bounds. lower bound labeled futility bound table, better termed efficacy bound control better experimental treatment. compare gsDesign::gsSurv(). 
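The introduction above mentions to_integer() for converting a design to integer sample size and event counts. A sketch of applying it to the Example 1 design follows; that the summary() and as_gt() methods apply unchanged to the converted object is assumed here from the patterns used elsewhere in these examples.

```r
# Round the Example 1 design to integer sample size and event counts,
# then summarize it with the same as_gt() pattern used above
one_sided |>
  to_integer() |>
  summary() |>
  gsDesign2::as_gt(
    title = "Efficacy bound only",
    subtitle = "Integer sample size and event counts"
  )
```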
Comparing Z-value bounds directly, see approximately 6 digits accuracy.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) symmetric <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for power info_scale = \"h0_h1_info\", # Default # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Symmetric designs use binding bounds binding = TRUE, h1_spending = FALSE # Use null hypothesis spending for lower bound ) symmetric |> summary() |> gsDesign2::as_gt( title = \"2-sided Symmetric Design\", subtitle = \"Single spending function\" ) Symmetric <- gsSurv( test.type = 2, # Two-sided symmetric bound alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param ) Symmetric |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -3.6128 #> N: 356 p (1-sided) 0.0002 0.0002 #> Events: 100 ~HR at bound 0.4852 2.0609 #> Month: 14 P(Cross) if HR=1 0.0002 0.0002 #> P(Cross) if HR=0.7 0.0338 0.0000 #> IA 2: 70% Z 2.4406 -2.4406 #> N: 394 p (1-sided) 0.0073 0.0073 #> Events: 200 ~HR at bound 0.7079 1.4126 #> Month: 23 P(Cross) if HR=1 0.0074 0.0074 #> P(Cross) if HR=0.7 0.5341 0.0000 #> Final Z 2.0002 -2.0002 #> N: 394 p (1-sided) 0.0227 0.0227 #> Events: 286 ~HR at bound 0.7891 1.2673 #> Month: 36 P(Cross) if HR=1 0.0250 0.0250 #> P(Cross) if HR=0.7 0.8500 0.0000 dplyr::filter(symmetric$bound, bound == \"upper\")$z - Symmetric$upper$bound #> [1] -1.349247e-07 9.218765e-07 4.092976e-07 dplyr::filter(symmetric$bound, bound == \"lower\")$z - Symmetric$lower$bound #> [1] 1.349247e-07 -9.218765e-07 -4.092976e-07"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-3-asymmetric-2-sided-design-with-beta-spending-and-binding-futility","dir":"Articles","previous_headings":"Examples","what":"Example 3: Asymmetric 2-sided design with \\beta-spending and binding futility","title":"Computing spending boundaries in group sequential design","text":"Designs binding futility bounds generally considered acceptable Phase 3 trials Type error controlled futility bound crossed trial continues, infrequent occurrence. binding futility bound means Type error computations assume trial stops futility bound crossed. trial continues futility bound crossed, Type error longer controlled computed efficacy bound. Phase 2b study, may acceptable results slightly smaller sample size less stringent efficacy bounds first analysis comparable design non-binding futility bound presented Example 4. compare gsDesign::gsSurv(). 
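To see the symmetric structure without the survival-specific machinery, a sketch using gsDesign::gsDesign() with test.type = 2 is below. The timing, alpha, and beta values are taken from the assumptions above; treating gsDesign() as reproducing the Z-bounds shown in this example is an assumption, since it is not the function used here.
# Sketch: symmetric two-sided group sequential bounds on the Z scale only.
library(gsDesign)
d <- gsDesign(
  k = 3, test.type = 2, alpha = 0.025, beta = 0.15,
  timing = c(.35, .7, 1), sfu = sfLDOF
)
round(cbind(efficacy = d$upper$bound, futility = d$lower$bound), 4)
# For a symmetric design the lower bound is the negative of the upper bound
# at every analysis (up to numerical tolerance).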
Comparing Z-value bounds directly, see approximately 6 digits accuracy spite needing relaxing accuracy tol = 1e-07 call gsSurv() order get convergence.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = beta, param = -.5) asymmetric_binding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error and power info_scale = \"h0_h1_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Asymmetric beta-spending design using binding bounds binding = TRUE, h1_spending = TRUE # Use beta-spending for futility ) asymmetric_binding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric design with binding futility\", subtitle = \"Both alpha- and beta-spending used\" ) asymmetricBinding <- gsSurv( test.type = 3, # Two-sided asymmetric bound, binding futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-07, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 0.1436 #> N: 380 p (1-sided) 0.0002 0.4429 #> Events: 107 ~HR at bound 0.4971 0.9726 #> Month: 14 P(Cross) if HR=1 0.0002 0.5571 #> P(Cross) if HR=0.7 0.0387 0.0442 #> IA 2: 70% Z 2.4382 1.1807 #> N: 422 p (1-sided) 0.0074 0.1189 #> Events: 214 ~HR at bound 0.7164 0.8509 #> Month: 23 P(Cross) if HR=1 0.0074 0.8913 #> P(Cross) if HR=0.7 0.5679 0.0969 #> Final Z 1.9232 1.9232 #> N: 422 p (1-sided) 0.0272 0.0272 #> Events: 306 ~HR at bound 0.8024 0.8024 #> Month: 36 P(Cross) if HR=1 0.0250 0.9750 #> P(Cross) if HR=0.7 0.8500 0.1500 dplyr::filter(asymmetric_binding$bound, bound == \"upper\")$z - asymmetricBinding$upper$bound #> [1] -1.349247e-07 2.505886e-04 6.494369e-03 dplyr::filter(asymmetric_binding$bound, bound == \"lower\")$z - asymmetricBinding$lower$bound #> [1] -0.02803415 -0.02670908 -0.01598640"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-4-asymmetric-2-sided-design-with-beta-spending-and-non-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 4: Asymmetric 2-sided design with \\beta-spending and non-binding futility bound","title":"Computing spending boundaries in group sequential design","text":"gsDesign package, asymmetric designs non-binding \\beta-spending used futility default design. objectives type design include: Meaningful futility bounds stop trial early treatment benefit emerging experimental treatment vs. control. Type error controlled even trial continues futility bound crossed. compare gsDesign::gsSurv(). 
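The futility column in the table above traces back to the Hwang-Shih-DeCani beta-spending function with gamma = -0.5; a minimal sketch of the cumulative Type II error it allocates at the planned information fractions:
# Sketch: cumulative beta-spending for the futility bound (total spend = beta).
library(gsDesign)
beta <- 0.15
info_frac <- c(.35, .7, 1)
sfHSD(alpha = beta, t = info_frac, param = -.5)$spend
# Approximately 0.044, 0.097, and 0.150, matching the "P(Cross) if HR=0.7"
# futility probabilities reported by gsBoundSummary() above.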
Comparing Z-value bounds directly, see approximately 6 digits accuracy.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = beta, param = -.5) asymmetric_nonbinding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 info for Type II error and power info_scale = \"h0_h1_info\", # Default # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, # Asymmetric beta-spending design use binding bounds binding = FALSE, h1_spending = TRUE # Use beta-spending for futility ) asymmetric_nonbinding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric design with non-binding futility\", subtitle = \"Both alpha- and beta-spending used\" ) asymmetricNonBinding <- gsSurv( test.type = 4, # Two-sided asymmetric bound, non-binding futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricNonBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 0.1860 #> N: 398 p (1-sided) 0.0002 0.4262 #> Events: 112 ~HR at bound 0.5050 0.9654 #> Month: 14 P(Cross) if HR=1 0.0002 0.5738 #> P(Cross) if HR=0.7 0.0424 0.0442 #> IA 2: 70% Z 2.4406 1.2406 #> N: 440 p (1-sided) 0.0073 0.1074 #> Events: 224 ~HR at bound 0.7215 0.8471 #> Month: 23 P(Cross) if HR=1 0.0073 0.9020 #> P(Cross) if HR=0.7 0.5901 0.0969 #> Final Z 2.0002 2.0002 #> N: 440 p (1-sided) 0.0227 0.0227 #> Events: 320 ~HR at bound 0.7995 0.7995 #> Month: 36 P(Cross) if HR=1 0.0215 0.9785 #> P(Cross) if HR=0.7 0.8500 0.1500 dplyr::filter(asymmetric_nonbinding$bound, bound == \"upper\")$z - asymmetricNonBinding$upper$bound #> [1] -1.349247e-07 9.218765e-07 3.515345e-07 dplyr::filter(asymmetric_nonbinding$bound, bound == \"lower\")$z - asymmetricNonBinding$lower$bound #> [1] -0.03267431 -0.03311078 -0.02426999"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-5-asymmetric-2-sided-design-with-null-hypothesis-spending-and-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 5: Asymmetric 2-sided design with null hypothesis spending and binding futility bound","title":"Computing spending boundaries in group sequential design","text":"Now use null hypothesis probabilities set futility bounds. parameter alpha_star used set total spending futility bound null hypothesis. example, set 0.5 50% probability crossing futility bound interim final analyses combined. futility bound final analysis really role, use test_lower argument eliminate evaluation final analysis. arbitrary largely selected interim futility bounds can meaningful tests. case, minor trend favor control first second interim cross futility bound. less stringent \\beta-spending bounds previously described, still address potential ethical issue continuing trial minor trend favor control present. Comparing Z-value bounds directly, see approximately 6 digits accuracy. 
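To see the practical difference between binding and non-binding futility on the efficacy side, a small sketch with gsDesign::gsDesign() (Z-bounds only, no survival sample size) follows. The expectation, consistent with the tables in Examples 1, 3, and 4, is that non-binding efficacy bounds match the efficacy-only design while binding futility relaxes the later efficacy bounds slightly; this is a sketch under those assumptions, not the vignette's own derivation.
# Sketch: efficacy bounds with binding vs. non-binding beta-spending futility.
library(gsDesign)
nonbinding <- gsDesign(
  k = 3, test.type = 4, alpha = 0.025, beta = 0.15,
  timing = c(.35, .7, 1), sfu = sfLDOF, sfl = sfHSD, sflpar = -.5
)
binding <- gsDesign(
  k = 3, test.type = 3, alpha = 0.025, beta = 0.15,
  timing = c(.35, .7, 1), sfu = sfLDOF, sfl = sfHSD, sflpar = -.5
)
efficacy_only <- gsDesign(
  k = 3, test.type = 1, alpha = 0.025, beta = 0.15,
  timing = c(.35, .7, 1), sfu = sfLDOF
)
round(cbind(
  non_binding = nonbinding$upper$bound,
  binding = binding$upper$bound,
  efficacy_only = efficacy_only$upper$bound
), 4)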
gsSurv() require alternate arguments r tol.","code":"alpha_star <- .5 upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = alpha_star, param = 1) asymmetric_safety_binding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, lower = gs_spending_bound, lpar = lpar, test_lower = c(TRUE, TRUE, FALSE), # Asymmetric design use binding bounds binding = TRUE, h1_spending = FALSE # Use null-spending for futility ) asymmetric_safety_binding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with binding futility\", subtitle = \"Alpha-spending used for both bounds, asymmetrically\" ) asymmetricSafetyBinding <- gsSurv( test.type = 5, # Two-sided asymmetric bound, binding futility, H0 futility spending astar = alpha_star, # Total Type I error spend for futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricSafetyBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -0.7271 #> N: 356 p (1-sided) 0.0002 0.7664 #> Events: 101 ~HR at bound 0.4856 1.1565 #> Month: 14 P(Cross) if HR=1 0.0002 0.2336 #> P(Cross) if HR=0.7 0.0340 0.0060 #> IA 2: 70% Z 2.4405 -0.4203 #> N: 394 p (1-sided) 0.0073 0.6629 #> Events: 201 ~HR at bound 0.7082 1.0612 #> Month: 23 P(Cross) if HR=1 0.0074 0.3982 #> P(Cross) if HR=0.7 0.5353 0.0070 #> Final Z 1.9979 -0.2531 #> N: 394 p (1-sided) 0.0229 0.5999 #> Events: 286 ~HR at bound 0.7895 1.0304 #> Month: 36 P(Cross) if HR=1 0.0250 0.5000 #> P(Cross) if HR=0.7 0.8500 0.0072 dplyr::filter(asymmetric_safety_binding$bound, bound == \"upper\")$z - asymmetricSafetyBinding$upper$bound #> [1] -1.349247e-07 9.211210e-07 4.185954e-07 dplyr::filter(asymmetric_safety_binding$bound, bound == \"lower\")$z - asymmetricSafetyBinding$lower$bound[1:2] #> [1] 4.348992e-08 -3.276118e-08"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-6-asymmetric-2-sided-design-with-null-hypothesis-spending-and-non-binding-futility-bound","dir":"Articles","previous_headings":"Examples","what":"Example 6: Asymmetric 2-sided design with null hypothesis spending and non-binding futility bound","title":"Computing spending boundaries in group sequential design","text":", recommend non-binding bound presented binding bound example 5. eliminate final futility bound using test_lower argument. Addition, show eliminate efficacy bound interim 1 allowing team decide early stop trial efficacy without longer-term data. 
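The interim futility bounds in Example 5 follow directly from null hypothesis spending. A minimal sketch using the sfHSD spending function with param = 1 and total futility spend alpha_star = 0.5, the values used above:
# Sketch: cumulative H0 spending for the futility bound in Example 5.
library(gsDesign)
alpha_star <- 0.5
spend <- sfHSD(alpha = alpha_star, t = c(.35, .7, 1), param = 1)$spend
spend
# Because IA1 is the first analysis, its futility Z-bound is just the normal
# quantile of the cumulative spend (about -0.727, matching the table above).
qnorm(spend[1])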
corresponding gsDesign::gsSurv() design strictly comparable since option eliminate futility efficacy analyses enabled.","code":"upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lpar <- list(sf = gsDesign::sfHSD, total_spend = alpha_star, param = 1) asymmetric_safety_nonbinding <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = gs_spending_bound, upar = upar, test_upper = c(FALSE, TRUE, TRUE), lower = gs_spending_bound, lpar = lpar, test_lower = c(TRUE, TRUE, FALSE), # Asymmetric design use non-binding bounds binding = FALSE, h1_spending = FALSE # Use null-spending for futility ) |> to_integer() asymmetric_safety_nonbinding |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with non-binding futility\", subtitle = \"Alpha-spending used for both bounds, asymmetrically\" ) |> gt::tab_footnote(footnote = \"Integer-based sample size and event counts\") asymmetricSafetyNonBinding <- gsSurv( test.type = 6, # Two-sided asymmetric bound, binding futility, H0 futility spending astar = alpha_star, # Total Type I error spend for futility alpha = alpha, beta = beta, timing = info_frac, T = trial_duration, minfup = minfup, r = 32, tol = 1e-08, lambdaC = fail_rate$fail_rate, eta = fail_rate$dropout_rate, hr = fail_rate$hr, sfu = upar$sf, sfupar = upar$param, sfl = lpar$sf, sflpar = lpar$param ) asymmetricSafetyBinding |> gsBoundSummary() #> Analysis Value Efficacy Futility #> IA 1: 35% Z 3.6128 -0.7271 #> N: 356 p (1-sided) 0.0002 0.7664 #> Events: 101 ~HR at bound 0.4856 1.1565 #> Month: 14 P(Cross) if HR=1 0.0002 0.2336 #> P(Cross) if HR=0.7 0.0340 0.0060 #> IA 2: 70% Z 2.4405 -0.4203 #> N: 394 p (1-sided) 0.0073 0.6629 #> Events: 201 ~HR at bound 0.7082 1.0612 #> Month: 23 P(Cross) if HR=1 0.0074 0.3982 #> P(Cross) if HR=0.7 0.5353 0.0070 #> Final Z 1.9979 -0.2531 #> N: 394 p (1-sided) 0.0229 0.5999 #> Events: 286 ~HR at bound 0.7895 1.0304 #> Month: 36 P(Cross) if HR=1 0.0250 0.5000 #> P(Cross) if HR=0.7 0.8500 0.0072"},{"path":"https://merck.github.io/gsDesign2/articles/story-seven-test-types.html","id":"example-7-alternate-bound-types","dir":"Articles","previous_headings":"Examples","what":"Example 7: Alternate bound types","title":"Computing spending boundaries in group sequential design","text":"consider two types alternative boundary computation approaches. Computing futility bounds based hazard ratio. Computing efficacy bounds Haybittle-Peto related Fleming-Harrington-O’Brien approach. begin futility bound. consider non-binding futility bound impact efficacy bound. Assume clinical trial team wishes stop trial first two interim analyses targeted interim hazard ratio achieved. approach can require bit iteration (trial error) incorporate final design endpoint count; skip iteration . assume wish consider stopping futility hazard ratio greater 1 0.9 observed interim analyses 1 2 104 209 events observed, respectively. final analysis planned 300 events. wish translate hazard ratios specified corresponding Z-values; can done follows. add final futility bound -Inf, indicating final futility analysis; gives us vector Z-value bounds analyses. 
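The hazard-ratio-to-Z translation used for these futility bounds relies on the standard asymptotic approximation that var(log HR) is roughly 4 / events under 1:1 randomization. The sketch below shows the hand calculation next to gsDesign::hrn2z(); the sign convention follows the vignette, which negates hrn2z().
# Sketch: translating an observed hazard ratio and event count to a Z-value.
hr <- c(1, .9)          # HR thresholds at interims 1 and 2
events <- c(104, 209)   # targeted events at interims 1 and 2
# Hand calculation for 1:1 randomization: Z = -log(HR) * sqrt(events / 4)
-log(hr) * sqrt(events / 4)
# Same quantity via gsDesign (negated to match the vignette's convention)
-gsDesign::hrn2z(hr = hr, n = events)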
type bound, Type II error computed rather based bounds rather spending approach bounds computed based specified spending. efficacy bound, first consider Haybittle-Peto fixed bound interim analyses. Using Bonferroni approach, test nominal levels 0.001, 0.001, 0.023 3 analyses. accounting correlations, actually quite use 0.025 1-sided Type error allowed. allow user substitute code follows verify . alternative approach use fixed spending approach analysis suggested Fleming, Harrington, O’Brien (1984). , iteration shown, use piecewise linear spending function select interim bounds match desired Haybittle-Peto interim bounds. However, using approach slightly liberal final bound achieved still controls Type error. see targeted bounds achieved nominal p-values 0.0001 interim efficacy bound targeted hazard ratios interim futility bounds. methods, trial designers control design characteristics may desire. particular, note Haybittle-Peto efficacy bounds less stringent first interim stringent second interim corresponding O’Brien-Fleming-like bounds computed spending approach. may may desirable.","code":"# Targeted events at interim and final analysis # This is based on above designs and then adjusted, as necessary targeted_events <- c(104, 209, 300) interim_futility_z <- -gsDesign::hrn2z(hr = c(1, .9), n = targeted_events[1:2]) interim_futility_z #> [1] 0.0000000 0.7615897 lower <- gs_b # Allows specifying fixed Z-values for futility # Translated HR bounds to Z-value scale lpar <- c(interim_futility_z, -Inf) upper <- gs_b upar <- qnorm(c(.001, .001, .0023), lower.tail = FALSE) upper <- gs_spending_bound upar <- list( sf = gsDesign::sfLinear, total_spend = alpha, param = c(targeted_events[1:2] / targeted_events[3], c(.001, .0018) / .025), timing = NULL ) asymmetric_fixed_bounds <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = ratio, beta = beta, # Information fraction at analyses and trial duration info_frac = info_frac, analysis_time = trial_duration, # Precision parameters for computations r = 32, tol = 1e-08, # Use NULL information for Type I error, H1 information for Type II error info_scale = \"h0_info\", # Function and parameter(s) for upper spending bound upper = upper, upar = upar, lower = lower, lpar = lpar, # Non-binding futility bounds binding = FALSE ) |> to_integer() asymmetric_fixed_bounds |> summary() |> gsDesign2::as_gt( title = \"2-sided asymmetric safety design with fixed non-binding futility\", subtitle = \"Futility bounds computed to approximate HR\" ) |> gt::tab_footnote(footnote = \"Integer-based sample size and event counts\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"Spending time examples","text":"multiple scenarios event-based spending group sequential designs limitations terms ensuring adequate follow-ensuring adequate spending preserved final analysis. Example contexts often arises trials may delayed treatment effect, control failure rates different expected, multiple hypotheses tested. general, situations found ensuring adequate follow-duration adequate number events important fully evaluate potential effectiveness new treatment. testing multiple hypotheses, carefully thinking possible spending issues can critical. addition, group sequential trials, preserving adequate \\alpha-spending final evaluation hypothesis important difficult using traditional event-based spending. 
document, outline three examples demonstrate issues: importance adequate events adequate follow-duration ensure power fixed design, importance guaranteeing reasonable amount \\alpha-spending final analysis group sequential design. trial examining outcome biomarker positive overall populations, show importance considering design reacts incorrect design assumptions biomarker prevalence. group sequential design options, demonstrate concept spending time effective way adapt. Traditionally Lan DeMets (1983), spending done according targeting specific number events outcome end trial. However, delayed treatment effect scenarios substantial literature (e.g., Lin et al. (2020), Roychoudhury et al. (2021)) documenting importance adequate follow-duration addition requiring adequate number events traditional proportional hazards assumption. approaches taken, found spending time approach generalizes well addressing variety scenarios. fact spending need correspond information fraction perhaps first raised Lan DeMets (1989) calendar-time spending discussed. However, note Proschan, Lan, Wittes (2006) raised scenarios spending alternatives considered. Two specific spending approaches suggested : Spending according minimum planned observed event counts. suggested delayed effect examples. Spending common spending time across multiple hypotheses; e.g., multiple population example, spending overall population rate biomarker positive subgroup regardless event counts time overall population. consistent Follmann, Proschan, Geller (1994) applied multiple experimental treatments compared common control. Spending time case corresponds approach Fleming, Harrington, O’Brien (1984) fixed incremental spending set potentially variable number interim analyses. document fairly long demonstrates number scenarios relevant spending time concept. layout intended make easy possibly focus individual examples interested full review. Code blocks can unfolded interested implementation. Rather bog conceptual discussion implementation details, tried provide sufficient comments code guide implementation interested .","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"delayed-effect-scenario","dir":"Articles","previous_headings":"","what":"Delayed effect scenario","title":"Spending time examples","text":"consider example single stratum possibility delayed treatment effect. next two sections consider 1) fixed design interim analysis, 2) design interim analysis. Following common assumptions: control group time--event exponentially distributed median 12 months. 2.5% one-sided Type error. 90% power. constant enrollment rate expected enrollment duration 12 months. targeted trial duration 30 months. delayed effect experimental group compared control, hazard ratio 1 first 4 months hazard ratio 0.6 thereafter. restrictions constant control failure rate, two hazard ratio time intervals constant enrollment required, simplify example. approach taken uses average-hazard ratio approach approximating treatment effect Mukhopadhyay et al. 
(2020) asymptotic group sequential theory Tsiatis (1982).","code":"# control median m <- 12 # enrollment rate enroll_rate <- define_enroll_rate( duration = 12, # expected enrollment duration of 12 months rate = 1 # here the rate is a ratio, which will be updated to achieve the desired sample size ) # failure rate fail_rate <- define_fail_rate( duration = c(4, 100), # hazard ratio of 1 for the first 4 months and a hazard ratio of 0.6 thereafter hr = c(1, .6), fail_rate = log(2) / m, # exponential distribution dropout_rate = .001 )"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"fixed-design-delayed-effect","dir":"Articles","previous_headings":"","what":"Fixed design, delayed effect","title":"Spending time examples","text":"sample size events design shown . see average hazard ratio (AHR) assumptions 0.7026, part way early HR 1 later HR 0.6 assumed experimental versus control therapy.","code":"# bounds for fixed design are just a fixed bound for nominal p = 0.025, 1-sided z_025 <- qnorm(.975) # fixed design, single stratum # find sample size for 30 month trial under given # enrollment and sample size assumptions xx <- gs_design_ahr(enroll_rate, fail_rate, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) # get the summary table of the fixed design summary(xx, analysis_vars = c(\"time\", \"n\", \"event\", \"ahr\", \"info_frac\"), analysis_decimals = c(0, 0, 0, 4, 4) ) %>% as_gt()"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-1-less-experimental-benefit","dir":"Articles","previous_headings":"Fixed design, delayed effect > Power when assumptions design are wrong","what":"Scenario 1: less experimental benefit","title":"Spending time examples","text":"assume instead effect delay 6 months instead 4 control median 10 months instead 12, substantial impact power. , assumed targeted events required final analysis resulting expected final analysis time 25 months instead planned 30 average hazard ratio 0.78 expected time analysis rather targeted average hazard ratio 0.70 original assumptions. Now also require 30 months trial duration addition targeted events. improves power 63% 76% increase 25 30 months duration 340 377 expected events, important gain. driven average hazard ratio 0.78 compared 0.76 increased expected number events. also ensures adequate follow-better describe longer-term differences survival; may particularly important early follow-suggests delayed effect crossing survival curves. 
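A back-of-the-envelope check of the fixed design is possible with the usual Schoenfeld-type approximation. This is not the calculation gs_design_ahr() performs (it uses the average hazard ratio and the asymptotic theory cited above), but it gives a rough idea of the event count needed for 90% power at the reported AHR of about 0.70.
# Sketch: approximate events for a fixed design under 1:1 randomization,
# assuming var(log HR) is roughly 4 / events.
alpha <- 0.025
beta <- 0.1
ahr <- 0.7026  # average hazard ratio reported for the fixed design above
events_approx <- 4 * (qnorm(1 - alpha) + qnorm(1 - beta))^2 / log(ahr)^2
ceiling(events_approx)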
Thus, adaptation event-based design based also require adequate follow-can help ensure power large clinical trial investment clinically relevant underlying survival benefit.","code":"# update the median of control arm am <- 10 # alternate control median (the original is 12) # update the failure rate table fail_rate$duration[1] <- 6 # the original is 4 fail_rate$fail_rate <- log(2) / am # the original is log(2)/12 # get the targeted number of events target_events <- xx$analysis$event # update the design and calculate the power under the targeted events yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we want to achieve the target events # and set analysis_time as NULL # so the analysis_time will be calculated according to the target events event = target_events, analysis_time = NULL, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt() yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we want to achieve the targeted events, # but also keep the 30 month as the analysis_time event = target_events, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) # get the summary table of updated design yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-2-low-control-event-rates","dir":"Articles","previous_headings":"Fixed design, delayed effect","what":"Scenario 2: low control event rates","title":"Spending time examples","text":"Now assume longer planned control median, 16 months demonstrate value retaining event count requirement. analyze 30 months, power trial 87% 288 events expected. also require adequate events, restore power 94.5, originally targeted level 90%. cost expected trial duration becomes 38.5 months rather 30; however, since control median now larger, additional follow-useful characterize tail behavior. Note scenario likely particularly interested retaining power treatment effect actually stronger original alternate hypothesis. Thus, example, time cutoff alone ensured sufficient follow-power trial.","code":"# alternate control median am <- 16 # the original is 12 # update the failure rate fail_rate$fail_rate <- log(2) / am fail_rate$duration[1] <- 4 # calculate the power when trial duration is 30 month yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we set analysisTime as 30 # and calculate the corresponding number of events event = NULL, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt() # calculate the power when trial duration is 30 month and the events is the targeted events yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # here we set trial duration as 30 month # and keep the events as the target events event = target_events, analysis_time = 30, upper = gs_b, upar = z_025, lower = gs_b, lpar = z_025 ) yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"conclusions-for-fixed-design","dir":"Articles","previous_headings":"Fixed design, delayed effect","what":"Conclusions for fixed design","title":"Spending time examples","text":"summary, demonstrated value requiring adequate events adequate follow-duration approach analysis done one requirements. 
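The power figures quoted above (roughly 63% at the targeted events only versus 76% when 30 months of follow-up is also required) can be sanity-checked with a one-line normal approximation. The AHR and event values below are the approximate ones quoted in the text; this is only an approximation to the gs_power_ahr() results.
# Sketch: normal-approximation power check under 1:1 randomization.
power_approx <- function(ahr, events, alpha = 0.025) {
  pnorm(-log(ahr) * sqrt(events / 4) - qnorm(1 - alpha))
}
power_approx(ahr = 0.78, events = 340)  # stop at targeted events (~month 25)
power_approx(ahr = 0.76, events = 377)  # also require 30 months of follow-up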
Requiring retain power important treatment benefit characterization time potential delayed onset positive beneficial treatment effect.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"alternative-spending-strategies","dir":"Articles","previous_headings":"Group sequential design","what":"Alternative spending strategies","title":"Spending time examples","text":"extend design detect delayed effect group sequential design single interim analysis 80% final planned events accrued. assume final analysis require targeted trial duration events based fixed design based evaluations . assume efficacy bound uses Lan DeMets (1983) spending function approximating O’Brien-Fleming bound. futility bound planned, exception demonstration one scenario. interim analysis far enough trial substantial probability stopping early design assumptions. Coding different strategies must done carefully. Spending approach 1: time design, specify spending function specifying use information fraction design. Spending approach 2: wished use 22 30 months calendar analysis times use calendar fraction spending, need specify spending time design. Spending approach 3: Next show set information-based spending power calculation timing analysis based information fraction; e.g., propose requiring achieving planned event counts, also planned study duration analysis performed. critical set maximum planned information update information fraction calculation case. Spending approach 4: final case replace information fraction design specific spending time plugged spending function compute incremental \\alpha-spending analysis. case, use planned information fraction design, 0.8 interim analysis 1 final analysis. used regardless scenario using compute power, recall information fraction still used computing correlations asymptotic distribution approximation design tests.","code":"# Spending for design with planned information fraction (IF) upar_design_if <- list( # total_spend represents one-sided Type I error total_spend = 0.025, # Spending function and associated # parameter (NULL, in this case) sf = sfLDOF, param = NULL, # Do NOT specify spending time here as it will be set # by information fraction specified in call to gs_design_ahr() timing = NULL, # Do NOT specify maximum information here as it will be # set as the design maximum information max_info = NULL ) # CF is for calendar fraction upar_design_cf <- upar_design_if # Now switch spending time to calendar fraction upar_design_cf$timing <- c(22, 30) / 30 # We now need to change max_info from spending as specified for design upar_actual_info_frac <- upar_design_if # Note that we still have timing = NULL, unchanged from information-based design upar_actual_info_frac <- NULL # Replace NULL maximum information with planned maximum null hypothesis # information from design # This max will be updated for each planned design later upar_actual_info_frac$max_info <- 100 # Copy original upper planned spending upar_planned_info_frac <- upar_design_if # Interim and final spending time will always be the same, regardless of # expected events or calendar timing of analysis upar_planned_info_frac$timing <- c(0.8, 1) # We will reset planned maximum information later"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design","dir":"Articles","previous_headings":"Group sequential design","what":"Planned design","title":"Spending time examples","text":"extend design studied group 
sequential design single interim analysis 80% final planned events accrued. assume final analysis require targeted trial duration events based fixed design evaluations made . assume efficacy bound uses Lan-DeMets spending function approximating O’Brien-Fleming bound. futility bound planned. interim analysis far enough trial substantial probability stopping early design assumptions.","code":"# Control median m <- 12 # Planned information fraction at interim(s) and final planned_info_frac <- c(.8, 1) # No futility bound lpar <- rep(-Inf, 2) # enrollment rate enroll_rate <- define_enroll_rate( duration = 12, rate = 1 ) # failure rate fail_rate <- define_fail_rate( duration = c(4, 100), hr = c(1, .6), fail_rate = log(2) / m, dropout_rate = .001 ) # get the group sequential design model xx <- gs_design_ahr( enroll_rate, fail_rate, # final analysis time set to targeted study duration; # analysis times before are 'small' to ensure use of information fraction for timing analysis_time = c(1, 30), # timing here matches what went into planned_info_frac above info_frac = planned_info_frac, # upper bound : spending approach 1 upper = gs_spending_bound, upar = upar_design_if, # lower bound: no futility bound lower = gs_b, lpar = lpar ) # get the summary table xx %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"two-alternate-approaches","dir":"Articles","previous_headings":"Group sequential design","what":"Two alternate approaches","title":"Spending time examples","text":"consider two alternate approaches demonstrate spending time concept may helpful practice. However, skipping following two subsections can done interest. first demonstrates calendar spending Lan DeMets (1989). second basically method Fleming, Harrington, O’Brien (1984) fixed incremental spend used potentially variable number interim analyses, final bound computed based unspent one-sided Type error assigned hypothesis.","code":""},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"calendar-spending","dir":"Articles","previous_headings":"Group sequential design > Two alternate approaches","what":"Calendar spending","title":"Spending time examples","text":"use sample size , change efficacy bound spending calendar-based. reason spending different information-based spending mainly due fact expected information linear time. case, calendar fraction interim less information fraction, exactly opposite true earlier trial. just note calendar-based spending chosen, may worth comparing design bounds bounds using spending function, information-based spending see important differences trial team possibly scientific regulatory community. note also risk enough events achieve targeted power final analysis calendar-based spending strategy. 
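The remark that the calendar fraction at the interim is smaller than the information fraction can be made concrete by plugging both into the spending function; a minimal sketch:
# Sketch: interim alpha-spend under calendar-fraction vs. information-fraction
# spending with the Lan-DeMets O'Brien-Fleming spending function.
library(gsDesign)
alpha <- 0.025
sfLDOF(alpha = alpha, t = 22 / 30, param = NULL)$spend  # calendar fraction (~0.733)
sfLDOF(alpha = alpha, t = 0.8, param = NULL)$spend      # information fraction (0.8)
# The smaller calendar-fraction spend yields a more stringent interim bound.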
examine calendar-based spending document.","code":"yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = xx$fail_rate, # Planned time will drive timing since information accrues faster event = 1:2, # Interim time rounded analysis_time = c(22, 30), # upper bound: use calendar fraction upper = gs_spending_bound, upar = upar_design_cf, # lower bound: no futility bound lower = gs_b, lpar = lpar ) yy %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"fixed-incremental-spend-with-a-variable-number-of-analyses","dir":"Articles","previous_headings":"Group sequential design > Two alternate approaches","what":"Fixed incremental spend with a variable number of analyses","title":"Spending time examples","text":"noted, method proposed Fleming, Harrington, O’Brien (1984). general strategy demonstrated interim analyses every 6 months final targeted follow-time cumulative number events achieved. efficacy analyses start, fixed incremental spend 0.001 used interim. criteria final analysis met, remaining \\alpha spent. Cumulative spending months 18 24 0.001 0.002, respectively, full cumulative \\alpha-spending 0.025 final analysis. done setting spending time 18 24 months 1/25, 2/25 1; .e., 1/25 incremental \\alpha-spending incorporated interim analysis remaining \\alpha spent final analysis. enables strategy analyzing every 6 months minimum targeted follow-minimum number events observed, time final analysis performed. skip efficacy analyses first two interim analyses months 6 12. futility, simply use nominal 1-sided p-value 0.05 favoring control interim. note raises flag futility bound crossed Data Monitoring Committee (DMC) can choose continue trial even futility bound crossed. However, bound may effective providing DMC guidance stop futility prematurely. comparison designs, leave enrollment rates, failure rates, dropout rates final analysis time . see following table summarizing efficacy bounds power little impact total power futility analyses specified. cumulative \\alpha-spending 0.001 0.002 efficacy interim analyses, see nominal p-value bound second interim 0.0015, 0.001 incremental \\alpha-spend. also note nominal p-values testing, approximate hazard ratio required cross bounds presumably help justify consideration completing trial based definitive interim efficacy finding. Also, small interim spend, final nominal p-value reduced much overall \\alpha=0.025 Type error set group sequential design. also examine futility bound. nominal p-value 0.05 analysis one-sided p-value favor control experimental treatment. can see probability stopping early alternate hypothesis (\\beta-spending) substantial even given early delayed effect. Also, substantial approximate observed hazard ratios cross futility bound seem reasonable given timing number events observed; exception small number events first interim, larger number observed time early excess risk. may useful plan additional analyses futility bound crossed support stopping . example, looking subgroups evaluating smoothed hazard rates time treatment group may useful. clinical trial study team complete discussion futility bound considerations time design.","code":"# Cumulative spending at IA3 and IA4 will be 0.001 and 0.002, respectively. # Power spending function sfPower with param = 1 is linear in timing # which makes setting the above cumulative spending targets simple by # setting timing variable the the cumulative proportion of spending at each analysis. 
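# Added illustration (comments only, since this sits inside the setup code):
# sfPower() with param = 1 spends alpha in direct proportion to the supplied
# spending time, so timing = c((1:2) / 250, (1:2) / 25, 1) corresponds to
# cumulative spends of 0.0001, 0.0002, 0.001, 0.002, and 0.025. This can be
# verified directly with
# gsDesign::sfPower(alpha = 0.025, t = c((1:2) / 250, (1:2) / 25, 1), param = 1)$spend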
# There will be no efficacy testing at IA1 or IA2. # Thus, incremental spend, which will be unused, is set very small for these analyses. upar_fho <- list( total_spend = 0.025, sf = sfPower, param = 1, timing = c((1:2) / 250, (1:2) / 25, 1) ) fho <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = xx$fail_rate, event = NULL, analysis_time = seq(6, 30, 6), upper = gs_spending_bound, upar = upar_fho, # No efficacy testing at IA1 or IA2 # Thus, the small alpha the spending function would have # allocated will not be used test_upper = c(FALSE, FALSE, TRUE, TRUE, TRUE), lower = gs_b, lpar = c(rep(qnorm(.05), 4), -Inf) ) fho %>% summary() %>% as_gt()"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-with-less-treatment-effect","dir":"Articles","previous_headings":"Group sequential design","what":"Scenario with less treatment effect","title":"Spending time examples","text":", compute power assumption changing median control group time--event 10 months rather assumed 12 delay effect onset 6 months rather 4. otherwise change enrollment, dropout hazard ratio assumptions. following examples, require targeted number events targeted trial duration group sequential design interim final analyses. first example, uses interim spending based event count observed originally planned final event count information fraction 323 / 355 = 0.91. gives event-based spending 0.0191, substantially targeted information fraction 284 / 355 = 0.8 targeted interim spending 0.0122. reduces power overall 76% 73% lowers nominal p-value bound final analysis 0.0218 0.0165; see following two tables. Noting average hazard ratio 0.8 interim 0.76 final analysis emphasizes value preserving \\alpha-spending final analysis. Thus, example valuable limit spending interim analysis minimum planned spending opposed using event-based spending. Just important, general design principle making interim analysis criteria stringent final ensured alternate scenario. multiple trials delayed effects observed difference final nominal p-value bound made difference ensure statistically significant finding.","code":"# Alternate control median am <- 10 # Update the failure rate fail_rate$fail_rate <- log(2) / am fail_rate$duration[1] <- 6 # Set planned maximum information from planned design max_info0 <- max(xx$analysis$info) upar_actual_info_frac <- upar_design_if upar_actual_info_frac$max_info <- max_info0 # compute power if actual information fraction relative to original # planned total is used yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, # Planned time will drive timing since information accrues faster event = 1:2, analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_actual_info_frac, lower = gs_b, lpar = lpar ) yy %>% summary() %>% filter(Bound == \"Efficacy\") %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) yz <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = xx$analysis$Events, analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_planned_info_frac, lower = gs_b, lpar = lpar ) #> Warning: Unknown or uninitialised column: `Events`. 
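# Note (added): the warning above appears to arise because the analysis table
# produced by gs_design_ahr() stores event counts in a column named `event`,
# so `xx$analysis$Events` is NULL; the planned event counts are therefore not
# passed here and the analysis times alone drive the calculation. Using
# `event = xx$analysis$event` would avoid the warning.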
yz %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"scenario-with-longer-control-median","dir":"Articles","previous_headings":"Group sequential design","what":"Scenario with longer control median","title":"Spending time examples","text":"Now return example control median longer expected confirm spending according planned level alone without considering actual number events also result power reduction. power gain great (94.2% vs 95.0%) interim final p-value bounds aligned intent emphasizing final analysis smaller average hazard ratio expected (0.680 vs 0.723 interim). First, show result using planned spending. Since number events less expected, used actual number events interim bound stringent obtain slightly greater power.","code":"# Alternate control median am <- 16 # Update the failure rate fail_rate$fail_rate <- log(2) / am # Return to 4 month delay with HR=1 before HR = 0.6 fail_rate$duration[1] <- 4 # Start with spending based on planned information # which is greater than actual information yy <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = c(1, max(xx$analysis$event)), analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_planned_info_frac, lower = gs_b, lpar = lpar ) yy %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) yz <- gs_power_ahr( enroll_rate = xx$enroll_rate, fail_rate = fail_rate, event = c(1, max(xx$analysis$event)), analysis_time = xx$analysis$time, upper = gs_spending_bound, upar = upar_actual_info_frac, lower = gs_b, lpar = lpar ) yz %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"summary-for-spending-time-motivation-assuming-delayed-benefit","dir":"Articles","previous_headings":"Group sequential design","what":"Summary for spending time motivation assuming delayed benefit","title":"Spending time examples","text":"summary, using minimum planned actual spending adapt design based event-based spending adapts interim bound stringent final bound different scenarios ensures better power event-based interim analysis spending.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"assumptions","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Assumptions","title":"Spending time examples","text":"consider simple case use method Maurer Bretz (2013) test overall population biomarker subgroup endpoint. assume exponential failure rate median 12 control group regardless population. hazard ratio biomarker positive subgroup assumed 0.6, negative population 0.8. assume biomarker positive group represents half population, meaning enrollment rates assumed negative positive patients. difference failure rates two strata hazard ratio. case, assume proportional hazards within negative (HR = 0.8) positive (HR = 0.6) patients. illustrative purposes, choosing strategy based possible feeling much less certainty study start whether underlying benefit biomarker negative population. wish ensure power biomarker positive group, allow good chance positive overall population finding lesser benefit biomarker negative population. alternative trial strategy planned, alternate approach following considered. 
case, design first biomarker positive population one-sided Type error controlled \\alpha = 0.0125:","code":"# we assume an exponential failure rate with a median of 12 # for the control group regardless of population. m <- 12 # the enrollment rate of both subgroup and population is the same enroll_rate <- define_enroll_rate( stratum = c(\"Positive\", \"Negative\"), duration = 12, rate = 20 ) # the hazard ratio in the biomarker positive subgroup will be assumed to be 0.6, # and in the negative population 0.8. fail_rate <- define_fail_rate( stratum = c(\"Positive\", \"Negative\"), hr = c(0.6, 0.8), duration = 100, fail_rate = log(2) / m, dropout_rate = 0.001 )"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design-for-biomarker-positive-population","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Planned design for biomarker positive population","title":"Spending time examples","text":"","code":"# Since execution will be event-based for biomarker population, # there will be no need to change spending plan for different scenarios. # upper bound: spending based on information fraction upar_design_spend <- list( sf = gsDesign::sfLDOF, # spending function total_spend = 0.0125, # total alpha spend is now 0.0125 timing = NULL, # to select maximum planned information for information fraction param = NULL ) # lower bound: no futility bound lpar <- rep(-Inf, 2) # Z = -infinity for lower bound # we will base the combined hypothesis design to ensure power in the biomarker subgroup positive <- gs_design_ahr( # enroll/failure rates enroll_rate = enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # Following drives information fraction for interim info_frac = c(.8, 1), # Total study duration driven by final analysis_time value, i.e., 30 # Enter small increasing values before that # so information fraction in planned_info_frac drives timing of interims analysis_time = c(1, 30), # upper bound upper = gs_spending_bound, upar = upar_design_spend, # lower lower lower = gs_b, lpar = lpar ) positive %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"planned-design-for-overall-population","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Planned design for overall population","title":"Spending time examples","text":"adjust overall study enrollment rate match design requirement biomarker positive population. Now can examine power overall population based hazard ratio assumptions biomarker negative biomarker positive subgroups just calculated enrollment assumption. use analysis times biomarker positive population design. see interim information fraction overall population slightly greater biomarker positive population . compensate enable flexibility biomarker positive prevalence changes, use spending time biomarker positive subgroup regardless true fraction final planned events analysis. Thus, interim nominal p-value bound biomarker positive overall populations. 
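To make the "common spending time" idea concrete: suppose at the interim the biomarker-positive subgroup sits at its planned 80% information fraction while the overall population has reached, say, 85% of its own planned final events (the 85% is an assumed number for illustration only). With a shared spending time of 0.8, both hypotheses spend the same cumulative alpha; event-based spending would release more for the overall population.
# Sketch: per-hypothesis interim spend with common vs. event-based spending time.
library(gsDesign)
alpha_h <- 0.0125  # one-sided alpha allocated to each hypothesis
sfLDOF(alpha = alpha_h, t = 0.80, param = NULL)$spend  # common spending time
sfLDOF(alpha = alpha_h, t = 0.85, param = NULL)$spend  # event-based, overall population (assumed 85%)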
make much difference , see natural way adapt design observed biomarker positive prevalence different assumed design.","code":"# Get enrollment rate inflation factor compared to originally input rate inflation_factor <- positive$enroll_rate$rate[1] / enroll_rate$rate[1] # Using this inflation factor, set planned enrollment rates planned_enroll_rate <- enroll_rate %>% mutate(rate = rate * inflation_factor) planned_enroll_rate %>% gt() # Store overall enrollment rates for future use overall_enroll_rate <- planned_enroll_rate %>% summarize( stratum = \"All\", duration = first(duration), rate = sum(rate) ) overall_enroll_rate %>% gt() # Set total spend for overall population, O'Brien-Fleming spending function, and # same spending time as biomarker subgroup upar_overall_planned_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function param = NULL, total_spend = 0.0125, # alpha timing = c(.8, 1), # same spending time as biomarker subgroup max_info = NULL # we will use actual final information as planned initially ) overall_planned_bounds <- gs_power_ahr( # enroll/failure rates enroll_rate = planned_enroll_rate, fail_rate = fail_rate, # analysis time: the planned analysis time for biomarker positive population analysis_time = positive$analysis$time, # events will be determined by expected events at planned analysis times event = NULL, # upper bound: planned spending times are specified the same as before upper = gs_spending_bound, upar = upar_overall_planned_info_frac, # lower bound: no futility lower = gs_b, lpar = lpar ) overall_planned_bounds %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"alternate-scenarios-overview","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Alternate scenarios overview","title":"Spending time examples","text":"divide evaluations three subsections: one higher prevalence biomarker positive patients expected; one lower biomarker prevalence; differing event rate hazard ratio assumptions. case, assume total enrollment rate 48.8 per month planned . also assume enroll targeted biomarker positive subgroup enrollment 293 achieved, regardless overall enrollment. specify interim analysis timing require 80% planned final analysis events biomarker positive population least 10 months minimum follow-; thus, biomarker population never vary events spending . spending time used overall population, compare event-based spending. choices arbitrary. think reasonable, design planner think carefully variations suit clinical trial team needs.","code":"## Setting spending alternatives # Using information (event)-based spending time relative to overall population plan # Set total spend for overall population, O'Brien-Fleming spending function. # For design information-spending, we set timing = NULL and max_info to plan from above upar_overall_planned_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function total_spend = 0.0125, # alpha max_info = max(overall_planned_bounds$info0), # we will use planned final information for # overall population from design to # compute information fraction relative to plan param = NULL, timing = planned_info_frac ) #> Warning in max(overall_planned_bounds$info0): no non-missing arguments to max; #> returning -Inf # Using planned information fraction will demonstrate problems below. 
# Set total spend for overall population, O'Brien-Fleming spending function, and # same spending time as biomarker subgroup upar_overall_actual_info_frac <- list( sf = gsDesign::sfLDOF, # O'Brien-Fleming spending function total_spend = 0.0125, # alpha max_info = max(overall_planned_bounds$info0), # we will use planned final information # for overall population from design param = NULL, timing = NULL ) #> Warning in max(overall_planned_bounds$info0): no non-missing arguments to max; #> returning -Inf"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-subgroup-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence higher than planned","what":"Biomarker subgroup power","title":"Spending time examples","text":"suppose biomarker prevalence 60%, higher 50% prevalence design anticipated. enrollment rates positive versus negative patients expected enrollment duration now: Now can compute power biomarker positive group targeted events. Since simple proportional hazards model, thing changing original design takes slightly less time.","code":"# update the enrollment rate due to 60% prevalence positive_60_enroll_rate <- rbind( overall_enroll_rate %>% mutate(stratum = \"Positive\", rate = 0.6 * rate), overall_enroll_rate %>% mutate(stratum = \"Negative\", rate = 0.4 * rate) ) # update the enrollment duration positive_60_enroll_rate$duration <- max(positive$analysis$n) / overall_enroll_rate$rate / 0.6 # display the updated enrollment rate table positive_60_enroll_rate %>% gt() %>% fmt_number(columns = \"rate\", decimals = 1) positive_60_power <- gs_power_ahr( # enrollment/failure rate enroll_rate = positive_60_enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # number of events event = positive$analysis$event, # analysis time will be calcuated to achieve the targeted events analysis_time = NULL, # upper bound upper = gs_spending_bound, upar = upar_design_spend, # lower bound lower = gs_b, lpar = lpar ) positive_60_power %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overall-population-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence higher than planned","what":"Overall population power","title":"Spending time examples","text":"Now use spending overall population, resulting full \\alpha-spending end trial even though originally targeted events expected achieved. note information fraction computed based originally planned events overall population. Given larger proportion patients biomarker positive, average hazard ratio stronger originally planned power overall population still 90%. 
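The power advantage of planned spending time in this scenario comes from the final analysis: with fewer overall events than originally planned, event-based spending never releases the full 0.0125, whereas a spending time fixed at 1 does. A minimal sketch (the 90% figure below is an assumed shortfall for illustration only):
# Sketch: final-analysis alpha actually spent under the two strategies.
library(gsDesign)
alpha_h <- 0.0125
sfLDOF(alpha = alpha_h, t = 0.90, param = NULL)$spend  # event-based with a 10% event shortfall (assumed)
sfLDOF(alpha = alpha_h, t = 1.00, param = NULL)$spend  # planned spending time of 1 at the final analysis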
used information-based (.e., event-based) spending, reached full spending final analysis thus lower power.","code":"gs_power_ahr( # set the enrollment/failure rate enroll_rate = positive_60_enroll_rate, fail_rate = fail_rate, # set evnets and analysis time event = NULL, analysis_time = positive_60_power$analysis$time, # set upper bound: use planned spending in spite of lower overall information upper = gs_spending_bound, upar = upar_overall_planned_info_frac, # set lower bound: no futility lower = gs_b, lpar = rep(-Inf, 2) ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4) gs_power_ahr( # set the enrollment/failure rate enroll_rate = positive_60_enroll_rate, fail_rate = fail_rate, # set evnets and analysis time event = NULL, analysis_time = positive_60_power$analysis$time, # upper bound: use actual spending which uses less than complete alpha upper = gs_spending_bound, upar = upar_overall_actual_info_frac, # lower bound: no futility lower = gs_b, lpar = lpar ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-subgroup-prevalence-lower-than-planned","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview","what":"Biomarker subgroup prevalence lower than planned","title":"Spending time examples","text":"suppose biomarker prevalence 40%, lower 50% prevalence design anticipated. enrollment rates positive versus negative patients expected enrollment duration now :","code":"# set the enrollment rate under 40% prevalence positive_40_enroll_rate <- rbind( overall_enroll_rate %>% mutate(stratum = \"Positive\", rate = 0.4 * rate), overall_enroll_rate %>% mutate(stratum = \"Negative\", rate = 0.6 * rate) ) # update the duration of enrollment table positive_40_enroll_rate$duration <- max(positive$analysis$n) / positive_40_enroll_rate$rate[1] # display the enrollment table positive_40_enroll_rate %>% gt() %>% fmt_number(columns = \"rate\", decimals = 1)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"biomarker-positive-subgroup-power","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence lower than planned","what":"Biomarker positive subgroup power","title":"Spending time examples","text":"Now can compute power biomarker positive group targeted events.","code":"upar_actual_info_frac$total_spend <- 0.0125 upar_actual_info_frac$max_info <- max(positive$analysis$info) positive_40_power <- gs_power_ahr( # set enrollment/failure rate enroll_rate = positive_40_enroll_rate %>% filter(stratum == \"Positive\"), fail_rate = fail_rate %>% filter(stratum == \"Positive\"), # set events/analysis time event = positive$analysis$event, analysis_time = NULL, # set upper bound upper = gs_spending_bound, upar = upar_actual_info_frac, # set lower bound lower = gs_b, lpar = rep(-Inf, 2) ) positive_40_power %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"overall-population-power-1","dir":"Articles","previous_headings":"Testing multiple hypotheses > Alternate scenarios overview > Biomarker subgroup prevalence lower than planned","what":"Overall population power","title":"Spending time examples","text":"see adapting overall sample size spending according biomarker subgroup, retain 90% power. 
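Since enrollment continues until the targeted number of biomarker-positive patients is reached, the overall sample size and enrollment duration scale inversely with prevalence. A short sketch using the 293 targeted biomarker-positive patients and the 48.8 patients per month overall enrollment rate quoted above:
# Sketch: adapted enrollment duration and overall sample size by prevalence.
n_positive <- 293     # targeted biomarker-positive enrollment (from the text)
overall_rate <- 48.8  # overall enrollment rate per month (from the text)
for (prev in c(0.4, 0.5, 0.6)) {
  cat(sprintf(
    "prevalence %2.0f%%: enroll %4.1f months, overall N about %3.0f\n",
    100 * prev, n_positive / (overall_rate * prev), n_positive / prev
  ))
}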
spite lower overall effect size, larger adapted sample size ensures power retention.","code":"gs_power_ahr( enroll_rate = positive_40_enroll_rate, fail_rate = fail_rate, event = 1:2, analysis_time = positive_40_power$analysis$time, upper = gs_spending_bound, upar = upar_overall_planned_info_frac, lower = gs_b, lpar = rep(-Inf, 2) ) %>% summary() %>% gt() %>% fmt_number(columns = 3:6, decimals = 4)"},{"path":"https://merck.github.io/gsDesign2/articles/story-spending-time-example.html","id":"summary-of-findings","dir":"Articles","previous_headings":"Testing multiple hypotheses","what":"Summary of findings","title":"Spending time examples","text":"suggested two overall findings planning executing trial potentially delayed treatment effect: Require targeted event count minimum follow-completing analysis trial helps ensure powering trial appropriately better description tail behavior may essential long-term results key establishing potentially positive risk-benefit. Use fixed, small incremental \\alpha-spend interim proposed Fleming, Harrington, O’Brien (1984) variable number interim analyses ensure adequate follow-. Use minimum planned actual spending interim analyses. implementing Fleming, Harrington, O’Brien (1984) approach, also suggested simple approach futility may quite useful practically scenario potentially delayed onset treatment effect. basically looks evidence favorable control group effect relative experimental setting nominal p-value cutoff 1-sided 0.05 level early interim futility analyses. crossing survival curves inferior survival curves may exist, may useful way ensure continuing trial ethical; approach perhaps useful experimental treatment replacing components control treatment case add-treatment may toxic potentially detrimental effects. addition delayed effect example, considered example testing biomarker positive subgroup overall population. Using common spending time hypotheses common interim analysis strategy advocated Follmann, Proschan, Geller (1994) can helpful implement spending hypotheses adequate \\alpha spend final analysis also ensure full utilization \\alpha-spending. suggested using minimum planned actual spending interim analysis. Spending can based key hypothesis (e.g., biomarker positive population) minimum spending time among hypotheses tested. Taking advantage know correlations ensure full \\alpha utilization multiple hypothesis testing also simply implemented strategy Anderson et al. (2022). summary, illustrated motivation illustration spending time approach examples commonly encountered. Approaches suggested included implementation Fleming, Harrington, O’Brien (1984) fixed incremental \\alpha-spend interim analysis well use minimum planned actual spending interim analyses.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"design-assumptions","dir":"Articles","previous_headings":"","what":"Design assumptions","title":"Efficacy and futility boundary update","text":"assume two analyses: interim analysis (IA) final analysis (FA). IA planned 20 months opening enrollment, followed FA month 36. planned enrollment period spans 14 months, first 2 months enrollment rate 1/3 final rate, next 2 months rate 2/3 final rate, final rate remaining 10 months. obtain targeted 90% power, rates multiplied constant. control arm assumed follow exponential distribution median 9 months dropout rate 0.0001 per month regardless treatment group. 
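As a quick numeric check of the assumptions just stated (the full specification appears in the code that follows), a short sketch using gsDesign2 helpers documented later in this file; the unscaled enrollment rates (1:3) / 3 are the planned ramp-up before they are rescaled to achieve the targeted power.

library(gsDesign2)
# Exponential rate corresponding to a 9-month median for the control arm
log(2) / 9
# Expected (unscaled) cumulative enrollment under the 2 + 2 + 10 month ramp-up:
# about 0.67, 2, and 12 by months 2, 4, and 14
expected_accrual(
  time = c(2, 4, 14),
  enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = (1:3) / 3)
)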
Finally, experimental treatment group piecewise exponential 3-month delayed treatment effect; , first 3 months HR = 1 HR 0.6 thereafter. use null hypothesis information boundary crossing probability calculations null alternate hypotheses. also imply null hypothesis information used information fraction used spending functions derive design.","code":"alpha <- 0.0125 beta <- 0.1 ratio <- 1 # Enrollment enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = (1:3) / 3 ) # Failure and dropout fail_rate <- define_fail_rate( duration = c(3, Inf), fail_rate = log(2) / 9, hr = c(1, 0.6), dropout_rate = .0001 ) # IA and FA analysis time analysis_time <- c(20, 36) # Randomization ratio ratio <- 1 info_scale <- \"h0_info\""},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"one-sided-design","dir":"Articles","previous_headings":"","what":"One-sided design","title":"Efficacy and futility boundary update","text":"design, efficacy bounds IA FA. use Lan DeMets (1983) spending function total alpha 0.0125, approximates O’Brien-Fleming bound. planned design targets: Planned events: 227, 349 Planned information fraction interim final analysis: 0.6504, 1 Planned alpha spending: 0.0054, 0.025 Planned efficacy bounds: 2.9048, 2.2593 note rounding final targeted events increases power slightly targeted 90%.","code":"upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, info_frac = NULL, info_scale = \"h0_info\", analysis_time = analysis_time, ratio = ratio, upper = gs_spending_bound, upar = upar, test_upper = TRUE, lower = gs_b, lpar = rep(-Inf, 2), test_lower = FALSE ) |> to_integer() x |> summary() |> as_gt() |> tab_header(title = \"Planned design\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"bounds-for-alternate-alpha","dir":"Articles","previous_headings":"One-sided design","what":"Bounds for alternate alpha","title":"Efficacy and futility boundary update","text":"stage study design, may required report designs multiple \\alpha alpha reallocated due rejection another hypothesis. design stage, planned \\alpha 0.0125. Assume updated \\alpha 0.025 due reallocation \\alpha hypothesis. corresponding bounds updated boundaries utilize planned treatment effect planned statistical information null hypothesis, considering original design info_scale = \"h0_info\".","code":"gs_update_ahr( x = x, alpha = 0.025 ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = \"For alternate alpha = 0.025\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"updating-bounds-with-observed-events-at-time-of-analyses","dir":"Articles","previous_headings":"One-sided design","what":"Updating bounds with observed events at time of analyses","title":"Efficacy and futility boundary update","text":"provide simulation observed events IA FA differ planned. case differences planned due using calendar-based cutoffs simulated data. practice, even attempting match event counts exactly observed events analyses often differ planned. also assume protocol specifies full \\alpha spent final analysis even case like shortfall events versus design plan. observed data example generated simtrial::sim_pw_surv(). 
updated design ","code":"set.seed(123) # Make simulated data reproducible # Generate trial data observed_data <- simtrial::sim_pw_surv( n = x$analysis$n[x$analysis$analysis == 2], stratum = data.frame(stratum = \"All\", p = 1), block = c(rep(\"control\", 2), rep(\"experimental\", 2)), enroll_rate = x$enroll_rate, fail_rate = (fail_rate |> simtrial::to_sim_pw_surv())$fail_rate, dropout_rate = (fail_rate |> simtrial::to_sim_pw_surv())$dropout_rate ) # Cut simulated data for interim analysis at planned calendar time observed_data_ia <- observed_data |> simtrial::cut_data_by_date(analysis_time[1]) # Cut simulated data for final analysis at planned calendar time observed_data_fa <- observed_data |> simtrial::cut_data_by_date(analysis_time[2]) # Set spending fraction for interim according to observed events # divided by planned final events. # Final spending fraction is 1 per plan even if there is a shortfall # of events versus planned (as specified above) ustime <- c(sum(observed_data_ia$event) / max(x$analysis$event), 1) # Update bound gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa) ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = paste0(\"With observed \", sum(observed_data_ia$event), \" events at IA and \", sum(observed_data_fa$event), \" events at FA\"))"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"two-sided-asymmetric-design-beta-spending-with-non-binding-lower-bound","dir":"Articles","previous_headings":"","what":"Two-sided asymmetric design, beta-spending with non-binding lower bound","title":"Efficacy and futility boundary update","text":"section, investigate 2 sided asymmetric design, non-binding \\beta-spending used generate futility bounds. \\beta-spending refers Type II error (1 - power) spending lower bound crossing probabilities alternative hypothesis. Non-binding bound computation assumes trial continues lower bound crossed Type error, Type II error. original designs, employ Lan-DeMets spending function used approximate O’Brien-Fleming bounds (Lan DeMets 1983) efficacy futility bounds. total spending efficacy 0.0125, futility 0.1. addition, assume futility test final analysis. 
planned design, Planned events: 236, 363 Planned information fraction (timing): 0.6501, 1 Planned alpha spending: 0.0054388, 0.025 Planned efficacy bounds: 2.9057, 2.2593 Planned futility bounds: 0.6453 Since added futility bounds, sample size number events larger 1-sided example.","code":"# Upper and lower bounds uses spending with Lan-DeMets spending approximating # O'Brien-Fleming bound upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = alpha, param = NULL) lower <- gs_spending_bound lpar <- list(sf = gsDesign::sfLDOF, total_spend = beta, param = NULL) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, info_frac = NULL, info_scale = \"h0_info\", analysis_time = c(20, 36), ratio = ratio, upper = gs_spending_bound, upar = upar, test_upper = TRUE, lower = lower, lpar = lpar, test_lower = c(TRUE, FALSE), binding = FALSE ) |> to_integer() x |> summary() |> as_gt() |> tab_header(title = \"Planned design\", subtitle = \"2-sided asymmetric design, non-binding futility\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"bounds-for-alternate-alpha-1","dir":"Articles","previous_headings":"Two-sided asymmetric design, beta-spending with non-binding lower bound","what":"Bounds for alternate alpha","title":"Efficacy and futility boundary update","text":"may want report design bounds multiple \\alpha case Type error may reallocated another hypothesis. assume now \\alpha 0.025 still use sample size event timing original alpha = 0.0125. updated bounds ","code":"gs_update_ahr( x = x, alpha = 0.025 ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = \"For alpha = 0.025\")"},{"path":"https://merck.github.io/gsDesign2/articles/story-update-boundary.html","id":"updating-bounds-with-observed-events-at-time-of-analyses-1","dir":"Articles","previous_headings":"Two-sided asymmetric design, beta-spending with non-binding lower bound","what":"Updating bounds with observed events at time of analyses","title":"Efficacy and futility boundary update","text":"assume observed events 1-sided example . updated design ","code":"# Update spending fraction as above ustime <- c(sum(observed_data_ia$event) / max(x$analysis$event), 1) gs_update_ahr( x = x, ustime = ustime, # Spending fraction for futility bound same as for efficacy lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa) ) |> summary(col_decimals = c(z = 4)) |> as_gt(title = \"Updated design\", subtitle = paste0(\"With observed \", sum(observed_data_ia$event), \" events at IA and \", sum(observed_data_fa$event), \" events at FA\"))"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Keaven Anderson. Author. Yilong Zhang. Author. Yujie Zhao. Author, maintainer. Jianxiao Yang. Author. Nan Xiao. Author. Amin Shirazi. Contributor. Ruixue Wang. Contributor. Yi Cui. Contributor. Ping Yang. Contributor. Xin Tong Li. Contributor. Chenxiang Li. Contributor. Hiroaki Fukuda. Contributor. Hongtao Zhang. Contributor. Yalin Zhu. Contributor. John Blischak. Contributor. Dickson Wanjau. Contributor. Merck & Co., Inc., Rahway, NJ, USA affiliates. Copyright holder.","code":""},{"path":"https://merck.github.io/gsDesign2/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Anderson K, Zhang Y, Zhao Y, Yang J, Xiao N (2024). 
gsDesign2: Group Sequential Design Non-Constant Effect. R package version 1.1.2.23, https://github.com/Merck/gsDesign2, https://merck.github.io/gsDesign2/.","code":"@Manual{, title = {gsDesign2: Group Sequential Design with Non-Constant Effect}, author = {Keaven Anderson and Yilong Zhang and Yujie Zhao and Jianxiao Yang and Nan Xiao}, year = {2024}, note = {R package version 1.1.2.23, https://github.com/Merck/gsDesign2}, url = {https://merck.github.io/gsDesign2/}, }"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/index.html","id":"objective","dir":"","previous_headings":"","what":"Objective","title":"Group Sequential Design with Non-Constant Effect","text":"goal gsDesign2 enable fixed group sequential design non-proportional hazards. Piecewise constant enrollment, failure rates dropout rates stratified population available enable highly flexible enrollment, time--event time--dropout assumptions. Substantial flexibility top gsDesign package intended selecting boundaries. Comments usability features encouraged still young package.","code":""},{"path":"https://merck.github.io/gsDesign2/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Group Sequential Design with Non-Constant Effect","text":"Install released version gsDesign2 CRAN: install development version GitHub :","code":"install.packages(\"gsDesign2\") remotes::install_github(\"Merck/gsDesign2\")"},{"path":[]},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-1-specifying-enrollment-and-failure-rates","dir":"","previous_headings":"Use cases","what":"Step 1: specifying enrollment and failure rates","title":"Group Sequential Design with Non-Constant Effect","text":"basic example shows solve common problem. assume 4 month delay treatment effect. Specifically, assume hazard ratio 1 4 months 0.6 thereafter. example assume exponential failure rate low exponential dropout rate. enroll_rate specification indicates expected enrollment duration 12 months exponential inter-arrival times. resulting failure rate specification following table. many rows strata needed can specified approximate whatever patterns wish.","code":"library(gsDesign2) # Basic example # Constant enrollment over 12 months # Rate will be adjusted later by gsDesign2 NPH to get sample size enroll_rate <- define_enroll_rate(duration = 12, rate = 1) # 12 month median exponential failure rate in control # 4 month delay in effect with HR=0.6 after # Low exponential dropout rate median_surv <- 12 fail_rate <- define_fail_rate( duration = c(4, Inf), fail_rate = log(2) / median_surv, hr = c(1, .6), dropout_rate = .001 ) fail_rate |> gt::gt()"},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-2-derive-a-fixed-design-with-no-interim-analyses","dir":"","previous_headings":"Use cases","what":"Step 2: derive a fixed design with no interim analyses","title":"Group Sequential Design with Non-Constant Effect","text":"Computing fixed sample size design 2.5% one-sided Type error 90% power. specify trial duration 36 months analysis_time. Enrollment duration sum enroll_rate$duration. used fixed_design() since single analysis: input enrollment rates now scaled achieve power: failure dropout rates remain unchanged input. summary obtained . columns : Design: sample size derivation method. N: sample size; generally round even number. Event: generally round . Bound: Z value efficacy; inverse normal 1 - alpha. alpha: 1-sided alpha level testing. 
Power: power corresponding enrollment, failure rate, trial targeted events.","code":"fd <- fixed_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = 0.025, power = 0.9, study_duration = 36, ratio = 1 # Experimental/control randomization ratio ) fd$enroll_rate |> gt::gt() fd |> summary() |> as_gt()"},{"path":"https://merck.github.io/gsDesign2/index.html","id":"step-3-group-sequential-design","dir":"","previous_headings":"Use cases","what":"Step 3: group sequential design","title":"Group Sequential Design with Non-Constant Effect","text":"provide simple example group sequential design demonstrates couple features available gsDesign package. first specifying analysis times calendar time rather information fraction. second efficacy futility bound analysis. addition methods non-proportional hazards demonstrated fixed design . use O’Brien-Fleming spending function derive efficacy bounds 24 36 months. futility, simply require nominally significant trend wrong direction (p < 0.1) 8 months, trend favor experimental treatment 14 months (Z > 0) bound later (Z = -\\infty). Thus, two efficacy analyses two separate, earlier futility analysis. Power set 80% due somewhat aggressive futility bounds used safety (analysis 1 half way enrollment) proof concept (analysis 2). aggressive futility bounds may desirable previous proof concept experimental treatment established; essentially, becomes Phase II/III design interim evaluation appropriate efficacy trends completing trial. Now summarize derived design. summary table described vignette summarize group sequential designs gt tables. Note design trend favor experimental treatment minor 8 months due delayed effect assumption used (see AHR analysis 1 table). design trend 16 months somewhat favorable looking HR < 1 (favoring experimental treatment) proof concept. Actual bounds timing selected trial situation dependent, hope suggestions provocative might considered.","code":"gsd <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, # 80% power; enables aggressive futility bound specified analysis_time = c(8, 14, 24, 36), binding = FALSE, # Non-binding futility bound upper = gs_spending_bound, # Use spending bound for efficacy; total_spend is normally alpha upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), test_upper = c(FALSE, FALSE, TRUE, TRUE), # Only test efficacy after 1st analysis lower = gs_b, # Fixed Z-values will be provided for futility bound lpar = c(qnorm(0.1), 0, -Inf, -Inf) ) gsd |> summary() |> as_gt()"},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Average hazard ratio under non-proportional hazards — ahr","title":"Average hazard ratio under non-proportional hazards — ahr","text":"Provides geometric average hazard ratio various non-proportional hazards assumptions either single multiple strata studies. 
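As a rough illustration of the "geometric average" idea, here is a sketch under the assumption that the AHR is approximately an expected-event-weighted geometric mean of the piecewise hazard ratios; ahr() itself performs the exact per-arm computation, so this is only a back-of-envelope approximation using the default enrollment and failure rates shown in the usage below.

library(gsDesign2)
hr <- c(0.9, 0.6) # piecewise hazard ratios from the default fail_rate
d <- expected_event(
  enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)),
  fail_rate = define_fail_rate(
    duration = c(3, 100), fail_rate = log(2) / c(9, 18), dropout_rate = 0.001
  ),
  total_duration = 30,
  simple = FALSE # return expected events by time period
)
# Event-weighted geometric mean of the period hazard ratios;
# roughly comparable to the ahr()$ahr value of about 0.70 in Example 1 below
exp(sum(d$event * log(hr)) / sum(d$event))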
piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Average hazard ratio under non-proportional hazards — ahr","text":"","code":"ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), total_duration = 30, ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Average hazard ratio under non-proportional hazards — ahr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-start enrollment data cutoff; can single value vector positive numbers. ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Average hazard ratio under non-proportional hazards — ahr","text":"data frame time (total_duration), ahr (average hazard ratio), n (sample size), event (expected number events), info (information given scenarios), info0 (information related null hypothesis) value total_duration input.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Average hazard ratio under non-proportional hazards — ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Average hazard ratio under non-proportional hazards — ahr","text":"","code":"# Example 1: default ahr() #> time ahr n event info info0 #> 1 30 0.6952153 108 58.49097 14.32724 14.62274 # Example 2: default with multiple analysis times (varying total_duration) ahr(total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7857415 108 30.27841 7.441186 7.569603 #> 2 30 0.6952153 108 58.49097 14.327243 14.622742 # Example 3: stratified population enroll_rate <- define_enroll_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 3)), duration = c(2, 10, 4, 4, 8), rate = c(5, 10, 0, 3, 6) ) fail_rate <- define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = c(1, Inf, 1, Inf), fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7332218 164 113.2782 28.18130 28.31954 #> 2 30 0.7175169 170 166.1836 41.49942 41.54590"},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":null,"dir":"Reference","previous_headings":"","what":"Blinded estimation of average hazard ratio — ahr_blinded","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"Based blinded data assumed hazard ratios different intervals, compute blinded estimate average hazard ratio (AHR) corresponding estimate statistical information. 
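A small sketch of the arithmetic this appears to involve; the event counts are hypothetical and the formulas are assumptions inferred from the Value section and example output below rather than a restatement of the package internals: the blinded AHR behaves like an event-weighted geometric mean of the assumed interval hazard ratios, theta = -log(AHR), and info0 = total events * ratio / (1 + ratio)^2 (total events / 4 for 1:1 randomization).

d <- c(50, 150)  # hypothetical blinded event counts in the two intervals
hr <- c(1, 0.6)  # assumed hazard ratios for those intervals
ratio <- 1       # experimental:control randomization ratio
ahr_est <- exp(sum(d * log(hr)) / sum(d)) # blinded average hazard ratio
theta <- -log(ahr_est)                    # natural parameter (drift)
info0 <- sum(d) * ratio / (1 + ratio)^2   # information under the null hypothesis
c(ahr = ahr_est, theta = theta, info0 = info0)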
function intended use computing futility bounds based spending assuming input hazard ratio (hr) values intervals specified .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"","code":"ahr_blinded( surv = survival::Surv(time = simtrial::ex1_delayed_effect$month, event = simtrial::ex1_delayed_effect$evntd), intervals = c(3, Inf), hr = c(1, 0.6), ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"surv Input survival object (see survival::Surv()); note 0 = censored, 1 = event survival::Surv(). intervals Vector containing positive values indicating interval lengths exponential rates assumed. Note final infinite interval added events occur final interval specified. hr Vector hazard ratios assumed interval. ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"tibble one row containing ahr - Blinded average hazard ratio based assumed period-specific hazard ratios input fail_rate observed events corresponding intervals. event - Total observed number events. info0 - Information related null hypothesis. theta - Natural parameter group sequential design representing expected incremental drift analyses.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ahr_blinded.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Blinded estimation of average hazard ratio — ahr_blinded","text":"","code":"ahr_blinded( surv = survival::Surv( time = simtrial::ex2_delayed_effect$month, event = simtrial::ex2_delayed_effect$evntd ), intervals = c(4, 100), hr = c(1, .55), ratio = 1 ) #> # A tibble: 1 × 4 #> event ahr theta info0 #> #> 1 228 0.826 0.191 57"},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"Convert summary table fixed group sequential design object gt object","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"","code":"as_gt(x, ...) # S3 method for class 'fixed_design' as_gt(x, title = NULL, footnote = NULL, ...) 
# S3 method for class 'gs_design' as_gt( x, title = NULL, subtitle = NULL, colname_spanner = \"Cumulative boundary crossing probability\", colname_spannersub = c(\"Alternate hypothesis\", \"Null hypothesis\"), footnote = NULL, display_bound = c(\"Efficacy\", \"Futility\"), display_columns = NULL, display_inf_bound = FALSE, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"x summary object fixed group sequential design. ... Additional arguments (used). title string specify title gt table. footnote list containing content, location, attr. content vector string specify footnote text; location vector string specify locations put superscript footnote index; attr vector string specify attributes footnotes, example, c(\"colname\", \"title\", \"subtitle\", \"analysis\", \"spanner\"); users can use functions gt package customize table. subtitle string specify subtitle gt table. colname_spanner string specify spanner gt table. colname_spannersub vector strings specify spanner details gt table. display_bound vector strings specifying label bounds. default c(\"Efficacy\", \"Futility\"). display_columns vector strings specifying variables displayed summary table. display_inf_bound Logical, whether display +/-inf bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"gt_tbl object.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_gt.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convert summary table of a fixed or group sequential design object to a gt object — as_gt","text":"","code":"if (FALSE) { # interactive() && !identical(Sys.getenv(\"IN_PKGDOWN\"), \"true\") library(dplyr) # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, dropout_rate = .001, hr = c(1, .6) ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # Example 1 ---- fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_gt() # Example 2 ---- fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_gt() } if (FALSE) { # interactive() && !identical(Sys.getenv(\"IN_PKGDOWN\"), \"true\") library(dplyr) # Example 1 ---- # The default output gs_design_ahr() %>% summary() %>% as_gt() gs_power_ahr() %>% summary() %>% as_gt() gs_design_wlr() %>% summary() %>% as_gt() gs_power_wlr() %>% summary() %>% as_gt() gs_power_combo() %>% summary() %>% as_gt() gs_design_rd() %>% summary() %>% as_gt() gs_power_rd() %>% summary() %>% as_gt() # Example 2 ---- # Usage of title = ..., subtitle = ... # to edit the title/subtitle gs_power_wlr() %>% summary() %>% as_gt( title = \"Bound Summary\", subtitle = \"from gs_power_wlr\" ) # Example 3 ---- # Usage of colname_spanner = ..., colname_spannersub = ... 
# to edit the spanner and its sub-spanner gs_power_wlr() %>% summary() %>% as_gt( colname_spanner = \"Cumulative probability to cross boundaries\", colname_spannersub = c(\"under H1\", \"under H0\") ) # Example 4 ---- # Usage of footnote = ... # to edit the footnote gs_power_wlr() %>% summary() %>% as_gt( footnote = list( content = c( \"approximate weighted hazard ratio to cross bound.\", \"wAHR is the weighted AHR.\", \"the crossing probability.\", \"this table is generated by gs_power_wlr.\" ), location = c(\"~wHR at bound\", NA, NA, NA), attr = c(\"colname\", \"analysis\", \"spanner\", \"title\") ) ) # Example 5 ---- # Usage of display_bound = ... # to either show efficacy bound or futility bound, or both(default) gs_power_wlr() %>% summary() %>% as_gt(display_bound = \"Efficacy\") # Example 6 ---- # Usage of display_columns = ... # to select the columns to display in the summary table gs_power_wlr() %>% summary() %>% as_gt(display_columns = c(\"Analysis\", \"Bound\", \"Nominal p\", \"Z\", \"Probability\")) }"},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":null,"dir":"Reference","previous_headings":"","what":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"Write summary table fixed group sequential design object RTF file","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"","code":"as_rtf(x, ...) # S3 method for class 'fixed_design' as_rtf( x, title = NULL, footnote = NULL, col_rel_width = NULL, orientation = c(\"portrait\", \"landscape\"), text_font_size = 9, file, ... ) # S3 method for class 'gs_design' as_rtf( x, title = NULL, subtitle = NULL, colname_spanner = \"Cumulative boundary crossing probability\", colname_spannersub = c(\"Alternate hypothesis\", \"Null hypothesis\"), footnote = NULL, display_bound = c(\"Efficacy\", \"Futility\"), display_columns = NULL, display_inf_bound = TRUE, col_rel_width = NULL, orientation = c(\"portrait\", \"landscape\"), text_font_size = 9, file, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"x summary object fixed group sequential design. ... Additional arguments (used). title string specify title RTF table. footnote list containing content, location, attr. content vector string specify footnote text; location vector string specify locations put superscript footnote index; attr vector string specify attributes footnotes, example, c(\"colname\", \"title\", \"subtitle\", \"analysis\", \"spanner\"); users can use functions gt package customize table. col_rel_width Column relative width vector e.g. c(2,1,1) refers 2:1:1. Default NULL equal column width. orientation Orientation 'portrait' 'landscape'. text_font_size Text font size. vary text font size column, use numeric vector length vector equal number columns displayed e.g. c(9,20,40). file File path output. subtitle string specify subtitle RTF table. colname_spanner string specify spanner RTF table. colname_spannersub vector strings specify spanner details RTF table. display_bound vector strings specifying label bounds. 
default c(\"Efficacy\", \"Futility\"). display_columns vector strings specifying variables displayed summary table. display_inf_bound Logical, whether display +/-inf bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"as_rtf() returns input x invisibly.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/as_rtf.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Write summary table of a fixed or group sequential design object to an RTF file — as_rtf","text":"","code":"library(dplyr) #> #> Attaching package: ‘dplyr’ #> The following objects are masked from ‘package:stats’: #> #> filter, lag #> The following objects are masked from ‘package:base’: #> #> intersect, setdiff, setequal, union # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, dropout_rate = .001, hr = c(1, .6) ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # AHR ---- # under fixed power x <- fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() x %>% as_rtf(file = tempfile(fileext = \".rtf\")) x %>% as_rtf(title = \"Fixed design\", file = tempfile(fileext = \".rtf\")) x %>% as_rtf( footnote = \"Power computed with average hazard ratio method given the sample size\", file = tempfile(fileext = \".rtf\") ) x %>% as_rtf(text_font_size = 10, file = tempfile(fileext = \".rtf\")) # FH ---- # under fixed power fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) #' # \\donttest{ # the default output library(dplyr) gs_design_ahr() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_ahr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_design_wlr() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_combo() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_design_rd() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) gs_power_rd() %>% summary() %>% as_rtf(file = tempfile(fileext = \".rtf\")) # usage of title = ..., subtitle = ... # to edit the title/subtitle gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( title = \"Bound Summary\", subtitle = \"from gs_power_wlr\", file = tempfile(fileext = \".rtf\") ) # usage of colname_spanner = ..., colname_spannersub = ... # to edit the spanner and its sub-spanner gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( colname_spanner = \"Cumulative probability to cross boundaries\", colname_spannersub = c(\"under H1\", \"under H0\"), file = tempfile(fileext = \".rtf\") ) # usage of footnote = ... 
# to edit the footnote gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( footnote = list( content = c( \"approximate weighted hazard ratio to cross bound.\", \"wAHR is the weighted AHR.\", \"the crossing probability.\", \"this table is generated by gs_power_wlr.\" ), location = c(\"~wHR at bound\", NA, NA, NA), attr = c(\"colname\", \"analysis\", \"spanner\", \"title\") ), file = tempfile(fileext = \".rtf\") ) # usage of display_bound = ... # to either show efficacy bound or futility bound, or both(default) gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( display_bound = \"Efficacy\", file = tempfile(fileext = \".rtf\") ) # usage of display_columns = ... # to select the columns to display in the summary table gs_power_wlr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) %>% summary() %>% as_rtf( display_columns = c(\"Analysis\", \"Bound\", \"Nominal p\", \"Z\", \"Probability\"), file = tempfile(fileext = \".rtf\") ) # }"},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":null,"dir":"Reference","previous_headings":"","what":"Define enrollment rate — define_enroll_rate","title":"Define enrollment rate — define_enroll_rate","text":"Define enrollment rate subjects study following piecewise exponential distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define enrollment rate — define_enroll_rate","text":"","code":"define_enroll_rate(duration, rate, stratum = \"All\")"},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define enrollment rate — define_enroll_rate","text":"duration numeric vector ordered piecewise study duration interval. rate numeric vector enrollment rate duration. stratum character vector stratum name.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define enrollment rate — define_enroll_rate","text":"enroll_rate data frame.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define enrollment rate — define_enroll_rate","text":"duration ordered piecewise duration equal \\(t_i - t_{-1}\\), \\(0 = t_0 < t_i < \\cdots < t_M = \\infty\\). enrollment rates defined duration length. 
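A short sketch of how the ordered piecewise durations map to calendar intervals, using the same rates as the examples below:

library(gsDesign2)
er <- define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9))
# Interval endpoints implied by the durations: rates 3, 6, and 9 apply on
# [0, 2), [2, 4), and [4, 14), respectively
cumsum(er$duration)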
study multiple strata, different duration rates can specified stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_enroll_rate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define enrollment rate — define_enroll_rate","text":"","code":"# Define enroll rate without stratum define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 # Define enroll rate with stratum define_enroll_rate( duration = rep(c(2, 2, 2, 18), 3), rate = c((1:4) / 3, (1:4) / 2, (1:4) / 6), stratum = c(array(\"High\", 4), array(\"Moderate\", 4), array(\"Low\", 4)) ) #> # A tibble: 12 × 3 #> stratum duration rate #> #> 1 High 2 0.333 #> 2 High 2 0.667 #> 3 High 2 1 #> 4 High 18 1.33 #> 5 Moderate 2 0.5 #> 6 Moderate 2 1 #> 7 Moderate 2 1.5 #> 8 Moderate 18 2 #> 9 Low 2 0.167 #> 10 Low 2 0.333 #> 11 Low 2 0.5 #> 12 Low 18 0.667"},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":null,"dir":"Reference","previous_headings":"","what":"Define failure rate — define_fail_rate","title":"Define failure rate — define_fail_rate","text":"Define subject failure rate study two treatment groups. Also supports stratified designs different failure rates stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define failure rate — define_fail_rate","text":"","code":"define_fail_rate(duration, fail_rate, dropout_rate, hr = 1, stratum = \"All\")"},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define failure rate — define_fail_rate","text":"duration numeric vector ordered piecewise study duration interval. fail_rate numeric vector failure rate duration control group. dropout_rate numeric vector dropout rate duration. hr numeric vector hazard ratio treatment control group. stratum character vector stratum name.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define failure rate — define_fail_rate","text":"fail_rate data frame.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define failure rate — define_fail_rate","text":"Define failure dropout rate subjects study following piecewise exponential distribution. duration ordered piecewise duration equal \\(t_i - t_{-1}\\), \\(0 = t_0 < t_i < \\cdots < t_M = \\infty\\). failure rate, dropout rate, hazard ratio study duration can specified. 
study multiple strata, different duration, failure rates, dropout rates, hazard ratios can specified stratum.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/define_fail_rate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define failure rate — define_fail_rate","text":"","code":"# Define enroll rate define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 # Define enroll rate with stratum define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = 1, fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) #> # A tibble: 4 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 Low 1 0.1 0.001 0.9 #> 2 Low 1 0.2 0.001 0.75 #> 3 High 1 0.3 0.001 0.8 #> 4 High 1 0.4 0.001 0.6"},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":null,"dir":"Reference","previous_headings":"","what":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"helper function passed uniroot()","code":""},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"","code":"event_diff(x, enroll_rate, fail_rate, ratio, target_event)"},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"x Duration enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). ratio Experimental:Control randomization ratio. 
target_event targeted number events achieved.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/event_diff.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Considering the enrollment rate, failure rate, and randomization ratio, calculate the difference between the targeted number of events and the accumulated events at time x — event_diff","text":"single numeric value represents difference expected number events provided duration (x) targeted number events (target_event)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":null,"dir":"Reference","previous_headings":"","what":"Piecewise constant expected accrual — expected_accrual","title":"Piecewise constant expected accrual — expected_accrual","text":"Computes expected cumulative enrollment (accrual) given set piecewise constant enrollment rates times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Piecewise constant expected accrual — expected_accrual","text":"","code":"expected_accrual( time = 0:24, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Piecewise constant expected accrual — expected_accrual","text":"time Times enrollment computed. enroll_rate enroll_rate data frame without stratum created define_enroll_rate().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Piecewise constant expected accrual — expected_accrual","text":"vector expected cumulative enrollment specified times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Piecewise constant expected accrual — expected_accrual","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_accrual.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Piecewise constant expected accrual — expected_accrual","text":"","code":"library(tibble) # Example 1: default expected_accrual() #> [1] 0 5 10 15 25 35 45 65 85 105 125 145 165 185 205 225 245 265 285 #> [20] 305 325 345 365 385 405 # Example 2: unstratified design expected_accrual( time = c(5, 10, 20), enroll_rate = define_enroll_rate( duration = c(3, 3, 18), rate = c(5, 10, 20) ) ) #> [1] 35 125 325 expected_accrual( time = c(5, 10, 20), enroll_rate = define_enroll_rate( duration = c(3, 3, 18), rate = c(5, 10, 20), ) ) #> [1] 35 125 325 # Example 3: stratified design expected_accrual( time = c(24, 30, 40), enroll_rate = define_enroll_rate( stratum = c(\"subgroup\", \"complement\"), duration = c(33, 33), rate = c(30, 30) ) ) #> [1] 1440 1800 1980 # Example 4: expected accrual over time # Scenario 4.1: for the enrollment in the first 3 months, # it is exactly 3 * 5 = 15. expected_accrual( time = 3, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 15 # Scenario 4.2: for the enrollment in the first 6 months, # it is exactly 3 * 5 + 3 * 10 = 45. 
expected_accrual( time = 6, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 45 # Scenario 4.3: for the enrollment in the first 24 months, # it is exactly 3 * 5 + 3 * 10 + 18 * 20 = 405. expected_accrual( time = 24, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 405 # Scenario 4.4: for the enrollment after 24 months, # it is the same as that from the 24 months, since the enrollment is stopped. expected_accrual( time = 25, enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 405 # Instead of compute the enrolled subjects one time point by one time point, # we can also compute it once. expected_accrual( time = c(3, 6, 24, 25), enroll_rate = define_enroll_rate(duration = c(3, 3, 18), rate = c(5, 10, 20)) ) #> [1] 15 45 405 405"},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":null,"dir":"Reference","previous_headings":"","what":"Expected events observed under piecewise exponential model — expected_event","title":"Expected events observed under piecewise exponential model — expected_event","text":"Computes expected events time strata assumption piecewise constant enrollment rates piecewise exponential failure censoring rates. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time. main purpose may generate trial can analyzed single point time using group sequential methods, routine can also used simulate adaptive trial design. intent enable sample size calculations non-proportional hazards assumptions stratified populations.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Expected events observed under piecewise exponential model — expected_event","text":"","code":"expected_event( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), dropout_rate = 0.001), total_duration = 25, simple = TRUE )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Expected events observed under piecewise exponential model — expected_event","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-start enrollment data cutoff. simple default (TRUE), return numeric expected number events, otherwise data frame described .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Expected events observed under piecewise exponential model — expected_event","text":"default simple = TRUE return total expected number events real number. Otherwise, simple = FALSE, data frame returned following variables period specified fail_rate: t: start period. fail_rate: failure rate period. event: expected events period. 
records returned data frame correspond input data frame fail_rate.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Expected events observed under piecewise exponential model — expected_event","text":"periods generally supplied output input. intent enable expected event calculations tidy format maximize flexibility variety purposes.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/expected_event.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Expected events observed under piecewise exponential model — expected_event","text":"","code":"library(gsDesign2) # Default arguments, simple output (total event count only) expected_event() #> [1] 57.3537 # Event count by time period expected_event(simple = FALSE) #> t fail_rate event #> 1 0 0.07701635 22.24824 #> 2 3 0.03850818 35.10546 # Early cutoff expected_event(total_duration = .5) #> [1] 0.02850923 # Single time period example expected_event( enroll_rate = define_enroll_rate(duration = 10, rate = 10), fail_rate = define_fail_rate(duration = 100, fail_rate = log(2) / 6, dropout_rate = .01), total_duration = 22, simple = FALSE ) #> t fail_rate event #> 1 0 0.1155245 80.40974 # Single time period example, multiple enrollment periods expected_event( enroll_rate = define_enroll_rate(duration = c(5, 5), rate = c(10, 20)), fail_rate = define_fail_rate(duration = 100, fail_rate = log(2) / 6, dropout_rate = .01), total_duration = 22, simple = FALSE ) #> t fail_rate event #> 1 0 0.1155245 118.8484"},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":null,"dir":"Reference","previous_headings":"","what":"Predict time at which a targeted event count is achieved — expected_time","title":"Predict time at which a targeted event count is achieved — expected_time","text":"expected_time() made match input format ahr() solve time expected accumulated events equal input target. Enrollment failure rate distributions specified follows. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Predict time at which a targeted event count is achieved — expected_time","text":"","code":"expected_time( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9) * 5), fail_rate = define_fail_rate(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), target_event = 150, ratio = 1, interval = c(0.01, 100) )"},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Predict time at which a targeted event count is achieved — expected_time","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). target_event targeted number events achieved. ratio Experimental:Control randomization ratio. 
interval interval presumed include time expected event count equal target_event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Predict time at which a targeted event count is achieved — expected_time","text":"data frame Time (computed match events target_event), AHR (average hazard ratio), Events (target_event input), info (information given scenarios), info0 (information related null hypothesis) value total_duration input.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/expected_time.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Predict time at which a targeted event count is achieved — expected_time","text":"","code":"# Example 1 ---- # default # \\donttest{ expected_time() #> time ahr event info info0 #> 1 14.90814 0.7865729 150 36.86707 37.5 # } # Example 2 ---- # check that result matches a finding using AHR() # Start by deriving an expected event count enroll_rate <- define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9) * 5) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) total_duration <- 20 xx <- ahr(enroll_rate, fail_rate, total_duration) xx #> time ahr n event info info0 #> 1 20 0.7377944 540 208.3641 50.97575 52.09103 # Next we check that the function confirms the timing of the final analysis. # \\donttest{ expected_time(enroll_rate, fail_rate, target_event = xx$event, interval = c(.5, 1.5) * xx$time ) #> time ahr event info info0 #> 1 20 0.7377944 208.3641 50.97575 52.09103 # } # Example 3 ---- # In this example, we verify `expected_time()` by `ahr()`. # \\donttest{ x <- ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, total_duration = 20 ) cat(\"The number of events by 20 months is \", x$event, \".\\n\") #> The number of events by 20 months is 208.3641 . y <- expected_time( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, target_event = x$event ) cat(\"The time to get \", x$event, \" is \", y$time, \"months.\\n\") #> The time to get 208.3641 is 20 months. # }"},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":null,"dir":"Reference","previous_headings":"","what":"Find the ","title":"Find the ","text":"Fast replacement dplyr::lag simple case n = 1L always supplying new value insert beginning vector.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Find the ","text":"","code":"fastlag(x, first)"},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Find the ","text":"x vector (length(x) > 0) first single value (length(first) == 1)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Find the ","text":"vector begins first followed x final value removed","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Find the ","text":"Important: function fast provides minimal safety checks. relies coercion rules c. best results, x first type atomic vector, though fine mix numeric integer vectors long code also rely distinction. 
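Since the description above positions this as a drop-in for dplyr::lag() in the n = 1 case, a small check of that equivalence (the specific numbers are arbitrary):

x <- c(2.5, 3.0, 3.5)
dplyr::lag(x, default = 0)        # 0.0 2.5 3.0
gsDesign2:::fastlag(x, first = 0) # same result: first, then x with its last value dropped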
can also work lists needed.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fastlag.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Find the ","text":"","code":"gsDesign2:::fastlag(1:5, first = 100) == c(100, 1:4) #> [1] TRUE TRUE TRUE TRUE TRUE"},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":null,"dir":"Reference","previous_headings":"","what":"Fixed design under non-proportional hazards — fixed_design_ahr","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"Computes fixed design sample size (given power) power (given sample size) : fixed_design_ahr() - Average hazard ratio method. fixed_design_fh() - Weighted logrank test Fleming-Harrington weights (Farrington Manning, 1990). fixed_design_mb() - Weighted logrank test Magirr-Burman weights. fixed_design_lf() - Lachin-Foulkes method (Lachin Foulkes, 1986). fixed_design_maxcombo() - MaxCombo method. fixed_design_rmst() - RMST method. fixed_design_milestone() - Milestone method. Additionally, fixed_design_rd() provides fixed design binary endpoint treatment effect measuring risk difference.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"","code":"fixed_design_ahr( enroll_rate, fail_rate, alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, event = NULL ) fixed_design_fh( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, rho = 0, gamma = 0 ) fixed_design_lf( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate ) fixed_design_maxcombo( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, rho = c(0, 0, 1), gamma = c(0, 1, 0), tau = rep(-1, 3) ) fixed_design_mb( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, tau = 6, w_max = Inf ) fixed_design_milestone( alpha = 0.025, power = NULL, ratio = 1, enroll_rate, fail_rate, study_duration = 36, tau = NULL ) fixed_design_rd( alpha = 0.025, power = NULL, ratio = 1, p_c, p_e, rd0 = 0, n = NULL ) fixed_design_rmst( alpha = 0.025, power = NULL, ratio = 1, study_duration = 36, enroll_rate, fail_rate, tau = NULL )"},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. alpha One-sided Type error (strictly 0 1). power Power (NULL compute power strictly 0 1 - alpha otherwise). ratio Experimental:Control randomization ratio. study_duration Study duration. event Targeted event analysis. rho vector numbers paring gamma tau MaxCombo test. gamma vector numbers paring rho tau MaxCombo test. tau Test parameter RMST. w_max Test parameter Magirr-Burman method. p_c numerical value control arm rate. p_e numerical value experimental arm rate. rd0 Risk difference null hypothesis, default 0. n Sample size. 
NULL power input, sample size computed achieve targeted power","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"list design characteristic summary.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/fixed_design.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fixed design under non-proportional hazards — fixed_design_ahr","text":"","code":"# AHR method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_ahr( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 463. 325. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_ahr( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 360 252. 36 1.96 0.025 0.816 # WLR test with FH weights ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_fh( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = 1, gamma = 1 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(1, 1) 352. 247. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_fh( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = 1, gamma = 1 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(1, 1) 360 252. 36 1.96 0.025 0.906 # LF method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_lf( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Lachin and Foulkes 463. 329. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_fh( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0, 0) (logrank) 360 256. 
36 1.96 0.025 0.819 # MaxCombo test ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_maxcombo( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = c(0, 0.5), gamma = c(0, 0), tau = c(-1, -1) ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 MaxCombo: FHC(0, 0), FHC(0.5, 0) 483. 339. 36 2.02 0.025 0.900 # Example 2: given sample size and compute power x <- fixed_design_maxcombo( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, rho = c(0, 0.5), gamma = c(0, 0), tau = c(-1, -1) ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 MaxCombo: FHC(0, 0), FHC(0.5, 0) 360. 252. 36 2.02 0.025 0.797 # WLR test with MB weights ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_mb( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, tau = 4, w_max = 2 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 430. 301. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_mb( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36, tau = 4, w_max = 2 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 360 252. 36 1.96 0.025 0.844 # Milestone method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_milestone( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Milestone: tau = 18 606. 431. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_milestone( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Milestone: tau = 18 360 256. 36 1.96 0.025 0.705 # Binary endpoint with risk differences ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_rd( alpha = 0.025, power = 0.9, p_c = .15, p_e = .1, rd0 = 0, ratio = 1 ) x %>% summary() #> # A tibble: 1 × 5 #> Design N Bound alpha Power #> #> 1 Risk difference 1835. 
1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_rd( alpha = 0.025, power = NULL, p_c = .15, p_e = .1, rd0 = 0, n = 2000, ratio = 1 ) x %>% summary() #> # A tibble: 1 × 5 #> Design N Bound alpha Power #> #> 1 Risk difference 2000 1.96 0.025 0.923 # RMST method ---- library(dplyr) # Example 1: given power and compute sample size x <- fixed_design_rmst( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 RMST: tau = 18 671. 477. 36 1.96 0.025 0.9 # Example 2: given sample size and compute power x <- fixed_design_rmst( alpha = .025, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = 100, fail_rate = log(2) / 12, hr = .7, dropout_rate = .001 ), study_duration = 36, tau = 18 ) x %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 RMST: tau = 18 360 256. 36 1.96 0.025 0.661"},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":null,"dir":"Reference","previous_headings":"","what":"Function to calculate power — get_combo_power","title":"Function to calculate power — get_combo_power","text":"helper function passed uniroot()","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function to calculate power — get_combo_power","text":"","code":"get_combo_power(n, bound, info_fh, theta_fh, corr_fh, algorithm, beta, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function to calculate power — get_combo_power","text":"n Input sample size algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. beta Type II error. ... Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function to calculate power — get_combo_power","text":"optimal sample size (single numeric value)","code":""},{"path":"https://merck.github.io/gsDesign2/reference/get_combo_power.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function to calculate power — get_combo_power","text":"function calculates difference derived power targeted power (1 - beta), based provided sample size, upper lower boundaries, treatment effect.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gsDesign2-package.html","id":null,"dir":"Reference","previous_headings":"","what":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","title":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","text":"goal 'gsDesign2' enable fixed group sequential design non-proportional hazards. enable highly flexible enrollment, time--event time--dropout assumptions, 'gsDesign2' offers piecewise constant enrollment, failure rates, dropout rates stratified population. package includes three methods designs: average hazard ratio, weighted logrank tests Yung Liu (2019) doi:10.1111/biom.13196 , MaxCombo tests. 
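The `get_combo_power()` helper described above returns the gap between achieved and targeted power, which is exactly the shape `uniroot()` needs in order to solve for sample size. The general pattern is sketched below with a purely illustrative normal-approximation power curve standing in for the MaxCombo calculation; the function name `power_gap`, the effect size, and the interval are assumptions for illustration, not package output:

```r
# Illustrative only: a simple normal-approximation power curve stands in
# for the MaxCombo power computation that the real helper performs.
power_gap <- function(n, beta = 0.1, alpha = 0.025, effect = 0.2) {
  achieved <- pnorm(sqrt(n) * effect - qnorm(1 - alpha))
  achieved - (1 - beta)  # the root of this function is the required sample size
}

# uniroot() finds the n where achieved power equals 1 - beta (here, 0.9).
uniroot(power_gap, interval = c(10, 10000))$root
```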
Substantial flexibility top 'gsDesign' package intended selecting boundaries.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/reference/gsDesign2-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"gsDesign2: Group Sequential Design with Non-Constant Effect — gsDesign2-package","text":"Maintainer: Yujie Zhao yujie.zhao@merck.com Authors: Keaven Anderson keaven_anderson@merck.com Yilong Zhang elong0527@gmail.com Jianxiao Yang yangjx@ucla.edu Nan Xiao nan.xiao1@merck.com contributors: Amin Shirazi ashirazist@gmail.com [contributor] Ruixue Wang ruixue.wang@merck.com [contributor] Yi Cui yi.cui@merck.com [contributor] Ping Yang ping.yang1@merck.com [contributor] Xin Tong Li xin.tong.li@merck.com [contributor] Chenxiang Li chenxiang.li@merck.com [contributor] Hiroaki Fukuda hiroaki.fukuda@merck.com [contributor] Hongtao Zhang hongtao.zhang1@merck.com [contributor] Yalin Zhu yalin.zhu@outlook.com [contributor] John Blischak jdblischak@gmail.com [contributor] Dickson Wanjau dickson.wanjau@merck.com [contributor] Merck & Co., Inc., Rahway, NJ, USA affiliates [copyright holder]","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":null,"dir":"Reference","previous_headings":"","what":"Default boundary generation — gs_b","title":"Default boundary generation — gs_b","text":"gs_b() simplest version function used upper lower arguments gs_power_npe() gs_design_npe() upper_bound lower_bound arguments gs_prob_combo() pmvnorm_combo(). simply returns vector Z-values input vector par , k specified, par[k] returned. Note bounds need change changing information analyses, gs_b() used. instance, spending function bounds use gs_spending_bound().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Default boundary generation — gs_b","text":"","code":"gs_b(par = NULL, k = NULL, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Default boundary generation — gs_b","text":"par gs_b(), just Z-values boundaries; can include infinite values. k NULL (default), return par, else return par[k]. ... 
arguments passed methods.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Default boundary generation — gs_b","text":"Returns vector input par k NULL, otherwise, par[k].","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Default boundary generation — gs_b","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_b.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Default boundary generation — gs_b","text":"","code":"# Simple: enter a vector of length 3 for bound gs_b(par = 4:2) #> [1] 4 3 2 # 2nd element of par gs_b(par = 4:2, k = 2) #> [1] 3 # Generate an efficacy bound using a spending function # Use Lan-DeMets spending approximation of O'Brien-Fleming bound # as 50%, 75% and 100% of final spending # Information fraction IF <- c(.5, .75, 1) gs_b(par = gsDesign::gsDesign( alpha = .025, k = length(IF), test.type = 1, sfu = gsDesign::sfLDOF, timing = IF )$upper$bound) #> [1] 2.962588 2.359018 2.014084"},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":null,"dir":"Reference","previous_headings":"","what":"Create npsurvSS arm object — gs_create_arm","title":"Create npsurvSS arm object — gs_create_arm","text":"Create npsurvSS arm object","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create npsurvSS arm object — gs_create_arm","text":"","code":"gs_create_arm(enroll_rate, fail_rate, ratio, total_time = 1e+06)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create npsurvSS arm object — gs_create_arm","text":"enroll_rate Enrollment rates define_enroll_rate(). fail_rate Failure dropout rates define_fail_rate(). ratio Experimental:Control randomization ratio. 
total_time Total analysis time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create npsurvSS arm object — gs_create_arm","text":"list two arms.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Create npsurvSS arm object — gs_create_arm","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_create_arm.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create npsurvSS arm object — gs_create_arm","text":"","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_create_arm(enroll_rate, fail_rate, ratio = 1) #> $arm0 #> $size #> [1] 1 #> #> $accr_time #> [1] 14 #> #> $accr_dist #> [1] \"pieceuni\" #> #> $accr_interval #> [1] 0 2 4 14 #> #> $accr_param #> [1] 0.05555556 0.11111111 0.83333333 #> #> $surv_cure #> [1] 0 #> #> $surv_interval #> [1] 0 3 Inf #> #> $surv_shape #> [1] 1 #> #> $surv_scale #> [1] 0.07701635 0.03850818 #> #> $loss_shape #> [1] 1 #> #> $loss_scale #> [1] 0.001 #> #> $follow_time #> [1] 999986 #> #> $total_time #> [1] 1e+06 #> #> attr(,\"class\") #> [1] \"list\" \"arm\" #> #> $arm1 #> $size #> [1] 1 #> #> $accr_time #> [1] 14 #> #> $accr_dist #> [1] \"pieceuni\" #> #> $accr_interval #> [1] 0 2 4 14 #> #> $accr_param #> [1] 0.05555556 0.11111111 0.83333333 #> #> $surv_cure #> [1] 0 #> #> $surv_interval #> [1] 0 3 Inf #> #> $surv_shape #> [1] 1 #> #> $surv_scale #> [1] 0.06931472 0.02310491 #> #> $loss_shape #> [1] 1 #> #> $loss_scale #> [1] 0.001 #> #> $follow_time #> [1] 999986 #> #> $total_time #> [1] 1e+06 #> #> attr(,\"class\") #> [1] \"list\" \"arm\" #>"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"Group sequential design using average hazard ratio non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"","code":"gs_design_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), alpha = 0.025, beta = 0.1, info_frac = NULL, analysis_time = 36, ratio = 1, binding = FALSE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = alpha), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = beta), h1_spending = TRUE, test_upper = TRUE, test_lower = TRUE, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using average 
hazard ratio under non-proportional hazards — gs_design_ahr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. alpha One-sided Type error. beta Type II error. info_frac Targeted information fraction analysis. analysis_time Minimum time analysis. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"added.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_design_ahr","text":"","code":"library(gsDesign) #> #> Attaching package: ‘gsDesign’ #> The following objects are masked from ‘package:gsDesign2’: #> #> as_gt, as_rtf library(gsDesign2) library(dplyr) # Example 1 ---- # call with defaults gs_design_ahr() #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 1 #> #> $input$analysis_time #> [1] 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function 
(k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = 
sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> 
else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 13.2 #> 2 All 2 26.4 #> 3 All 10 39.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 1 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.9 0.025 1.96 0.795 0.0250 #> #> $analysis #> # A tibble: 1 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 36 476. 292. 0.683 0.381 71.7 73.0 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 2 ---- # Single analysis gs_design_ahr(analysis_time = 40) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 1 #> #> $input$analysis_time #> [1] 40 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> 
stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + 
adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 11.9 #> 2 All 2 23.8 #> 3 All 10 35.6 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 1 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.9 0.025 1.96 0.791 0.0250 #> #> $analysis #> # A tibble: 1 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 40 428. 280. 
0.678 0.389 68.8 69.9 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 3 ---- # Multiple analysis_time gs_design_ahr(analysis_time = c(12, 24, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower 
bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < 
extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.5 #> 2 All 2 29.1 #> 3 All 10 43.6 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00248 0.0000538 3.87 0.459 0.0000538 #> 2 1 lower 0.00321 0.0443 -1.70 1.41 0.956 #> 3 2 upper 0.579 0.00921 2.36 0.736 0.00919 #> 4 2 lower 0.0556 0.830 0.953 0.884 0.170 #> 5 3 upper 0.900 0.0244 2.01 0.799 0.0222 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0223 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 436. 98.8 0.811 0.210 24.4 24.7 0.309 0.308 #> 2 2 24 523. 238. 0.715 0.335 58.1 59.4 0.738 0.741 #> 3 3 36 523. 321. 
0.683 0.381 78.8 80.2 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # Example 4 ---- # Specified information fraction # \\donttest{ gs_design_ahr(info_frac = c(.25, .75, 1), analysis_time = 36) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.25 0.75 1.00 #> #> $input$analysis_time #> [1] 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not 
converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } 
#> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.6 #> 2 All 2 29.1 #> 3 All 10 43.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000295 0.00000737 4.33 0.380 0.00000737 #> 2 1 lower 0.00108 0.0135 -2.21 1.64 0.987 #> 3 2 upper 0.599 0.00965 2.34 0.740 0.00965 #> 4 2 lower 0.0570 0.843 1.01 0.878 0.157 #> 5 3 upper 0.900 0.0244 2.01 0.799 0.0221 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0221 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 10.7 382. 80.4 0.823 0.195 19.8 20.1 0.251 0.250 #> 2 2 24.4 524. 241. 0.714 0.337 59.0 60.3 0.747 0.750 #> 3 3 36 524. 322. 
0.683 0.381 79.0 80.4 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 5 ---- # multiple analysis times & info_frac # driven by times gs_design_ahr(info_frac = c(.25, .75, 1), analysis_time = c(12, 25, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.25 0.75 1.00 #> #> $input$analysis_time #> [1] 12 25 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> 
stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - 
bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.6 #> 2 All 2 29.3 #> 3 All 10 43.9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00251 0.0000538 3.87 0.460 0.0000538 #> 2 1 lower 0.00321 0.0446 -1.70 1.41 0.955 #> 3 2 upper 0.635 0.0105 2.31 0.746 0.0104 #> 4 2 lower 0.0599 0.862 1.09 0.871 0.138 #> 5 3 upper 0.900 0.0243 2.02 0.799 0.0219 #> 6 3 lower 0.100 0.976 2.01 0.799 0.0220 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 439. 99.5 0.811 0.210 24.5 24.9 0.309 0.308 #> 2 2 25 527. 248. 0.711 0.341 60.5 61.9 0.763 0.766 #> 3 3 36 527. 323. 
0.683 0.381 79.3 80.7 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # driven by info_frac # \\donttest{ gs_design_ahr(info_frac = c(1 / 3, .8, 1), analysis_time = c(12, 25, 36)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3333333 0.8000000 1.0000000 #> #> $input$analysis_time #> [1] 12 25 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not 
converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } 
#> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 14.7 #> 2 All 2 29.5 #> 3 All 10 44.2 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00510 0.000104 3.71 0.490 0.000104 #> 2 1 lower 0.00459 0.0665 -1.50 1.33 0.934 #> 3 2 upper 0.701 0.0122 2.25 0.756 0.0122 #> 4 2 lower 0.0655 0.896 1.26 0.856 0.104 #> 5 3 upper 0.900 0.0241 2.03 0.799 0.0214 #> 6 3 lower 0.100 0.976 2.02 0.799 0.0216 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12.5 465. 108. 0.806 0.216 26.7 27.1 0.334 0.333 #> 2 2 26.4 530. 260. 0.706 0.348 63.7 65.1 0.797 0.800 #> 3 3 36 530. 325. 
0.683 0.381 79.9 81.3 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 6 ---- # 2-sided symmetric design with O'Brien-Fleming spending # \\donttest{ gs_design_ahr( analysis_time = c(12, 24, 36), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), h1_spending = FALSE ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> 
bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], 
a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] FALSE #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 13.7 #> 2 All 2 27.5 #> 3 All 10 41.2 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00226 0.0000538 3.87 0.449 0.0000538 #> 2 1 lower 0.000000613 0.0000538 -3.87 2.23 1.00 #> 3 2 upper 0.550 0.00921 2.36 0.730 0.00919 #> 4 2 lower 0.00000125 0.00921 -2.36 1.37 0.991 #> 5 3 upper 0.900 0.0250 2.01 0.794 0.0222 #> 6 3 lower 0.00000128 0.0250 -2.01 1.26 0.978 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 412. 93.4 0.811 0.210 23.0 23.3 0.309 0.308 #> 2 2 24 494. 224. 0.715 0.335 54.9 56.1 0.738 0.741 #> 3 3 36 494. 303. 
0.683 0.381 74.4 75.8 1 1 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # 2-sided asymmetric design with O'Brien-Fleming upper spending # Pocock lower spending under H1 (NPH) # \\donttest{ gs_design_ahr( analysis_time = c(12, 24, 36), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDPocock, total_spend = 0.1, param = NULL, timing = NULL), h1_spending = TRUE ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> 
} #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, 
info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> t[t > 1] <- 1 #> x <- list(name = \"Lan-DeMets Pocock approximation\", param = NULL, #> parname = \"none\", sf = sfLDPocock, spend = alpha * log(1 + #> (exp(1) - 1) * t), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.5 #> 2 All 2 32.9 #> 3 All 10 49.4 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00305 0.0000538 3.87 0.481 0.0000538 #> 2 1 lower 0.0430 0.268 -0.619 1.12 0.732 #> 3 2 upper 0.638 0.00921 2.36 0.750 0.00920 #> 4 2 lower 0.0823 0.874 1.13 0.871 0.129 #> 5 3 upper 0.900 0.0250 1.98 0.813 0.0240 #> 6 3 lower 0.100 0.975 1.97 0.813 0.0243 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 494. 112. 0.811 0.210 27.6 28.0 0.309 0.308 #> 2 2 24 593. 269. 0.715 0.335 65.9 67.3 0.738 0.741 #> 3 3 36 593. 364. 
0.683 0.381 89.3 90.9 1 1 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # Example 7 ---- # \\donttest{ gs_design_ahr( alpha = 0.0125, analysis_time = c(12, 24, 36), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.0125, param = NULL, timing = NULL), lower = gs_b, lpar = rep(-Inf, 3) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.0125 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if 
(abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.0125 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf -Inf #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.1 #> 2 All 2 32.2 #> 3 All 10 48.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 3 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000619 0.00000679 4.35 0.435 0.00000679 #> 2 2 upper 0.505 0.00371 2.68 0.719 0.00371 #> 3 3 upper 0.900 0.0125 2.28 0.785 0.0114 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 483. 109. 0.811 0.210 27.0 27.4 0.309 0.308 #> 2 2 24 579. 263. 0.715 0.335 64.3 65.8 0.738 0.741 #> 3 3 36 579. 355. 0.683 0.381 87.2 88.8 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" gs_design_ahr( alpha = 0.0125, analysis_time = c(12, 24, 36), upper = gs_b, upar = gsDesign::gsDesign( k = 3, test.type = 1, n.I = c(.25, .75, 1), sfu = sfLDOF, sfupar = NULL, alpha = 0.0125 )$upper$bound, lower = gs_b, lpar = rep(-Inf, 3) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$alpha #> [1] 0.0125 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3080415 0.7407917 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 4.859940 2.658446 2.280095 #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf -Inf #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 16.1 #> 2 All 2 32.2 #> 3 All 10 48.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 3 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000938 0.000000587 4.86 0.395 0.000000587 #> 2 2 upper 0.513 0.00393 2.66 0.721 0.00393 #> 3 3 upper 0.900 0.0125 2.28 0.785 0.0113 #> #> $analysis #> # A tibble: 3 × 10 #> analysis time n event ahr theta info info0 info_frac info_frac0 #> #> 1 1 12 483. 110. 0.811 0.210 27.0 27.4 0.309 0.308 #> 2 2 24 580. 263. 0.715 0.335 64.4 65.9 0.738 0.741 #> 3 3 36 580. 356. 0.683 0.381 87.3 88.9 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"Group sequential design using MaxCombo test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"","code":"gs_design_combo( enroll_rate = define_enroll_rate(duration = 12, rate = 500/12), fail_rate = define_fail_rate(duration = c(4, 100), fail_rate = log(2)/15, hr = c(1, 0.6), dropout_rate = 0.001), fh_test = rbind(data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36)), ratio = 1, alpha = 0.025, beta = 0.2, binding = FALSE, upper = gs_b, upar = c(3, 2, 1), lower = gs_b, lpar = c(-1, 0, 1), algorithm = mvtnorm::GenzBretz(maxpts = 1e+05, abseps = 1e-05), n_upper_bound = 1000, ... )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. fh_test data frame summarize test analysis. See examples data structure. ratio Experimental:Control randomization ratio (yet implemented). alpha One-sided Type error. beta Type II error. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. n_upper_bound numeric value upper limit sample size. ... 
Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using MaxCombo test under non-proportional hazards — gs_design_combo","text":"","code":"# The example is slow to run library(dplyr) library(mvtnorm) library(gsDesign) enroll_rate <- define_enroll_rate( duration = 12, rate = 500 / 12 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) fh_test <- rbind( data.frame( rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36) ), data.frame( rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36 ) ) x <- gsSurv( k = 3, test.type = 4, alpha = 0.025, beta = 0.2, astar = 0, timing = 1, sfu = sfLDOF, sfupar = 0, sfl = sfLDOF, sflpar = 0, lambdaC = 0.1, hr = 0.6, hr0 = 1, eta = 0.01, gamma = 10, R = 12, S = NULL, T = 36, minfup = 24, ratio = 1 ) # Example 1 ---- # User-defined boundary # \\donttest{ gs_design_combo( enroll_rate, fail_rate, fh_test, alpha = 0.025, beta = 0.2, ratio = 1, binding = FALSE, upar = x$upper$bound, lpar = x$lower$bound ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 37.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> analysis bound probability probability0 z nominal p #> 1 1 upper 0.002056984 0.0001035057 3.7103029 0.0001035057 #> 2 1 lower 0.140694990 0.4066436377 -0.2361874 0.5933563623 #> 3 2 upper 0.469124612 0.0060406872 2.5114070 0.0060125477 #> 4 2 lower 0.185586571 0.8846152138 1.1703638 0.1209273043 #> 5 3 upper 0.799998476 0.0254946088 1.9929702 0.0231323552 #> 6 3 lower 0.200008755 0.9745050358 1.9929702 0.0231323552 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 444.7987 95.53766 0.3241690 0.8418858 #> 2 2 24 444.7987 219.09306 0.7434051 0.7164215 #> 3 3 36 444.7987 294.71556 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # } # Example 2 ---- # \\donttest{ # Boundary derived by spending function gs_design_combo( enroll_rate, fail_rate, fh_test, alpha = 0.025, beta = 0.2, ratio = 1, binding = FALSE, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), # alpha spending lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), # beta spending ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> analysis bound probability probability0 z nominal p #> 1 1 upper 2.087715e-08 3.299865e-10 6.1753973 3.299865e-10 #> 2 1 lower 3.269631e-04 3.303090e-03 -2.7160707 9.966969e-01 #> 3 2 upper 2.203276e-01 2.565830e-03 2.7986508 2.565830e-03 #> 4 2 lower 8.468643e-02 7.431751e-01 0.6531624 2.568258e-01 #> 5 3 upper 8.000054e-01 2.371235e-02 2.0972454 1.798593e-02 #> 6 3 
lower 1.999978e-01 9.762886e-01 2.0972475 1.798584e-02 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 301.2538 64.70585 0.3241690 0.8418858 #> 2 2 24 301.2538 148.38759 0.7434051 0.7164215 #> 3 3 36 301.2538 199.60528 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design computation with non-constant effect and information — gs_design_npe","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"Derives group sequential design size, bounds boundary crossing probabilities based proportionate information effect size analyses. allows non-constant treatment effect time, also can applied usual homogeneous effect size designs. requires treatment effect proportionate statistical information analysis well method deriving bounds, spending. routine enables two things available gsDesign package: non-constant effect, 2) flexibility boundary selection. many applications, non-proportional-hazards design function gs_design_nph() used; calls function. Initial bound types supported 1) spending bounds, fixed bounds, 3) Haybittle-Peto-like bounds. requirement boundary update method can bound without knowledge future bounds. example, bounds based conditional power require knowledge future bounds supported routine; limited conditional power method demonstrated. Boundary family designs Wang-Tsiatis designs including original (non-spending-function-based) O'Brien-Fleming Pocock designs supported gs_power_npe().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"","code":"gs_design_npe( theta = 0.1, theta0 = NULL, theta1 = NULL, info = 1, info0 = NULL, info1 = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), alpha = 0.025, beta = 0.1, upper = gs_b, upar = qnorm(0.975), lower = gs_b, lpar = -Inf, test_upper = TRUE, test_lower = TRUE, binding = FALSE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"theta Natural parameter group sequential design representing expected incremental drift analyses; used power calculation. theta0 Natural parameter used upper bound spending; NULL, set 0. theta1 Natural parameter used lower bound spending; NULL, set theta yields usual beta-spending. set 0, spending 2-sided null hypothesis. info Proportionate statistical information analyses input theta. info0 Proportionate statistical information null hypothesis, different alternative; impacts null hypothesis bound calculation. info1 Proportionate statistical information alternate hypothesis; impacts null hypothesis bound calculation. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. alpha One-sided Type error. beta Type II error. upper Function compute upper bound. upar Parameters passed function provided upper. 
lower Function compare lower bound. lpar Parameters passed function provided lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicates lower bound; otherwise, logical vector length info indicate analyses lower bound. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally r changed user. tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"tibble columns analysis, bound, z, probability, theta, info, info0.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"inputs info info0 vectors length increasing positive numbers. design returned change constant scale factor ensure design power 1 - beta. bound specifications upper, lower, upar, lpar used ensure Type error boundary properties specified.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"Keaven Anderson keaven_anderson@merck.com","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_npe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design computation with non-constant effect and information — gs_design_npe","text":"","code":"library(dplyr) library(gsDesign) # Example 1 ---- # Single analysis # Lachin book p 71 difference of proportions example pc <- .28 # Control response rate pe <- .40 # Experimental response rate p0 <- (pc + pe) / 2 # Ave response rate under H0 # Information per increment of 1 in sample size info0 <- 1 / (p0 * (1 - p0) * 4) info <- 1 / (pc * (1 - pc) * 2 + pe * (1 - pe) * 2) # Result should round up to next even number = 652 # Divide information needed under H1 by information per patient added gs_design_npe(theta = pe - pc, info = info, info0 = info0) #> # A tibble: 1 × 10 #> analysis bound z probability probability0 theta info info0 info1 #> #> 1 1 upper 1.96 0.9 0.025 0.12 737. 725. 737. 
#> # ℹ 1 more variable: info_frac # Example 2 ---- # Fixed bound x <- gs_design_npe( alpha = 0.0125, theta = c(.1, .2, .3), info = (1:3) * 80, info0 = (1:3) * 80, upper = gs_b, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF, alpha = 0.0125)$upper$bound, lower = gs_b, lpar = c(-1, 0, 0) ) x #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 4.17 0.000278 0.0000152 0.1 0.333 51.6 51.6 #> 2 1 lower -1 0.0429 0.159 0.1 0.333 51.6 51.6 #> 3 2 upper 2.85 0.208 0.00222 0.2 0.667 103. 103. #> 4 2 lower 0 0.0537 0.513 0.2 0.667 103. 103. #> 5 3 upper 2.26 0.900 0.0125 0.3 1 155. 155. #> 6 3 lower 0 0.0537 0.606 0.3 1 155. 155. #> # ℹ 1 more variable: info1 # Same upper bound; this represents non-binding Type I error and will total 0.025 gs_power_npe( theta = rep(0, 3), info = (x %>% filter(bound == \"upper\"))$info, upper = gs_b, upar = (x %>% filter(bound == \"upper\"))$z, lower = gs_b, lpar = rep(-Inf, 3) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 4.17 0.0000152 0 0 0.333 51.6 51.6 51.6 #> 2 2 upper 2.85 0.00222 0 0 0.667 103. 103. 103. #> 3 3 upper 2.26 0.0125 0 0 1 155. 155. 155. #> 4 1 lower -Inf 0 0 0 0.333 51.6 51.6 51.6 #> 5 2 lower -Inf 0 0 0 0.667 103. 103. 103. #> 6 3 lower -Inf 0 0 0 1 155. 155. 155. # Example 3 ---- # Spending bound examples # Design with futility only at analysis 1; efficacy only at analyses 2, 3 # Spending bound for efficacy; fixed bound for futility # NOTE: test_upper and test_lower DO NOT WORK with gs_b; must explicitly make bounds infinite # test_upper and test_lower DO WORK with gs_spending_bound gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = c(-1, -Inf, -Inf), test_upper = c(FALSE, TRUE, TRUE) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper Inf 0 0 0.1 0.333 44.6 44.6 #> 2 1 lower -1 0.0477 0.159 0.1 0.333 44.6 44.6 #> 3 2 upper 2.51 0.267 0.00605 0.2 0.667 89.1 89.1 #> 4 2 lower -Inf 0.0477 0.159 0.2 0.667 89.1 89.1 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 134. 134. #> 6 3 lower -Inf 0.0477 0.159 0.3 1 134. 134. #> # ℹ 1 more variable: info1 # one can try `info_scale = \"h1_info\"` or `info_scale = \"h0_info\"` here gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 30, info_scale = \"h1_info\", upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = c(-1, -Inf, -Inf), test_upper = c(FALSE, TRUE, TRUE) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper Inf 0 0 0.1 0.333 44.6 44.6 #> 2 1 lower -1 0.0477 0.159 0.1 0.333 44.6 44.6 #> 3 2 upper 2.51 0.267 0.00605 0.2 0.667 89.1 89.1 #> 4 2 lower -Inf 0.0477 0.159 0.2 0.667 89.1 89.1 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 134. 134. #> 6 3 lower -Inf 0.0477 0.159 0.3 1 134. 134. 
#> # ℹ 1 more variable: info1 # Example 4 ---- # Spending function bounds # 2-sided asymmetric bounds # Lower spending based on non-zero effect gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, info0 = (1:3) * 30, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 3.71 0.000145 0.000104 0.1 0.333 43.5 32.7 #> 2 1 lower -1.34 0.0139 0.0909 0.1 0.333 43.5 32.7 #> 3 2 upper 2.51 0.258 0.00605 0.2 0.667 87.1 65.3 #> 4 2 lower 0.150 0.0460 0.562 0.2 0.667 87.1 65.3 #> 5 3 upper 1.99 0.900 0.0249 0.3 1 131. 98.0 #> 6 3 lower 2.00 0.0908 0.976 0.3 1 131. 98.0 #> # ℹ 1 more variable: info1 # Example 5 ---- # Two-sided symmetric spend, O'Brien-Fleming spending # Typically, 2-sided bounds are binding xx <- gs_design_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) xx #> # A tibble: 6 × 10 #> analysis bound z probability probability0 theta info_frac info info0 #> #> 1 1 upper 3.71 0.00104 0.000104 0.1 0.333 39.8 39.8 #> 2 1 lower -3.08 0.000104 0.00104 0.1 0.333 39.8 39.8 #> 3 2 upper 2.51 0.233 0.00605 0.2 0.667 79.5 79.5 #> 4 2 lower -0.728 0.00605 0.233 0.2 0.667 79.5 79.5 #> 5 3 upper 1.99 0.900 0.0250 0.3 1 119. 119. #> 6 3 lower 1.28 0.0250 0.900 0.3 1 119. 119. #> # ℹ 1 more variable: info1 # Re-use these bounds under alternate hypothesis # Always use binding = TRUE for power calculations gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upper = gs_b, lower = gs_b, upar = (xx %>% filter(bound == \"upper\"))$z, lpar = -(xx %>% filter(bound == \"upper\"))$z ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.902 0.3 0.3 1 120 120 120 #> 4 1 lower -3.71 0.00000704 0.1 0.1 0.333 40 40 40 #> 5 2 lower -2.51 0.0000151 0.2 0.2 0.667 80 80 80 #> 6 3 lower -1.99 0.0000151 0.3 0.3 1 120 120 120"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"Group sequential design binary outcome measuring risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"","code":"gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), info_frac = 1:3/3, rd0 = 0, alpha = 0.025, beta = 0.1, ratio = 1, stratum_prev = NULL, weight = c(\"unstratified\", \"ss\", \"invar\"), upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(0.1), rep(-Inf, 2)), test_upper = TRUE, test_lower = TRUE, info_scale = c(\"h0_h1_info\", 
\"h0_info\", \"h1_info\"), binding = FALSE, r = 18, tol = 1e-06, h1_spending = TRUE )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"p_c Rate control group. p_e Rate experimental group. info_frac Statistical information fraction. rd0 Treatment effect super-superiority designs, default 0. alpha One-sided Type error. beta Type II error. ratio Experimental:Control randomization ratio (yet implemented). stratum_prev Randomization ratio different stratum. unstratified design NULL. Otherwise tibble containing two columns (stratum prevalence). weight weighting scheme stratified population. upper Function compute upper bound. lower Function compute lower bound. upar Parameters passed upper. lpar Parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicates lower bound; otherwise, logical vector length info indicate analyses lower bound. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). 
h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"list input parameters, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"added.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design of binary outcome measuring in risk difference — gs_design_rd","text":"","code":"library(gsDesign) # Example 1 ---- # unstratified group sequential design x <- gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 2, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) y <- gs_power_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), n = tibble::tibble(stratum = \"All\", n = x$analysis$n, analysis = 1:2), rd0 = 0, ratio = 1, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 2, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) # The above 2 design share the same power with the same sample size and treatment effect x$bound$probability[x$bound$bound == \"upper\" & x$bound$analysis == 2] #> [1] 0.9 y$bound$probability[y$bound$bound == \"upper\" & y$bound$analysis == 2] #> [1] 0.9 # Example 2 ---- # stratified group sequential design gs_design_rd( p_c = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(.2, .25) ), p_e = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), rate = c(.15, .22) ), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = tibble::tibble( stratum = c(\"biomarker positive\", \"biomarker negative\"), prevalence = c(.4, .6) ), weight = \"ss\", upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = rep(-Inf, 2) ) #> $input #> $input$p_c #> # A tibble: 2 × 2 #> stratum rate #> #> 1 biomarker positive 0.2 #> 2 biomarker negative 0.25 #> #> $input$p_e #> # A tibble: 2 × 2 #> stratum rate #> #> 1 biomarker positive 0.15 #> 2 biomarker negative 0.22 #> #> $input$info_frac #> [1] 0.7 1.0 #> #> $input$rd0 #> [1] 0 #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.1 #> #> $input$ratio #> [1] 1 #> #> $input$stratum_prev #> # A tibble: 2 × 2 #> stratum prevalence #> #> 1 biomarker positive 0.4 #> 2 biomarker negative 0.6 #> #> $input$weight #> [1] \"ss\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- 
rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$test_upper #> [1] TRUE #> #> 
$input$lower #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $bound #> # A tibble: 2 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.616 0.00738 2.44 0.0339 #> 2 2 upper 0.900 0.0250 2.00 0.0232 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 2 × 8 #> analysis n rd rd0 info info0 info_frac info_frac0 #> #> 1 1 3426. 0.038 0 5184. 5172. 0.7 0.7 #> 2 2 4894. 0.038 0 7406. 7388. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"Group sequential design using weighted log-rank test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"","code":"gs_design_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = tibble(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), weight = wlr_weight_fh, approx = \"asymptotic\", alpha = 0.025, beta = 0.1, ratio = 1, info_frac = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), analysis_time = 36, binding = FALSE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = alpha), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = beta), test_upper = TRUE, test_lower = TRUE, h1_spending = TRUE, r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". alpha One-sided Type error. beta Type II error. ratio Experimental:Control randomization ratio (yet implemented). info_frac Targeted information fraction analysis. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. analysis_time Minimum time analysis. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. 
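The weight argument described earlier in this argument list is typically supplied as a function wrapping wlr_weight_fh(), as in the examples further down this page; a minimal sketch for the Fleming-Harrington FH(0, 0.5) choice:

library(gsDesign2)

# Fleming-Harrington weights with rho = 0, gamma = 0.5 (down-weights early events)
fh_0_05 <- function(x, arm0, arm1) {
  wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5)
}
# then pass `weight = fh_0_05` to gs_design_wlr()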
test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. h1_spending Indicator lower bound set spending alternate hypothesis (input fail_rate) spending used lower bound. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_design_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using weighted log-rank test under non-proportional hazards — gs_design_wlr","text":"","code":"library(dplyr) library(mvtnorm) library(gsDesign) library(gsDesign2) # set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 1) # set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # Example 1 ---- # Information fraction driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 36, info_frac = 1:3/3 ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3333333 0.6666667 1.0000000 #> #> $input$analysis_time #> [1] 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if 
(is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = 
TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper 
#> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.1 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00173 0.0000794 3.78 0.490 0.0000794 #> 2 1 lower 0.0269 0.134 -1.11 1.23 0.866 #> 3 2 upper 0.126 0.00562 2.54 0.671 0.00559 #> 4 2 lower 0.117 0.568 0.155 0.976 0.439 #> 5 3 upper 0.800 0.0249 1.99 0.754 0.0233 #> 6 3 lower 0.200 0.975 1.99 0.754 0.0233 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 1 1 18.0 301. 112. 0.702 0.354 5.46 5.53 0.333 #> 2 2 26.6 301. 162. 0.657 0.420 10.9 11.2 0.667 #> 3 3 36 301. 199. 0.639 0.732 16.4 17.1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # Example 2 ---- # Calendar time driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 1:3*12, info_frac = NULL ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.1325089 0.5649964 1.0000000 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- 
spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) 
{ #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 24.0 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000000732 3.30e-10 6.18 0.208 3.30e-10 #> 2 1 lower 0.000441 7.55e- 3 -2.43 1.85 9.92e- 1 #> 3 2 upper 0.301 2.57e- 3 2.80 0.625 2.57e- 3 #> 4 2 lower 0.0882 8.23e- 1 0.925 0.856 1.77e- 1 #> 5 3 upper 0.800 2.20e- 2 1.97 0.751 2.42e- 2 #> 6 3 lower 0.200 9.78e- 1 1.97 0.751 2.42e- 2 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 
1 1 12 288. 61.9 0.781 0.626 2.08 2.09 0.133 #> 2 2 24 288. 142. 0.666 0.765 8.86 9.07 0.565 #> 3 3 36 288. 191. 0.639 0.732 15.7 16.4 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # Example 3 ---- # Both calendar time and information fraction driven design gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, ratio = 1, alpha = 0.025, beta = 0.2, weight = function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) }, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2), analysis_time = 1:3*12, info_frac = c(0.3, 0.7, 1) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 1 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$weight #> function (x, arm0, arm1) #> { #> wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5) #> } #> #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$alpha #> [1] 0.025 #> #> $input$beta #> [1] 0.2 #> #> $input$ratio #> [1] 1 #> #> $input$info_frac #> [1] 0.3 0.7 1.0 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = 
info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- 
theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$test_lower #> [1] TRUE #> #> $input$h1_spending #> [1] TRUE #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 25.3 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000646 0.0000315 4.00 0.461 0.0000315 #> 2 1 lower 0.0197 0.0954 -1.31 1.29 0.905 #> 3 2 upper 0.154 0.00693 2.46 0.683 0.00692 #> 4 2 lower 0.126 0.608 0.266 0.960 0.395 #> 5 3 upper 0.800 0.0249 2.00 0.755 0.0229 #> 6 3 lower 0.200 0.975 2.00 0.755 0.0229 #> #> $analysis #> # A tibble: 3 × 9 #> analysis time n event ahr theta info info0 info_frac #> #> 1 1 17.1 304. 107. 0.711 0.341 4.96 5.02 0.300 #> 2 2 27.5 304. 167. 0.655 0.424 11.6 11.9 0.700 #> 3 3 36 304. 201. 
0.639 0.732 16.5 17.3 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size based on AHR approximation — gs_info_ahr","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"Based on piecewise enrollment rate, failure rate, and dropout rates, computes approximate information and effect size using an average hazard ratio model.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"","code":"gs_info_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"enroll_rate Enrollment rates from define_enroll_rate(). fail_rate Failure and dropout rates from define_fail_rate(). ratio Experimental:Control randomization ratio. event Targeted minimum events at each analysis. analysis_time Targeted minimum study duration at each analysis. interval An interval presumed to include the time at which the expected event count is equal to the targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"A data frame with columns analysis, time, ahr, event, theta, info, info0. The columns info and info0 contain statistical information under H1 and H0, respectively. For analysis k, time[k] is the maximum of analysis_time[k] and the expected time required to accrue the targeted event[k]. ahr is the expected average hazard ratio at each analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"The ahr() function computes statistical information at targeted event times.
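As a rough numerical check of the returned columns (an illustration only, not part of the package documentation): under 1:1 randomization, info0 is approximately event / 4, and theta equals -log(ahr), which is consistent with Example 1 below.

library(gsDesign2)

x <- gs_info_ahr(event = c(30, 40, 50))   # as in Example 1 below
# theta is -log(AHR); info0 is approximately event / 4 under 1:1 randomization
all.equal(x$theta, -log(x$ahr))           # expected to be (approximately) TRUE
x$info0 - x$event / 4                     # expected to be near 0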
expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size based on AHR approximation — gs_info_ahr","text":"","code":"library(gsDesign) library(gsDesign2) # Example 1 ---- # \\donttest{ # Only put in targeted events gs_info_ahr(event = c(30, 40, 50)) #> analysis time event ahr theta info info0 #> 1 1 14.90817 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 19.16437 40.00000 0.7442008 0.2954444 9.789940 10.00000 #> 3 3 24.54264 50.00000 0.7128241 0.3385206 12.227632 12.50000 # } # Example 2 ---- # Only put in targeted analysis times gs_info_ahr(analysis_time = c(18, 27, 36)) #> analysis time event ahr theta info info0 #> 1 1 18 37.59032 0.7545471 0.2816376 9.208013 9.397579 #> 2 2 27 54.01154 0.7037599 0.3513180 13.216112 13.502885 #> 3 3 36 66.23948 0.6833395 0.3807634 16.267921 16.559870 # Example 3 ---- # \\donttest{ # Some analysis times after time at which targeted event accrue # Check that both Time >= input analysis_time and event >= input event gs_info_ahr(event = c(30, 40, 50), analysis_time = c(16, 19, 26)) #> analysis time event ahr theta info info0 #> 1 1 16.00000 33.06876 0.7759931 0.2536117 8.118487 8.267189 #> 2 2 19.16437 40.00000 0.7442008 0.2954444 9.789940 10.000001 #> 3 3 26.00000 52.41802 0.7071808 0.3464689 12.822714 13.104505 gs_info_ahr(event = c(30, 40, 50), analysis_time = c(14, 20, 24)) #> analysis time event ahr theta info info0 #> 1 1 14.90817 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 20.00000 41.67282 0.7377944 0.3040901 10.195150 10.41821 #> 3 3 24.54264 50.00000 0.7128241 0.3385206 12.227632 12.50000 # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size for MaxCombo test — gs_info_combo","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"Information effect size MaxCombo test","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"","code":"gs_info_combo( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, rho, gamma, tau = rep(-1, length(rho)), approx = \"asymptotic\" )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). ratio Experimental:Control randomization ratio (yet implemented). event Targeted events analysis. analysis_time Minimum time analysis. rho Weighting parameters. 
gamma Weighting parameters. tau Weighting parameters. approx Approximation method.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"tibble columns test index, analysis index, analysis time, sample size, number events, ahr, delta, sigma2, theta, statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size for MaxCombo test — gs_info_combo","text":"","code":"gs_info_combo(rho = c(0, 0.5), gamma = c(0.5, 0), analysis_time = c(12, 24)) #> test analysis time n event ahr delta sigma2 #> 1 1 1 12 89.99998 20.40451 0.7739222 -0.004130002 0.00633611 #> 2 1 2 24 107.99998 49.06966 0.6744758 -0.020174155 0.02617985 #> 3 2 1 12 89.99998 20.40451 0.8182558 -0.008800844 0.04088161 #> 4 2 2 24 107.99998 49.06966 0.7278445 -0.031421204 0.08709509 #> theta info info0 #> 1 0.6518199 0.5702498 0.5733464 #> 2 0.7705987 2.8274229 2.8855151 #> 3 0.2152764 3.6793441 3.6861985 #> 4 0.3607689 9.4062683 9.4737329"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size under risk difference — gs_info_rd","title":"Information and effect size under risk difference — gs_info_rd","text":"Information effect size risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size under risk difference — gs_info_rd","text":"","code":"gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0, ratio = 1, weight = c(\"unstratified\", \"ss\", \"invar\") )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size under risk difference — gs_info_rd","text":"p_c Rate control group. p_e Rate experimental group. n Sample size. rd0 risk difference H0. ratio Experimental:Control randomization ratio. 
weight Weighting method, can \"unstratified\", \"ss\", \"invar\".","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size under risk difference — gs_info_rd","text":"tibble columns analysis index, sample size, risk difference, risk difference null hypothesis, theta1 (standardized treatment effect alternative hypothesis), theta0 (standardized treatment effect null hypothesis), statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size under risk difference — gs_info_rd","text":"","code":"# Example 1 ---- # unstratified case with H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .15), p_e = tibble::tibble(stratum = \"All\", rate = .1), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0, ratio = 1 ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 100 0.05 0 0.05 0 230. 229. #> 2 2 200 0.05 0 0.05 0 460. 457. #> 3 3 300 0.05 0 0.05 0 690. 686. # Example 2 ---- # unstratified case with H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), n = tibble::tibble(stratum = \"All\", n = c(100, 200, 300), analysis = 1:3), rd0 = 0.005, ratio = 1 ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 100 0.05 0.005 0.05 0.005 174. 173. #> 2 2 200 0.05 0.005 0.05 0.005 348. 346. #> 3 3 300 0.05 0.005 0.05 0.005 522. 519. # Example 3 ---- # stratified case under sample size weighting and H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25)), p_e = tibble::tibble(stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19)), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0, ratio = 1, weight = \"ss\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0513 0 0.0513 0 261. 260. #> 2 2 300 0.0513 0 0.0513 0 522. 519. #> 3 3 600 0.0513 0 0.0513 0 1043. 1038. # Example 4 ---- # stratified case under inverse variance weighting and H0: rd0 = 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0, ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0 0.0507 0 271. 269. #> 2 2 300 0.0507 0 0.0507 0 542. 539. #> 3 3 600 0.0507 0 0.0507 0 1083. 1078. # Example 5 ---- # stratified case under sample size weighting and H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0.02, ratio = 1, weight = \"ss\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0513 0.02 0.0513 0.02 261. 260. 
#> 2 2 300 0.0513 0.02 0.0513 0.02 522. 519. #> 3 3 600 0.0513 0.02 0.0513 0.02 1043. 1038. # Example 6 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = 0.02, ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0.02 0.0507 0.02 271. 269. #> 2 2 300 0.0507 0.02 0.0507 0.02 542. 539. #> 3 3 600 0.0507 0.02 0.0507 0.02 1083. 1078. # Example 7 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 and # rd0 difference for different statum gs_info_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(50, 100, 200, 40, 80, 160, 60, 120, 240) ), rd0 = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rd0 = c(0.01, 0.02, 0.03) ), ratio = 1, weight = \"invar\" ) #> # A tibble: 3 × 8 #> analysis n rd rd0 theta1 theta0 info1 info0 #> #> 1 1 150 0.0507 0.0190 0.0507 0.0190 271. 269. #> 2 2 300 0.0507 0.0190 0.0507 0.0190 542. 539. #> 3 3 600 0.0507 0.0190 0.0507 0.0190 1083. 1078."},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Information and effect size for weighted log-rank test — gs_info_wlr","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"Based piecewise enrollment rate, failure rate, dropout rates computes approximate information effect size using average hazard ratio model.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"","code":"gs_info_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), ratio = 1, event = NULL, analysis_time = NULL, weight = wlr_weight_fh, approx = \"asymptotic\", interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate Failure dropout rates. ratio Experimental:Control randomization ratio. event Targeted minimum events analysis. analysis_time Targeted minimum study duration analysis. weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". 
interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"tibble columns Analysis, Time, N, Events, AHR, delta, sigma2, theta, info, info0. info info0 contain statistical information H1, H0, respectively. analysis k, Time[k] maximum analysis_time[k] expected time required accrue targeted event[k]. AHR expected average hazard ratio analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"ahr() function computes statistical information targeted event times. expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_info_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Information and effect size for weighted log-rank test — gs_info_wlr","text":"","code":"library(gsDesign2) # Set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 500 / 12) # Set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # Set the targeted number of events and analysis time event <- c(30, 40, 50) analysis_time <- c(10, 24, 30) gs_info_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = event, analysis_time = analysis_time ) #> analysis time n event ahr delta sigma2 theta #> 1 1 10 416.6667 77.80361 0.8720599 -0.005325328 0.03890022 0.1368971 #> 2 2 24 500.0001 246.28341 0.7164215 -0.040920239 0.12270432 0.3334865 #> 3 3 30 500.0001 293.69568 0.6955693 -0.052942680 0.14583769 0.3630247 #> info info0 #> 1 16.20843 16.22923 #> 2 61.35217 62.08666 #> 3 72.91885 74.25144"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"Group sequential design power using average hazard ratio non-proportional hazards.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"","code":"gs_power_ahr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), event = c(30, 40, 50), analysis_time = NULL, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = NULL), test_lower = TRUE, test_upper = TRUE, ratio = 1, binding = FALSE, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), r = 18, tol = 1e-06, interval = c(0.01, 1000) 
)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate Failure dropout rates. event Targeted event analysis. analysis_time Minimum time analysis. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"tibble columns Analysis, Bound, Z, Probability, theta, Time, AHR, Events. Contains row analysis bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"Bound satisfy input upper bound specification upper, upar, lower bound specification lower, lpar. ahr() computes statistical information targeted event times. 
expected_time() function used get events average HR targeted analysis_time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using average hazard ratio under non-proportional hazards — gs_power_ahr","text":"","code":"library(gsDesign2) library(dplyr) # Example 1 ---- # The default output of `gs_power_ahr()` is driven by events, # i.e., `event = c(30, 40, 50)`, `analysis_time = NULL` # \\donttest{ gs_power_ahr(lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.1)) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> 
while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, 
lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.1 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] FALSE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0231 0.00381 2.67 0.374 0.00381 #> 2 1 lower 0.0349 0.121 -1.17 1.54 0.879 #> 3 2 upper 0.0897 0.0122 2.29 0.481 0.0110 #> 4 2 lower 0.0668 0.265 -0.663 1.24 0.746 #> 5 3 upper 0.207 0.0250 2.03 0.559 0.0211 #> 6 3 lower 0.101 0.430 -0.227 1.07 0.590 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 19.16437 108 40.00000 0.7442008 0.2954444 9.789940 10.00000 #> 3 3 24.54264 108 50.00000 0.7128241 0.3385206 12.227632 12.50000 #> info_frac info_frac0 #> 1 0.6030140 0.6000016 #> 2 0.8006407 0.8000001 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" # } # Example 2 ---- # 2-sided symmetric O'Brien-Fleming spending bound, driven by analysis time, # i.e., `event = NULL`, `analysis_time = c(12, 24, 36)` gs_power_ahr( analysis_time = c(12, 24, 36), event = NULL, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> 
#> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) 
{ #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", 
c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000370 0.0000538 3.87 0.178 0.0000538 #> 2 1 lower 0.0000612 0.000343 -3.40 4.55 1.00 #> 3 2 upper 0.116 0.00921 2.36 0.506 0.00919 #> 4 2 lower 0.00907 0.115 -1.20 1.42 0.885 #> 5 3 upper 0.324 0.0250 2.01 0.608 0.0222 #> 6 3 lower 0.0250 0.324 -0.473 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 12 90 20.40451 0.8107539 0.2097907 5.028327 5.101127 0.3090946 #> 2 2 24 108 49.06966 0.7151566 0.3352538 11.999266 12.267415 0.7376029 #> 3 3 36 108 66.23948 0.6833395 0.3807634 16.267921 16.559870 1.0000000 #> info_frac0 #> 1 0.3080415 #> 2 0.7407917 #> 3 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # Example 3 ---- # 2-sided symmetric O'Brien-Fleming spending bound, driven by event, # i.e., `event = c(20, 50, 70)`, `analysis_time = NULL` # \\donttest{ gs_power_ahr( analysis_time = NULL, event = c(20, 50, 70), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 20 50 70 #> #> $input$analysis_time #> NULL #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if 
(spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 
1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 
3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.000198 0.0000275 4.03 0.163 0.0000275 #> 2 1 lower 0.0000312 0.000181 -3.57 4.98 1.00 #> 3 2 upper 0.110 0.00800 2.41 0.502 0.00799 #> 4 2 lower 0.00782 0.109 -1.23 1.42 0.891 #> 5 3 upper 0.352 0.0250 2.00 0.617 0.0226 #> 6 3 lower 0.0250 0.352 -0.393 1.10 0.653 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 11.87087 88.8378 20 0.8119328 0.2083377 4.929331 5.0 #> 2 2 24.54264 108.0000 50 0.7128241 0.3385206 12.227632 12.5 #> 3 3 39.39207 108.0000 70 0.6785816 0.3877506 17.218358 17.5 #> info_frac info_frac0 #> 1 0.2862834 0.2857143 #> 2 0.7101509 0.7142857 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\" # } # Example 4 ---- # 2-sided symmetric O'Brien-Fleming spending bound, # driven by both `event` and `analysis_time`, i.e., # both `event` and `analysis_time` are not `NULL`, # then the analysis will driven by the maximal one, i.e., # Time = max(analysis_time, calculated Time for targeted event) # Events = max(events, calculated events for targeted analysis_time) # \\donttest{ gs_power_ahr( analysis_time = c(12, 24, 36), event = c(30, 40, 50), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if 
(abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> 
adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00706 0.000867 3.13 0.316 0.000867 #> 2 1 lower 0.000935 0.00658 -2.48 2.49 0.993 #> 3 2 upper 0.115 0.00921 2.37 0.505 0.00892 #> 4 2 lower 0.00912 0.113 -1.21 1.42 0.888 #> 5 3 upper 0.324 0.0250 2.01 0.607 0.0222 #> 6 3 lower 0.0251 0.323 -0.474 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 24.00000 108 49.06966 0.7151566 0.3352538 11.999266 12.26741 #> 3 3 36.00000 108 66.23948 0.6833395 0.3807634 16.267921 16.55987 #> info_frac info_frac0 #> 1 0.4532499 0.4529033 #> 2 0.7376029 0.7407917 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" 
\"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"Group sequential design power using MaxCombo test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"","code":"gs_power_combo( enroll_rate = define_enroll_rate(duration = 12, rate = 500/12), fail_rate = define_fail_rate(duration = c(4, 100), fail_rate = log(2)/15, hr = c(1, 0.6), dropout_rate = 0.001), fh_test = rbind(data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36)), ratio = 1, binding = FALSE, upper = gs_b, upar = c(3, 2, 1), lower = gs_b, lpar = c(-1, 0, 1), algorithm = mvtnorm::GenzBretz(maxpts = 1e+05, abseps = 1e-05), ... )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. fh_test data frame summarize test analysis. See examples data structure. ratio Experimental:Control randomization ratio (yet implemented). binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. upar Parameters passed upper. lower Function compute lower bound. lpar Parameters passed lower. algorithm object class GenzBretz, Miwa TVPACK specifying algorithm used well associated hyper parameters. ... 
Additional parameters passed mvtnorm::pmvnorm.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using MaxCombo test under non-proportional hazards — gs_power_combo","text":"","code":"library(dplyr) library(mvtnorm) library(gsDesign) library(gsDesign2) enroll_rate <- define_enroll_rate( duration = 12, rate = 500 / 12 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) fh_test <- rbind( data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36) ) # Example 1 ---- # Minimal Information Fraction derived bound # \\donttest{ gs_power_combo( enroll_rate = enroll_rate, fail_rate = fail_rate, fh_test = fh_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bound #> analysis bound probability probability0 z nominal p #> 1 1 upper 6.329275e-08 3.299865e-10 6.175397 3.299865e-10 #> 2 1 lower 3.269613e-04 0.000000e+00 -2.516527 9.940741e-01 #> 3 2 upper 4.260145e-01 2.565830e-03 2.798651 2.565830e-03 #> 4 2 lower 8.468664e-02 0.000000e+00 1.237721 1.079098e-01 #> 5 3 upper 9.015980e-01 2.500822e-02 2.097499 1.797473e-02 #> 6 3 lower 2.000038e-01 0.000000e+00 2.958921 1.543591e-03 #> #> $analysis #> analysis time n event event_frac ahr #> 1 1 12 500.0001 107.3943 0.3241690 0.8418858 #> 2 2 24 500.0001 246.2834 0.7434051 0.7164215 #> 3 3 36 500.0001 331.2910 1.0000000 0.6831740 #> #> attr(,\"class\") #> [1] \"non_binding\" \"combo\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential bound computation with non-constant effect — gs_power_npe","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"Derives group sequential bounds boundary crossing probabilities design. allows non-constant treatment effect time, also can applied usual homogeneous effect size designs. requires treatment effect statistical information analysis well method deriving bounds, spending. routine enables two things available gsDesign package: non-constant effect, 2) flexibility boundary selection. many applications, non-proportional-hazards design function gs_design_nph() used; calls function. 
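For instance, a minimal sketch (not from the reference entry, using only the documented arguments) of a non-constant effect with a spending efficacy bound and no futility bound; info0 is supplied separately to illustrate null-hypothesis information that differs from the information under the alternative (the +2 offset is purely hypothetical).

library(gsDesign2)

gs_power_npe(
  theta = c(.1, .2, .3),            # effect grows across the three analyses
  info  = (1:3) * 40,               # information under the alternative
  info0 = (1:3) * 40 + 2,           # hypothetical: slightly larger H0 information
  upper = gs_spending_bound,
  upar  = list(sf = gsDesign::sfLDOF, total_spend = 0.025),
  lower = gs_b,
  lpar  = rep(-Inf, 3),             # no futility bound
  test_lower = FALSE
)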
Initial bound types supported 1) spending bounds, fixed bounds, 3) Haybittle-Peto-like bounds. requirement boundary update method can bound without knowledge future bounds. example, bounds based conditional power require knowledge future bounds supported routine; limited conditional power method demonstrated. Boundary family designs Wang-Tsiatis designs including original (non-spending-function-based) O'Brien-Fleming Pocock designs supported gs_power_npe().","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"","code":"gs_power_npe( theta = 0.1, theta0 = NULL, theta1 = NULL, info = 1, info0 = NULL, info1 = NULL, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), upper = gs_b, upar = qnorm(0.975), lower = gs_b, lpar = -Inf, test_upper = TRUE, test_lower = TRUE, binding = FALSE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"theta Natural parameter group sequential design representing expected incremental drift analyses; used power calculation. theta0 Natural parameter null hypothesis, needed upper bound computation. theta1 Natural parameter alternate hypothesis, needed lower bound computation. info Statistical information analyses input theta. info0 Statistical information null hypothesis, different info; impacts null hypothesis bound calculation. info1 Statistical information hypothesis used futility bound calculation different info; impacts futility hypothesis bound calculation. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. upper Function compute upper bound. upar Parameters passed upper. lower Function compare lower bound. lpar parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. binding Indicator whether futility bound binding; default FALSE recommended. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. 
tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"tibble columns analysis index, bounds, z, crossing probability, theta (standardized treatment effect), theta1 (standardized treatment effect alternative hypothesis), information fraction, statistical information.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_npe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential bound computation with non-constant effect — gs_power_npe","text":"","code":"library(gsDesign) library(gsDesign2) library(dplyr) # Default (single analysis; Type I error controlled) gs_power_npe(theta = 0) %>% filter(bound == \"upper\") #> # A tibble: 1 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 1.96 0.0250 0 0 1 1 1 1 # Fixed bound gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_b, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF)$upper$bound, lower = gs_b, lpar = c(-1, 0, 0) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.869 0.3 0.3 1 120 120 120 #> 4 1 lower -1 0.0513 0.1 0.1 0.333 40 40 40 #> 5 2 lower 0 0.0715 0.2 0.2 0.667 80 80 80 #> 6 3 lower 0 0.0715 0.3 0.3 1 120 120 120 # Same fixed efficacy bounds, no futility bound (i.e., non-binding bound), null hypothesis gs_power_npe( theta = rep(0, 3), info = (1:3) * 40, upar = gsDesign::gsDesign(k = 3, sfu = gsDesign::sfLDOF)$upper$bound, lpar = rep(-Inf, 3) ) %>% filter(bound == \"upper\") #> # A tibble: 3 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.000104 0 0 0.333 40 40 40 #> 2 2 upper 2.51 0.00605 0 0 0.667 80 80 80 #> 3 3 upper 1.99 0.0250 0 0 1 120 120 120 # Fixed bound with futility only at analysis 1; efficacy only at analyses 2, 3 gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_b, upar = c(Inf, 3, 2), lower = gs_b, lpar = c(qnorm(.1), -Inf, -Inf) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper Inf 0 0.1 0.1 0.333 40 40 40 #> 2 2 upper 3 0.113 0.2 0.2 0.667 80 80 80 #> 3 3 upper 2 0.887 0.3 0.3 1 120 120 120 #> 4 1 lower -1.28 0.0278 0.1 0.1 0.333 40 40 40 #> 5 2 lower -Inf 0.0278 0.2 0.2 0.667 80 80 80 #> 6 3 lower -Inf 0.0278 0.3 0.3 1 120 120 120 # Spending function bounds # Lower spending based on non-zero effect gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 
0.667 80 80 80 #> 3 3 upper 1.99 0.883 0.3 0.3 1 120 120 120 #> 4 1 lower -1.36 0.0230 0.1 0.1 0.333 40 40 40 #> 5 2 lower 0.0726 0.0552 0.2 0.2 0.667 80 80 80 #> 6 3 lower 1.86 0.100 0.3 0.3 1 120 120 120 # Same bounds, but power under different theta gs_power_npe( theta = c(.15, .25, .35), info = (1:3) * 40, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfHSD, total_spend = 0.1, param = -1, timing = NULL) ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00288 0.15 0.15 0.333 40 40 40 #> 2 2 upper 2.51 0.391 0.25 0.25 0.667 80 80 80 #> 3 3 upper 1.99 0.931 0.35 0.35 1 120 120 120 #> 4 1 lower -1.05 0.0230 0.15 0.15 0.333 40 40 40 #> 5 2 lower 0.520 0.0552 0.25 0.25 0.667 80 80 80 #> 6 3 lower 2.41 0.100 0.35 0.35 1 120 120 120 # Two-sided symmetric spend, O'Brien-Fleming spending # Typically, 2-sided bounds are binding x <- gs_power_npe( theta = rep(0, 3), info = (1:3) * 40, binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) # Re-use these bounds under alternate hypothesis # Always use binding = TRUE for power calculations gs_power_npe( theta = c(.1, .2, .3), info = (1:3) * 40, binding = TRUE, upar = (x %>% filter(bound == \"upper\"))$z, lpar = -(x %>% filter(bound == \"upper\"))$z ) #> # A tibble: 6 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 3.71 0.00104 0.1 0.1 0.333 40 40 40 #> 2 2 upper 2.51 0.235 0.2 0.2 0.667 80 80 80 #> 3 3 upper 1.99 0.902 0.3 0.3 1 120 120 120 #> 4 1 lower -3.71 0.00000704 0.1 0.1 0.333 40 40 40 #> 5 2 lower -2.51 0.0000151 0.2 0.2 0.667 80 80 80 #> 6 3 lower -1.99 0.0000151 0.3 0.3 1 120 120 120 # Different values of `r` and `tol` lead to different numerical accuracy # Larger `r` and smaller `tol` give better accuracy, but leads to slow computation n_analysis <- 5 gs_power_npe( theta = rep(0.1, n_analysis), theta0 = NULL, theta1 = NULL, info = 1:n_analysis, info0 = 1:n_analysis, info1 = NULL, info_scale = \"h0_info\", upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_b, lpar = -rep(Inf, n_analysis), test_upper = TRUE, test_lower = FALSE, binding = FALSE, # Try different combinations of (r, tol) with # r in 6, 18, 24, 30, 35, 40, 50, 60, 70, 80, 90, 100 # tol in 1e-6, 1e-12 r = 6, tol = 1e-6 ) #> # A tibble: 10 × 10 #> analysis bound z probability theta theta1 info_frac info info0 info1 #> #> 1 1 upper 4.88 0.000000890 0.1 0.1 0.2 1 1 1 #> 2 2 upper 3.36 0.000650 0.1 0.1 0.4 2 2 2 #> 3 3 upper 2.68 0.00627 0.1 0.1 0.6 3 3 3 #> 4 4 upper 2.29 0.0200 0.1 0.1 0.8 4 4 4 #> 5 5 upper 2.03 0.0408 0.1 0.1 1 5 5 5 #> 6 1 lower -Inf 0 0.1 0.1 0.2 1 1 1 #> 7 2 lower -Inf 0 0.1 0.1 0.4 2 2 2 #> 8 3 lower -Inf 0 0.1 0.1 0.6 3 3 3 #> 9 4 lower -Inf 0 0.1 0.1 0.8 4 4 4 #> 10 5 lower -Inf 0 0.1 0.1 1 5 5 5"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"Group sequential design power binary outcome 
measuring risk difference","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"","code":"gs_power_rd( p_c = tibble::tibble(stratum = \"All\", rate = 0.2), p_e = tibble::tibble(stratum = \"All\", rate = 0.15), n = tibble::tibble(stratum = \"All\", n = c(40, 50, 60), analysis = 1:3), rd0 = 0, ratio = 1, weight = c(\"unstratified\", \"ss\", \"invar\"), upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(0.1), rep(-Inf, 2)), info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), binding = FALSE, test_upper = TRUE, test_lower = TRUE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"p_c Rate control group. p_e Rate experimental group. n Sample size. rd0 Treatment effect super-superiority designs, default 0. ratio Experimental:control randomization ratio. weight Weighting method, can \"unstratified\", \"ss\", \"invar\". upper Function compute upper bound. lower Function compare lower bound. upar Parameters passed upper. lpar Parameters passed lower. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. binding Indicator whether futility bound binding; default FALSE recommended. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. 
tol Tolerance parameter boundary convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"list input parameter, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_rd.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power of binary outcome measuring in risk difference — gs_power_rd","text":"","code":"# Example 1 ---- library(gsDesign) # unstratified case with H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0, ratio = 1, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000104 3.71 0.629 #> 2 2 upper 0.0182 0.00605 2.51 0.301 #> 3 3 upper 0.0728 0.0250 1.99 0.195 #> 4 1 lower 0.0571 0.100 -1.28 -0.217 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0 0.05 0 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0 0.05 0 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0 0.05 0 104. 104. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 2 ---- # unstratified case with H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0.005, ratio = 1, upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000116 3.71 0.571 #> 2 2 upper 0.0182 0.00680 2.51 0.276 #> 3 3 upper 0.0728 0.0281 1.99 0.181 #> 4 1 lower 0.0571 0.0949 -1.28 -0.191 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0.005 0.05 0.005 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0.005 0.05 0.005 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0.005 0.05 0.005 104. 104. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # use spending function gs_power_rd( p_c = tibble::tibble( stratum = \"All\", rate = .2 ), p_e = tibble::tibble( stratum = \"All\", rate = .15 ), n = tibble::tibble( stratum = \"All\", n = c(20, 40, 60), analysis = 1:3 ), rd0 = 0.005, ratio = 1, upper = gs_spending_bound, lower = gs_b, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000309 0.000116 3.71 0.571 #> 2 2 upper 0.0182 0.00680 2.51 0.276 #> 3 3 upper 0.0728 0.0281 1.99 0.181 #> 4 1 lower 0.0571 0.0949 -1.28 -0.191 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 20 0.05 0.005 0.05 0.005 34.8 34.6 0.333 0.333 #> 2 2 40 0.05 0.005 0.05 0.005 69.6 69.3 0.667 0.667 #> 3 3 60 0.05 0.005 0.05 0.005 104. 104. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 3 ---- # stratified case under sample size weighting and H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0, ratio = 1, weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000437 0.000104 3.71 0.456 #> 2 2 upper 0.0237 0.00604 2.51 0.228 #> 3 3 upper 0.0795 0.0237 1.99 0.166 #> 4 1 lower 0.0470 0.100 -1.28 -0.157 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0479 0 0.0479 0 66.3 66.0 0.485 0.485 #> 2 2 66 0.0491 0 0.0491 0 116. 115. 0.846 0.846 #> 3 3 78 0.0492 0 0.0492 0 137. 136. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 4 ---- # stratified case under inverse variance weighting and H0: rd0 = 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0, ratio = 1, weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000443 0.000104 3.71 0.449 #> 2 2 upper 0.0240 0.00604 2.51 0.225 #> 3 3 upper 0.0803 0.0237 1.99 0.164 #> 4 1 lower 0.0467 0.100 -1.28 -0.155 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0477 0 0.0477 0 68.2 67.9 0.483 0.483 #> 2 2 66 0.0488 0 0.0488 0 119. 119. 0.845 0.845 #> 3 3 78 0.0489 0 0.0489 0 141. 141. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 5 ---- # stratified case under sample size weighting and H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0.02, ratio = 1, weight = \"ss\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000437 0.000194 3.71 0.285 #> 2 2 upper 0.0237 0.0109 2.51 0.153 #> 3 3 upper 0.0795 0.0401 1.99 0.117 #> 4 1 lower 0.0470 0.0744 -1.28 -0.0717 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0479 0.02 0.0479 0.02 66.3 66.0 0.485 0.485 #> 2 2 66 0.0491 0.02 0.0491 0.02 116. 115. 0.846 0.846 #> 3 3 78 0.0492 0.02 0.0492 0.02 137. 136. 1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\" # Example 6 ---- # stratified case under inverse variance weighting and H0: rd0 != 0 gs_power_rd( p_c = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.15, .2, .25) ), p_e = tibble::tibble( stratum = c(\"S1\", \"S2\", \"S3\"), rate = c(.1, .16, .19) ), n = tibble::tibble( stratum = rep(c(\"S1\", \"S2\", \"S3\"), each = 3), analysis = rep(1:3, 3), n = c(10, 20, 24, 18, 26, 30, 10, 20, 24) ), rd0 = 0.03, ratio = 1, weight = \"invar\", upper = gs_b, lower = gs_b, upar = gsDesign(k = 3, test.type = 1, sfu = sfLDOF, sfupar = NULL)$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~risk difference at bound` #> #> 1 1 upper 0.000443 0.000267 3.71 0.197 #> 2 2 upper 0.0240 0.0145 2.51 0.113 #> 3 3 upper 0.0803 0.0518 1.99 0.0906 #> 4 1 lower 0.0467 0.0632 -1.28 -0.0275 #> # ℹ 1 more variable: `nominal p` #> #> $analysis #> # A tibble: 3 × 10 #> analysis n rd rd0 theta1 theta0 info info0 info_frac info_frac0 #> #> 1 1 38 0.0477 0.03 0.0477 0.03 68.2 67.9 0.483 0.483 #> 2 2 66 0.0488 0.03 0.0488 0.03 119. 119. 0.845 0.845 #> 3 3 78 0.0489 0.03 0.0489 0.03 141. 141. 
1 1 #> #> attr(,\"class\") #> [1] \"non_binding\" \"rd\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"Group sequential design power using weighted log rank test non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"","code":"gs_power_wlr( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = tibble(stratum = \"All\", duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = rep(0.001, 2)), event = c(30, 40, 50), analysis_time = NULL, binding = FALSE, upper = gs_spending_bound, lower = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lpar = list(sf = gsDesign::sfLDOF, total_spend = NULL), test_upper = TRUE, test_lower = TRUE, ratio = 1, weight = wlr_weight_fh, info_scale = c(\"h0_h1_info\", \"h0_info\", \"h1_info\"), approx = \"asymptotic\", r = 18, tol = 1e-06, interval = c(0.01, 1000) )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"enroll_rate Enrollment rates. fail_rate Failure dropout rates. event Targeted event analysis. analysis_time Minimum time analysis. binding Indicator whether futility bound binding; default FALSE recommended. upper Function compute upper bound. lower Function compute lower bound. upar Parameters passed upper. lpar Parameters passed lower. test_upper Indicator analyses include upper (efficacy) bound; single value TRUE (default) indicates analyses; otherwise, logical vector length info indicate analyses efficacy bound. test_lower Indicator analyses include lower bound; single value TRUE (default) indicates analyses; single value FALSE indicated lower bound; otherwise, logical vector length info indicate analyses lower bound. ratio Experimental:Control randomization ratio (yet implemented). weight Weight weighted log rank test: \"1\" = unweighted. \"n\" = Gehan-Breslow. \"sqrtN\" = Tarone-Ware. \"FH_p[]_q[b]\" = Fleming-Harrington p=q=b. info_scale Information scale calculation. Options : \"h0_h1_info\" (default): variance null alternative hypotheses used. \"h0_info\": variance null hypothesis used. \"h1_info\": variance alternative hypothesis used. approx Approximate estimation method Z statistics. \"event_driven\" = work proportional hazard model log rank test. \"asymptotic\". r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally, r changed user. tol Tolerance parameter boundary convergence (Z-scale). 
interval interval presumed include time expected event count equal targeted event.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_power_wlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design power using weighted log rank test under non-proportional hazards — gs_power_wlr","text":"","code":"library(gsDesign) library(gsDesign2) # set enrollment rates enroll_rate <- define_enroll_rate(duration = 12, rate = 500 / 12) # set failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 15, # median survival 15 month hr = c(1, .6), dropout_rate = 0.001 ) # set the targeted number of events and analysis time target_events <- c(30, 40, 50) target_analysisTime <- c(10, 24, 30) # Example 1 ---- # \\donttest{ # fixed bounds and calculate the power for targeted number of events gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = NULL, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00470 0.00381 2.67 0.377 0.00381 #> 2 1 lower 0.0881 0.100 -1.28 1.60 0.9 #> 3 2 upper 0.0182 0.0127 2.29 0.485 0.0110 #> 4 3 upper 0.0439 0.0268 2.03 0.563 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 5.893949 245.5812 29.99999 0.9636346 0.03704308 3.683799 3.684201 #> 2 2 6.900922 287.5384 40.00003 0.9373448 0.06470405 5.749119 5.750793 #> 3 3 7.808453 325.3522 50.00000 0.9155821 0.08819527 8.132495 8.136743 #> info_frac info_frac0 #> 1 0.4529728 0.4527857 #> 2 0.7069318 0.7067685 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 2 ---- # fixed bounds and calculate the power for targeted analysis time # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = NULL, analysis_time = target_analysisTime, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0172 0.00381 2.67 0.546 0.00381 #> 2 1 lower 0.0335 0.100 -1.28 1.34 0.9 #> 3 2 upper 0.622 0.0141 2.29 0.747 0.0110 #> 4 3 upper 0.842 0.0263 2.03 0.789 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 3 ---- # fixed bounds and calculate the power for targeted analysis time & number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = target_analysisTime, upper = gs_b, upar = gsDesign( k = length(target_events), test.type = 1, n.I = target_events, maxn.IPlan = max(target_events), sfu = sfLDOF, sfupar = NULL )$upper$bound, lower = gs_b, lpar = c(qnorm(.1), rep(-Inf, 2)) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (par = NULL, k = NULL, ...) #> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$upar #> [1] 2.668630 2.288719 2.030702 #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (par = NULL, k = NULL, ...) 
#> { #> if (is.null(k)) { #> return(par) #> } #> else { #> return(par[k]) #> } #> } #> #> #> #> $input$lpar #> [1] -1.281552 -Inf -Inf #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0172 0.00381 2.67 0.546 0.00381 #> 2 1 lower 0.0335 0.100 -1.28 1.34 0.9 #> 3 2 upper 0.622 0.0141 2.29 0.747 0.0110 #> 4 3 upper 0.842 0.0263 2.03 0.789 0.0211 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 4 ---- # spending bounds and calculate the power for targeted number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = NULL, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> NULL #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> 
hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + 
sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> 
#> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00110 0.000865 3.13 0.319 0.000865 #> 2 1 lower 0.0569 0.0655 -1.51 1.74 0.935 #> 3 2 upper 0.0115 0.00767 2.44 0.463 0.00739 #> 4 2 lower 0.127 0.159 -1.06 1.40 0.857 #> 5 3 upper 0.0427 0.0250 2.00 0.568 0.0226 #> 6 3 lower 0.200 0.266 -0.738 1.23 0.770 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 5.893949 245.5812 29.99999 0.9636346 0.03704308 3.683799 3.684201 #> 2 2 6.900922 287.5384 40.00003 0.9373448 0.06470405 5.749119 5.750793 #> 3 3 7.808453 325.3522 50.00000 0.9155821 0.08819527 8.132495 8.136743 #> info_frac info_frac0 #> 1 0.4529728 0.4527857 #> 2 0.7069318 0.7067685 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 5 ---- # spending bounds and calculate the power for targeted analysis time # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = NULL, analysis_time = target_analysisTime, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> NULL #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) 
== 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 
#> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000207 0.00000163 4.65 0.348 0.00000163 #> 2 1 lower 0.00659 0.0269 -1.93 1.55 0.973 #> 3 2 upper 0.663 0.0142 2.19 0.756 0.0142 #> 4 2 lower 0.162 0.947 1.62 0.814 0.0527 #> 5 3 upper 0.811 0.0225 2.04 0.789 0.0209 #> 6 3 lower 0.200 0.980 2.13 0.780 0.0165 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] 
\"non_binding\" \"wlr\" \"gs_design\" \"list\" # } # Example 6 ---- # spending bounds and calculate the power for targeted analysis time & number of events # \\donttest{ gs_power_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, event = target_events, analysis_time = target_analysisTime, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) #> $input #> $input$enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 10 24 30 #> #> $input$binding #> [1] FALSE #> #> $input$ratio #> [1] 1 #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if 
(abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> #> $input$test_upper #> [1] TRUE #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } 
#> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.2 #> #> #> $input$test_lower #> [1] TRUE #> #> $input$weight #> function (x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> { #> n <- arm0$size + arm1$size #> p1 <- arm1$size/n #> p0 <- 1 - p1 #> if (!is.null(tau)) { #> if (tau > 0) { #> x <- pmin(x, tau) #> } #> } #> esurv <- p0 * npsurvSS::psurv(x, arm0) + p1 * npsurvSS::psurv(x, #> arm1) #> (1 - esurv)^rho * esurv^gamma #> } #> #> #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$approx #> [1] \"asymptotic\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 1 × 3 #> stratum duration rate #> #> 1 All 12 41.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 4 0.0462 0.001 1 #> 2 All 100 0.0462 0.001 0.6 #> #> $bounds #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.0000207 0.00000163 4.65 0.348 0.00000163 #> 2 1 lower 0.00659 0.0269 -1.93 1.55 0.973 #> 3 2 upper 0.663 0.0142 2.19 0.756 0.0142 #> 4 2 lower 0.162 0.947 1.62 0.814 0.0527 #> 5 3 upper 0.811 0.0225 2.04 0.789 0.0209 #> 6 3 lower 0.200 0.980 2.13 0.780 0.0165 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 10 416.6667 77.80361 0.8720599 0.1368971 16.20843 16.22923 #> 2 2 24 500.0000 246.28341 0.7164215 0.3334865 61.35217 62.08666 #> 3 3 30 500.0000 293.69568 0.6955693 0.3630247 72.91885 74.25144 #> info_frac info_frac0 #> 1 0.2222803 0.2185712 #> 2 0.8413760 0.8361677 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"wlr\" \"gs_design\" \"list\" # }"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":null,"dir":"Reference","previous_headings":"","what":"Derive spending bound for group sequential boundary — gs_spending_bound","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Computes one bound time based spending given distributional assumptions. user specifies gs_spending_bound() use functions, intended use . important user specifications made list provided functions using gs_spending_bound(). Function uses numerical integration Newton-Raphson iteration derive individual bound group sequential design satisfies targeted boundary crossing probability. 
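As a quick orientation, the sketch below calls the function directly at the first analysis; this is a minimal illustration only (in practice gs_spending_bound() is supplied as the upper/lower argument of gs_power_ahr(), gs_design_ahr(), and related functions), and the spending function, total spend, and information values used here are illustrative assumptions:

library(gsDesign2)

# Efficacy bound at the first of three equally spaced analyses under
# Lan-DeMets O'Brien-Fleming spending with one-sided total_spend = 0.025.
# At k = 1 there is no earlier analysis to integrate over, so the bound is
# just the upper-tail normal quantile of the first-analysis spend.
gs_spending_bound(
  k = 1,
  par = list(sf = gsDesign::sfLDOF, total_spend = 0.025),
  theta = 0,        # efficacy spending is evaluated under the null hypothesis
  info = (1:3) / 3, # statistical information at the three planned analyses
  efficacy = TRUE
)
# Expected to be about 3.71 on the Z-scale, i.e.,
# qnorm(gsDesign::sfLDOF(0.025, t = 1/3)$spend, lower.tail = FALSE).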
Algorithm simple extension Chapter 19 Jennison Turnbull (2000).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"","code":"gs_spending_bound( k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, r = 18, tol = 1e-06 )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"k Analysis bound computed. par list following items: sf (class spending function). total_spend (total spend). param (parameters needed spending function sf()). timing (vector containing values spending function evaluated NULL information-based spending used). max_info (timing NULL, can input positive number used info information fraction analysis). hgm1 Subdensity grid h1() (k=2) hupdate() (k>2) analysis k-1; k=1, used may NULL. theta Natural parameter used lower bound spending; represents average drift time analysis least analysis k; upper bound spending always set null hypothesis (theta = 0). info Statistical information analyses, least analysis k. efficacy TRUE (default) efficacy bound, FALSE otherwise. test_bound logical vector length info indicate analyses bound. r Integer value controlling grid numerical integration Jennison Turnbull (2000); default 18, range 1 80. Larger values provide larger number grid points greater accuracy. Normally r changed user. tol Tolerance parameter convergence (Z-scale).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Returns numeric bound (possibly infinite) , upon failure, generates error message.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Jennison C Turnbull BW (2000), Group Sequential Methods Applications Clinical Trials. 
Boca Raton: Chapman Hall.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"Keaven Anderson keaven_anderson@merck.com","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_bound.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Derive spending bound for group sequential boundary — gs_spending_bound","text":"","code":"gs_power_ahr( analysis_time = c(12, 24, 36), event = c(30, 40, 50), binding = TRUE, upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL), lower = gs_spending_bound, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) ) #> $input #> $input$enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $input$fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $input$event #> [1] 30 40 50 #> #> $input$analysis_time #> [1] 12 24 36 #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$upper #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- 
hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$upar #> $input$upar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$upar$total_spend #> [1] 0.025 #> #> $input$upar$param #> NULL #> #> $input$upar$timing #> NULL #> #> #> $input$lower #> function (k = 1, par = list(sf = gsDesign::sfLDOF, total_spend = 0.025, #> param = NULL, timing = NULL, max_info = NULL), hgm1 = NULL, #> theta = 0.1, info = 1:3, efficacy = TRUE, test_bound = TRUE, #> r = 18, tol = 1e-06) #> { #> if (length(test_bound) == 1 && k > 1) { #> test_bound <- rep(test_bound, k) #> } #> if (!is.null(par$timing)) { #> timing <- par$timing #> } #> else { #> if (is.null(par$max_info)) { #> timing <- info/max(info) #> } #> else { #> timing <- info/par$max_info #> } #> } #> spend <- par$sf(alpha = par$total_spend, t = timing, param = par$param)$spend #> old_spend <- 0 #> for (i in 1:k) { #> if (test_bound[i]) { #> xx <- spend[i] - old_spend #> old_spend <- spend[i] #> spend[i] <- xx #> } #> else { #> spend[i] <- 0 #> } #> } #> spend <- spend[k] #> if (!efficacy) { #> if (spend <= 0) { #> return(-Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, length(info)) #> a <- qnorm(spend) + sqrt(info[k]) * theta[k] #> if (k == 1) { #> return(a) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> adelta <- 1 #> j <- 0 #> while (abs(adelta) > tol) { #> hg <- hupdate(theta = theta[k], info = info[k], a = -Inf, #> b = a, thetam1 = theta[k - 1], im1 = info[k - #> 1], gm1 = hgm1, r = r) #> i <- length(hg$h) #> pik <- sum(hg$h) #> adelta <- spend - pik #> dplo <- hg$h[i]/hg$w[i] #> if (adelta > dplo) { #> adelta <- 1 #> } #> else if (adelta < -dplo) { #> adelta <- -1 #> } #> else { #> adelta <- adelta/dplo #> } #> a <- a + adelta #> if (a > extreme_high) { #> a <- extreme_high #> } #> else if (a < extreme_low) { #> a <- extreme_low #> } #> if (abs(adelta) < tol) { #> return(a) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> else { #> if (spend <= 0) { #> return(Inf) #> } #> if (length(theta) == 1) #> theta <- rep(theta, 
length(info)) #> b <- qnorm(spend, lower.tail = FALSE) #> if (k == 1) { #> return(b) #> } #> mu <- theta[k] * sqrt(info[k]) #> extreme_low <- mu - 3 - 4 * log(r) #> extreme_high <- mu + 3 + 4 * log(r) #> bdelta <- 1 #> j <- 1 #> while (abs(bdelta) > tol) { #> hg <- hupdate(theta = 0, info = info[k], a = b, b = Inf, #> thetam1 = 0, im1 = info[k - 1], gm1 = hgm1, r = r) #> pik <- sum(hg$h) #> bdelta <- spend - pik #> dpikdb <- hg$h[1]/hg$w[1] #> if (bdelta > dpikdb) { #> bdelta <- 1 #> } #> else if (bdelta < -dpikdb) { #> bdelta <- -1 #> } #> else { #> bdelta <- bdelta/dpikdb #> } #> b <- b - bdelta #> if (b > extreme_high) { #> b <- extreme_high #> } #> else if (b < extreme_low) { #> b <- extreme_low #> } #> if (abs(bdelta) < tol) { #> return(b) #> } #> j <- j + 1 #> if (j > 20) { #> stop(paste(\"gs_spending_bound(): bound_update did not converge for lower bound calculation, analysis\", #> k, \" !\")) #> } #> } #> } #> } #> #> #> #> $input$lpar #> $input$lpar$sf #> function (alpha, t, param = NULL) #> { #> checkScalar(alpha, \"numeric\", c(0, Inf), c(FALSE, FALSE)) #> checkVector(t, \"numeric\", c(0, Inf), c(TRUE, FALSE)) #> if (is.null(param) || param < 0.005 || param > 20) #> param <- 1 #> checkScalar(param, \"numeric\", c(0.005, 20), c(TRUE, TRUE)) #> t[t > 1] <- 1 #> if (param == 1) { #> rho <- 1 #> txt <- \"Lan-DeMets O'Brien-Fleming approximation\" #> parname <- \"none\" #> } #> else { #> rho <- param #> txt <- \"Generalized Lan-DeMets O'Brien-Fleming\" #> parname <- \"rho\" #> } #> z <- -qnorm(alpha/2) #> x <- list(name = txt, param = param, parname = parname, sf = sfLDOF, #> spend = 2 * (1 - pnorm(z/t^(rho/2))), bound = NULL, prob = NULL) #> class(x) <- \"spendfn\" #> x #> } #> #> #> #> $input$lpar$total_spend #> [1] 0.025 #> #> $input$lpar$param #> NULL #> #> $input$lpar$timing #> NULL #> #> #> $input$test_lower #> [1] TRUE #> #> $input$test_upper #> [1] TRUE #> #> $input$ratio #> [1] 1 #> #> $input$binding #> [1] TRUE #> #> $input$info_scale #> [1] \"h0_h1_info\" #> #> $input$r #> [1] 18 #> #> $input$tol #> [1] 1e-06 #> #> #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 3 #> 2 All 2 6 #> 3 All 10 9 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.001 0.9 #> 2 All 100 0.0385 0.001 0.6 #> #> $bound #> # A tibble: 6 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.00706 0.000867 3.13 0.316 0.000867 #> 2 1 lower 0.000935 0.00658 -2.48 2.49 0.993 #> 3 2 upper 0.115 0.00921 2.37 0.505 0.00892 #> 4 2 lower 0.00912 0.113 -1.21 1.42 0.888 #> 5 3 upper 0.324 0.0250 2.01 0.607 0.0222 #> 6 3 lower 0.0251 0.323 -0.474 1.12 0.682 #> #> $analysis #> analysis time n event ahr theta info info0 #> 1 1 14.90817 108 30.00008 0.7865726 0.2400702 7.373433 7.50002 #> 2 2 24.00000 108 49.06966 0.7151566 0.3352538 11.999266 12.26741 #> 3 3 36.00000 108 66.23948 0.6833395 0.3807634 16.267921 16.55987 #> info_frac info_frac0 #> 1 0.4532499 0.4529033 #> 2 0.7376029 0.7407917 #> 3 1.0000000 1.0000000 #> #> attr(,\"class\") #> [1] \"ahr\" \"gs_design\" \"list\""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":null,"dir":"Reference","previous_headings":"","what":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"Derive spending bound MaxCombo group sequential 
boundary","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"","code":"gs_spending_combo(par = NULL, info = NULL)"},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"par list following items: sf (class spending function). total_spend (total spend). param (parameters needed spending function sf()). timing (vector containing values spending function evaluated NULL information-based spending used). max_info (timing NULL, can input positive number used info information fraction analysis). info Statistical information analyses, least analysis k.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"vector alpha spending per analysis.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_spending_combo.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Derive spending bound for MaxCombo group sequential boundary — gs_spending_combo","text":"","code":"# alpha-spending par <- list(sf = gsDesign::sfLDOF, total_spend = 0.025) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0001035057 0.0060483891 0.0250000000 par <- list(sf = gsDesign::sfLDPocock, total_spend = 0.025) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.01132081 0.01908456 0.02500000 par <- list(sf = gsDesign::sfHSD, total_spend = 0.025, param = -40) gs_spending_combo(par, info = 1:3 / 3) #> [1] 6.557724e-14 4.048992e-08 2.500000e-02 # Kim-DeMets (power) Spending Function par <- list(sf = gsDesign::sfPower, total_spend = 0.025, param = 1.5) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.004811252 0.013608276 0.025000000 # Exponential Spending Function par <- list(sf = gsDesign::sfExponential, total_spend = 0.025, param = 1) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.000015625 0.003952847 0.025000000 # Two-parameter Spending Function Families par <- list(sf = gsDesign::sfLogistic, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001757277 0.008146545 0.025000000 par <- list(sf = gsDesign::sfBetaDist, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001818609 0.006568999 0.025000000 par <- list(sf = gsDesign::sfCauchy, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001378849 0.023755732 0.025000000 par <- list(sf = gsDesign::sfExtremeValue, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001785159 0.007184159 0.025000000 par <- list(sf = gsDesign::sfExtremeValue2, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001799588 0.007015878 0.025000000 par <- list(sf = gsDesign::sfNormal, total_spend = 0.025, param = c(.1, .4, .01, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.001797471 0.006969761 0.025000000 # t-distribution Spending Function par <- list(sf = gsDesign::sfTDist, total_spend = 0.025, param = c(-1, 1.5, 4)) 
gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.002063494 0.009705759 0.025000000 # Piecewise Linear and Step Function Spending Functions par <- list(sf = gsDesign::sfLinear, total_spend = 0.025, param = c(.2, .4, .05, .2)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00375000 0.01388889 0.02500000 par <- list(sf = gsDesign::sfStep, total_spend = 0.025, param = c(1 / 3, 2 / 3, .1, .1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0025 0.0025 0.0250 # Pointwise Spending Function par <- list(sf = gsDesign::sfPoints, total_spend = 0.025, param = c(.25, .25)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00625 0.00625 0.02500 # Truncated, trimmed and gapped spending functions par <- list(sf = gsDesign::sfTruncated, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.00788072 0.02137939 0.02500000 par <- list(sf = gsDesign::sfTrimmed, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.01121102 0.01924407 0.02500000 par <- list(sf = gsDesign::sfGapped, total_spend = 0.025, param = list(trange = c(.2, .8), sf = gsDesign::sfHSD, param = 1)) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.007169093 0.007169093 0.025000000 # Xi and Gallo conditional error spending functions par <- list(sf = gsDesign::sfXG1, total_spend = 0.025, param = 0.5) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.0001035057 0.0060483891 0.0250000000 par <- list(sf = gsDesign::sfXG2, total_spend = 0.025, param = 0.14) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.008419231 0.021216583 0.025000000 par <- list(sf = gsDesign::sfXG3, total_spend = 0.025, param = 0.013) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.02428922 0.02477989 0.02500000 # beta-spending par <- list(sf = gsDesign::sfLDOF, total_spend = 0.2) gs_spending_combo(par, info = 1:3 / 3) #> [1] 0.02643829 0.11651432 0.20000000"},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":null,"dir":"Reference","previous_headings":"","what":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"Group sequential design using average hazard ratio non-proportional hazards","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"","code":"gs_update_ahr( x = NULL, alpha = NULL, ustime = NULL, lstime = NULL, observed_data = NULL )"},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"x design created either gs_design_ahr gs_power_ahr. alpha Type error updated design. ustime Default NULL case upper bound spending time determined timing. Otherwise, vector length k (total number analyses) spending time analysis. lstime Default NULL case lower bound spending time determined timing. 
Otherwise, vector length k (total number analyses) spending time analysis observed_data list observed datasets analyses.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"list input parameters, enrollment rate, analysis, bound.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/gs_update_ahr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Group sequential design using average hazard ratio under non-proportional hazards — gs_update_ahr","text":"","code":"library(gsDesign) library(gsDesign2) library(dplyr) alpha <- 0.025 beta <- 0.1 ratio <- 1 # Enrollment enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = (1:3) / 3) # Failure and dropout fail_rate <- define_fail_rate( duration = c(3, Inf), fail_rate = log(2) / 9, hr = c(1, 0.6), dropout_rate = .0001) # IA and FA analysis time analysis_time <- c(20, 36) # Randomization ratio ratio <- 1 # ------------------------------------------------- # # Example A: one-sided design (efficacy only) # ------------------------------------------------- # # Original design upper <- gs_spending_bound upar <- list(sf = sfLDOF, total_spend = alpha) x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = upar, lower = gs_b, lpar = rep(-Inf, 2), test_upper = TRUE, test_lower = FALSE) |> to_integer() # Observed dataset at IA and FA set.seed(123) observed_data <- simtrial::sim_pw_surv( n = x$analysis$n[x$analysis$analysis == 2], stratum = data.frame(stratum = \"All\", p = 1), block = c(rep(\"control\", 2), rep(\"experimental\", 2)), enroll_rate = x$enroll_rate, fail_rate = (fail_rate |> simtrial::to_sim_pw_surv())$fail_rate, dropout_rate = (fail_rate |> simtrial::to_sim_pw_surv())$dropout_rate) observed_data_ia <- observed_data |> simtrial::cut_data_by_date(x$analysis$time[1]) observed_data_fa <- observed_data |> simtrial::cut_data_by_date(x$analysis$time[2]) observed_event_ia <- sum(observed_data_ia$event) observed_event_fa <- sum(observed_data_fa$event) planned_event_ia <- x$analysis$event[1] planned_event_fa <- x$analysis$event[2] # Example A1 ---- # IA spending = observed events / final planned events # the remaining alpha will be allocated to FA. 
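# Clarifying note: ustime entries are spending *times* on the [0, 1] scale, not
# event counts. Dividing the observed IA events by the final planned events gives
# the fraction at which the alpha spending function of the original design
# (sfLDOF here) is evaluated at the interim analysis, and setting the final entry
# to 1 allocates all remaining alpha to the final analysis.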
ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.901 0.0250 1.99 0.794 0.0235 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A2 ---- # IA, FA spending = observed events / final planned events ustime <- c(observed_event_ia, observed_event_fa) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.899 0.0245 2.00 0.793 0.0230 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A3 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia) / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.901 0.0250 1.99 0.794 0.0235 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example A4 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia), min(observed_event_fa, planned_event_fa)) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, observed_data = list(observed_data_ia, 
observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.899 0.0245 2.00 0.793 0.0230 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # alpha is upadted to 0.05 gs_update_ahr( x = x, alpha = 0.05, ustime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.3 #> 3 All 10 30.5 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.461 0.0138 2.20 0.725 0.0138 #> 2 2 upper 0.943 0.0492 1.69 0.821 0.0451 #> 3 1 lower 0 0 -Inf Inf 1 #> 4 2 lower 0 0 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.86657 366 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 35.81007 366 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # ------------------------------------------------- # # Example B: Two-sided asymmetric design, # beta-spending with non-binding lower bound # ------------------------------------------------- # # Original design x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = list(sf = sfLDOF, total_spend = alpha), test_upper = TRUE, lower = gs_spending_bound, lpar = list(sf = sfLDOF, total_spend = beta), test_lower = c(TRUE, FALSE), binding = FALSE) |> to_integer() # Example B1 ---- # IA spending = observed events / final planned events # the remaining alpha will be allocated to FA. 
ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.891 0.0248 1.99 0.794 0.0235 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B2 ---- # IA, FA spending = observed events / final planned events ustime <- c(observed_event_ia, observed_event_fa) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.890 0.0243 2.00 0.793 0.0230 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B3 ---- ustime <- c(min(observed_event_ia, planned_event_ia) / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.891 0.0248 1.99 0.794 0.0235 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 1.0000000 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B4 ---- # IA spending = min(observed events, planned events) / final planned events ustime <- c(min(observed_event_ia, planned_event_ia), min(observed_event_fa, planned_event_fa)) / planned_event_fa gs_update_ahr( x = x, ustime = ustime, 
lstime = ustime, observed_data = list(observed_data_ia, observed_data_fa)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.890 0.0243 2.00 0.793 0.0230 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 295 0.6832088 0.3809547 73.75 73.75 0.9932660 #> info_frac0 #> 1 0.6372881 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B5 ---- # alpha is updated to 0.05 ---- gs_update_ahr(x = x, alpha = 0.05) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.511 0.0144 2.19 0.735 0.0144 #> 2 2 upper 0.934 0.0487 1.69 0.826 0.0458 #> 3 1 lower 0.0401 0.679 0.464 0.937 0.321 #> 4 2 lower 0.0401 0.679 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 202 0.7322996 0.3115656 50.50 50.50 0.6495177 #> 2 2 36.06513 382 311 0.6829028 0.3814027 77.75 77.75 1.0000000 #> info_frac0 #> 1 0.6495177 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # Example B6 ---- # updated boundaries only when IA data is observed ustime <- c(observed_event_ia / planned_event_fa, 1) gs_update_ahr( x = x, ustime = ustime, lstime = ustime, observed_data = list(observed_data_ia, NULL)) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.6 #> 2 All 2 21.2 #> 3 All 10 31.8 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.315 0.00484 2.59 0.686 0.00484 #> 2 2 upper 0.903 0.0247 1.99 0.798 0.0233 #> 3 1 lower 0.0387 0.633 0.339 0.952 0.367 #> 4 2 lower 0.0387 0.633 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.91897 382 188 0.7356221 0.3070388 47.00 47.00 0.6329966 #> 2 2 36.06513 382 311 0.6829028 0.3814027 77.75 77.75 1.0000000 #> info_frac0 #> 1 0.6045016 #> 2 1.0000000 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\" # ------------------------------------------------- # # Example C: Two-sided asymmetric design, # with calendar spending for efficacy and futility bounds # beta-spending with non-binding lower bound # ------------------------------------------------- # # Original design x <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, alpha = alpha, beta = beta, ratio = ratio, info_scale = \"h0_info\", info_frac = NULL, analysis_time = c(20, 36), upper = gs_spending_bound, upar = list(sf = 
sfLDOF, total_spend = alpha, timing = c(20, 36) / 36), test_upper = TRUE, lower = gs_spending_bound, lpar = list(sf = sfLDOF, total_spend = beta, timing = c(20, 36) / 36), test_lower = c(TRUE, FALSE), binding = FALSE) |> to_integer() # Updated design due to potential change of multiplicity graph gs_update_ahr(x = x, alpha = 0.05) #> $enroll_rate #> # A tibble: 3 × 3 #> stratum duration rate #> #> 1 All 2 10.2 #> 2 All 2 20.4 #> 3 All 10 30.7 #> #> $fail_rate #> # A tibble: 2 × 5 #> stratum duration fail_rate dropout_rate hr #> #> 1 All 3 0.0770 0.0001 1 #> 2 All Inf 0.0770 0.0001 0.6 #> #> $bound #> # A tibble: 4 × 7 #> analysis bound probability probability0 z `~hr at bound` `nominal p` #> #> 1 1 upper 0.418 0.00855 2.38 0.711 0.00855 #> 2 2 upper 0.940 0.0493 1.66 0.825 0.0483 #> 3 1 lower 0.0273 0.601 0.257 0.964 0.399 #> 4 2 lower 0.0273 0.601 -Inf Inf 1 #> #> $analysis #> analysis time n event ahr theta info info0 info_frac #> 1 1 19.95805 368 195 0.7319980 0.3119776 48.75 48.75 0.65 #> 2 2 36.16991 368 300 0.6827856 0.3815744 75.00 75.00 1.00 #> info_frac0 #> 1 0.65 #> 2 1.00 #> #> attr(,\"class\") #> [1] \"non_binding\" \"ahr\" \"gs_design\" \"list\" #> [5] \"updated_design\""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":null,"dir":"Reference","previous_headings":"","what":"Piecewise exponential cumulative distribution function — ppwe","title":"Piecewise exponential cumulative distribution function — ppwe","text":"Computes cumulative distribution function (CDF) survival rate piecewise exponential distribution.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Piecewise exponential cumulative distribution function — ppwe","text":"","code":"ppwe(x, duration, rate, lower_tail = FALSE)"},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Piecewise exponential cumulative distribution function — ppwe","text":"x Times distribution computed. duration numeric vector time duration. rate numeric vector event rate. 
lower_tail Indicator whether lower (TRUE) upper tail (FALSE; default) CDF computed.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Piecewise exponential cumulative distribution function — ppwe","text":"vector cumulative distribution function survival values.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Piecewise exponential cumulative distribution function — ppwe","text":"Suppose \\(\\lambda_i\\) is the failure rate in the interval \\((t_{i-1},t_i], i=1,2,\\ldots,M\\), where \\(0=t_0<t_1<\\cdots<t_M=\\infty\\). The cumulative hazard at an arbitrary time \\(t>0\\) is then: $$\\Lambda(t)=\\sum_{i=1}^M \\delta(t\\leq t_i)(\\min(t,t_i)-t_{i-1})\\lambda_i.$$ The survival at time \\(t\\) is then $$S(t)=\\exp(-\\Lambda(t)).$$","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Piecewise exponential cumulative distribution function — ppwe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/ppwe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Piecewise exponential cumulative distribution function — ppwe","text":"","code":"# Plot a survival function with 2 different sets of time values # to demonstrate plot precision corresponding to input parameters. x1 <- seq(0, 10, 10 / pi) duration <- c(3, 3, 1) rate <- c(.2, .1, .005) survival <- ppwe( x = x1, duration = duration, rate = rate ) plot(x1, survival, type = \"l\", ylim = c(0, 1)) x2 <- seq(0, 10, .25) survival <- ppwe( x = x2, duration = duration, rate = rate ) lines(x2, survival, col = 2)"},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":null,"dir":"Reference","previous_headings":"","what":"Average hazard ratio under non-proportional hazards — pw_info","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"Provides geometric average hazard ratio various non-proportional hazards assumptions either single multiple strata studies. piecewise exponential distribution allows simple method specify distribution enrollment pattern enrollment, failure dropout rates changes time.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"","code":"pw_info( enroll_rate = define_enroll_rate(duration = c(2, 2, 10), rate = c(3, 6, 9)), fail_rate = define_fail_rate(duration = c(3, 100), fail_rate = log(2)/c(9, 18), hr = c(0.9, 0.6), dropout_rate = 0.001), total_duration = 30, ratio = 1 )"},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"enroll_rate enroll_rate data frame without stratum created define_enroll_rate(). fail_rate fail_rate data frame without stratum created define_fail_rate(). total_duration Total follow-up start enrollment data cutoff; can single value vector positive numbers. 
ratio Ratio experimental control randomization.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"data frame time (total_duration), stratum, t, hr (hazard ratio), event (expected number events), info (information given scenarios), info0 (information related null hypothesis), n (sample size) value total_duration input","code":""},{"path":"https://merck.github.io/gsDesign2/reference/pw_info.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Average hazard ratio under non-proportional hazards — pw_info","text":"","code":"# Example: default pw_info() #> time stratum t hr n event info info0 #> 1 30 All 0 0.9 12 21.24782 5.300180 5.311956 #> 2 30 All 3 0.6 96 37.24314 9.027063 9.310786 # Example: default with multiple analysis times (varying total_duration) pw_info(total_duration = c(15, 30)) #> time stratum t hr n event info info0 #> 1 15 All 0 0.9 12 20.13991 5.023729 5.034979 #> 2 15 All 3 0.6 96 10.13850 2.417457 2.534625 #> 3 30 All 0 0.9 12 21.24782 5.300180 5.311956 #> 4 30 All 3 0.6 96 37.24314 9.027063 9.310786 # Stratified population enroll_rate <- define_enroll_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 3)), duration = c(2, 10, 4, 4, 8), rate = c(5, 10, 0, 3, 6) ) fail_rate <- define_fail_rate( stratum = c(rep(\"Low\", 2), rep(\"High\", 2)), duration = c(1, Inf, 1, Inf), fail_rate = c(.1, .2, .3, .4), dropout_rate = .001, hr = c(.9, .75, .8, .6) ) # Give results by change-points in the piecewise model ahr(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time ahr n event info info0 #> 1 15 0.7332218 164 113.2782 28.18130 28.31954 #> 2 30 0.7175169 170 166.1836 41.49942 41.54590 # Same example, give results by strata and time period pw_info(enroll_rate = enroll_rate, fail_rate = fail_rate, total_duration = c(15, 30)) #> time stratum t hr n event info info0 #> 1 15 High 0 0.80 0 12.076677 2.990626 3.019169 #> 2 15 High 1 0.60 54 23.118608 5.741884 5.779652 #> 3 15 Low 0 0.90 5 9.962824 2.484435 2.490706 #> 4 15 Low 1 0.75 105 68.120046 16.964361 17.030011 #> 5 30 High 0 0.80 0 14.169853 3.509171 3.542463 #> 6 30 High 1 0.60 60 45.213092 11.297986 11.303273 #> 7 30 Low 0 0.90 5 9.962824 2.484435 2.490706 #> 8 30 Low 1 0.75 105 96.837847 24.207826 24.209462"},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":null,"dir":"Reference","previous_headings":"","what":"Approximate survival distribution with piecewise exponential distribution — s2pwe","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"Converts discrete set points arbitrary survival distribution piecewise exponential approximation.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"","code":"s2pwe(times, survival)"},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"times Positive increasing times survival distribution provided. 
survival Survival (1 - cumulative distribution function) specified times.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"tibble containing duration rate.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/s2pwe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Approximate survival distribution with piecewise exponential distribution — s2pwe","text":"","code":"# Example: arbitrary numbers s2pwe(1:9, (9:1) / 10) #> # A tibble: 9 × 2 #> duration rate #> #> 1 1 0.105 #> 2 1 0.118 #> 3 1 0.134 #> 4 1 0.154 #> 5 1 0.182 #> 6 1 0.223 #> 7 1 0.288 #> 8 1 0.405 #> 9 1 0.693 # Example: lognormal s2pwe(c(1:6, 9), plnorm(c(1:6, 9), meanlog = 0, sdlog = 2, lower.tail = FALSE)) #> # A tibble: 7 × 2 #> duration rate #> #> 1 1 0.693 #> 2 1 0.316 #> 3 1 0.224 #> 4 1 0.177 #> 5 1 0.148 #> 6 1 0.128 #> 7 3 0.103"},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":null,"dir":"Reference","previous_headings":"","what":"Summary for fixed design or group sequential design objects — summary.fixed_design","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"Summary fixed design group sequential design objects","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"","code":"# S3 method for class 'fixed_design' summary(object, ...) # S3 method for class 'gs_design' summary( object, analysis_vars = NULL, analysis_decimals = NULL, col_vars = NULL, col_decimals = NULL, bound_names = c(\"Efficacy\", \"Futility\"), ... )"},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"object design object returned fixed_design_xxx() gs_design_xxx(). ... Additional parameters (used). analysis_vars variables put summary header analysis. analysis_decimals displayed number digits analysis_vars. vector unnamed, must match length analysis_vars. vector named, specify number digits variables want displayed differently defaults. col_vars variables displayed. col_decimals decimals displayed displayed variables col_vars. vector unnamed, must match length col_vars. vector named, specify number digits columns want displayed differently defaults. 
bound_names Names bounds; default c(\"Efficacy\", \"Futility\").","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"summary table (data frame).","code":""},{"path":"https://merck.github.io/gsDesign2/reference/summary.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Summary for fixed design or group sequential design objects — summary.fixed_design","text":"","code":"library(dplyr) # Enrollment rate enroll_rate <- define_enroll_rate( duration = 18, rate = 20 ) # Failure rates fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ) # Study duration in months study_duration <- 36 # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- 0.1 # AHR ---- # under fixed power fixed_design_ahr( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 463. 325. 36 1.96 0.025 0.9 # FH ---- # under fixed power fixed_design_fh( alpha = alpha, power = 1 - beta, enroll_rate = enroll_rate, fail_rate = fail_rate, study_duration = study_duration, ratio = ratio ) %>% summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0, 0) (logrank) 458. 321. 36 1.96 0.025 0.9 # Design parameters ---- library(gsDesign) library(gsDesign2) library(dplyr) # enrollment/failure rates enroll_rate <- define_enroll_rate( stratum = \"All\", duration = 12, rate = 1 ) fail_rate <- define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ) # Information fraction info_frac <- (1:3) / 3 # Analysis times in months; first 2 will be ignored as info_frac will not be achieved analysis_time <- c(.01, .02, 36) # Experimental / Control randomization ratio ratio <- 1 # 1-sided Type I error alpha <- 0.025 # Type II error (1 - power) beta <- .1 # Upper bound upper <- gs_spending_bound upar <- list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL) # Lower bound lower <- gs_spending_bound lpar <- list(sf = gsDesign::sfHSD, total_spend = 0.1, param = 0, timing = NULL) # weight function in WLR wgt00 <- function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0) } wgt05 <- function(x, arm0, arm1) { wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = .5) } # test in COMBO fh_test <- rbind( data.frame(rho = 0, gamma = 0, tau = -1, test = 1, analysis = 1:3, analysis_time = c(12, 24, 36)), data.frame(rho = c(0, 0.5), gamma = 0.5, tau = -1, test = 2:3, analysis = 3, analysis_time = 36) ) # Example 1 ---- # \\donttest{ x_ahr <- gs_design_ahr( enroll_rate = enroll_rate, fail_rate = fail_rate, info_frac = info_frac, # Information fraction analysis_time = analysis_time, ratio = ratio, alpha = alpha, beta = beta, upper = upper, upar = upar, lower = lower, lpar = lpar ) x_ahr %>% summary() #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… Effi… 2.51 
0.725 0.006 0.414 #> 5 Analysis: 3 Tim… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the digits to display x_ahr %>% summary(analysis_vars = c(\"time\", \"event\", \"info_frac\"), analysis_decimals = c(1, 0, 2)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Tim… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the labels of the crossing probability x_ahr %>% summary(bound_names = c(\"A is better\", \"B is better\")) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… B is… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 Tim… A is… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Tim… B is… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 Tim… A is… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Tim… B is… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Tim… A is… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the variables to be summarized for each analysis x_ahr %>% summary(analysis_vars = c(\"n\", \"event\"), analysis_decimals = c(1, 1)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 N: … Futi… -0.94 1.19 0.826 0.0338 #> 2 Analysis: 1 N: … Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 N: … Futi… 0.63 0.923 0.266 0.0666 #> 4 Analysis: 2 N: … Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 N: … Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 N: … Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the digits for the columns x_ahr %>% summary(col_decimals = c(z = 4)) #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Futi… -0.938 1.19 0.826 0.0338 #> 2 Analysis: 1 Ti… Effi… 3.71 0.510 0.0001 0.0027 #> 3 Analysis: 2 Ti… Futi… 0.626 0.923 0.266 0.0666 #> 4 Analysis: 2 Ti… Effi… 2.51 0.725 0.006 0.414 #> 5 Analysis: 3 Ti… Futi… 1.99 0.812 0.0233 0.101 #> 6 Analysis: 3 Ti… Effi… 1.99 0.812 0.0231 0.9 #> # ℹ 1 more variable: `Null hypothesis` # Customize the columns to display x_ahr %>% summary(col_vars = c(\"z\", \"~hr at bound\", \"nominal p\")) #> Adding missing grouping variables: `Analysis` #> # A tibble: 6 × 5 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` #> #> 1 Analysis: 1 Time: 11.7 N: 479.6 Events… Futi… -0.94 1.19 0.826 #> 2 Analysis: 1 Time: 11.7 N: 479.6 Events… Effi… 3.71 0.510 0.0001 #> 3 Analysis: 2 Time: 20.3 N: 493.1 Events… Futi… 0.63 0.923 0.266 #> 4 Analysis: 2 Time: 20.3 N: 493.1 Events… Effi… 2.51 0.725 0.006 #> 5 Analysis: 3 Time: 36 N: 493.1 Events: … Futi… 1.99 0.812 0.0233 #> 6 Analysis: 3 Time: 36 N: 493.1 Events: … Effi… 1.99 0.812 0.0231 # Customize columns and digits x_ahr %>% summary(col_vars = c(\"z\", \"~hr at bound\", \"nominal p\"), col_decimals = c(4, 2, 2)) #> Adding missing grouping variables: `Analysis` #> # A tibble: 6 × 5 #> # Groups: Analysis [3] #> Analysis Bound Z `~HR at bound` `Nominal p` #> #> 1 Analysis: 1 Time: 11.7 N: 479.6 Event… Futi… -0.938 1.19 0.83 #> 2 Analysis: 
1 Time: 11.7 N: 479.6 Event… Effi… 3.71 0.51 0 #> 3 Analysis: 2 Time: 20.3 N: 493.1 Event… Futi… 0.626 0.92 0.27 #> 4 Analysis: 2 Time: 20.3 N: 493.1 Event… Effi… 2.51 0.72 0.01 #> 5 Analysis: 3 Time: 36 N: 493.1 Events:… Futi… 1.99 0.81 0.02 #> 6 Analysis: 3 Time: 36 N: 493.1 Events:… Effi… 1.99 0.81 0.02 # } # Example 2 ---- # \\donttest{ x_wlr <- gs_design_wlr( enroll_rate = enroll_rate, fail_rate = fail_rate, weight = wgt05, info_frac = NULL, analysis_time = sort(unique(x_ahr$analysis$time)), ratio = ratio, alpha = alpha, beta = beta, upper = upper, upar = upar, lower = lower, lpar = lpar ) x_wlr %>% summary() #> # A tibble: 6 × 7 #> # Groups: Analysis [3] #> Analysis Bound Z `~wHR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Futi… -1.17 1.28 0.879 0.0141 #> 2 Analysis: 1 Ti… Effi… 6.02 0.284 0 0 #> 3 Analysis: 2 Ti… Futi… 0.57 0.919 0.283 0.0464 #> 4 Analysis: 2 Ti… Effi… 3.16 0.627 0.0008 0.214 #> 5 Analysis: 3 Ti… Futi… 1.96 0.789 0.0247 0.100 #> 6 Analysis: 3 Ti… Effi… 1.96 0.789 0.0247 0.9 #> # ℹ 1 more variable: `Null hypothesis` # } # Maxcombo ---- # \\donttest{ x_combo <- gs_design_combo( ratio = 1, alpha = 0.025, beta = 0.2, enroll_rate = define_enroll_rate(duration = 12, rate = 500 / 12), fail_rate = tibble::tibble( stratum = \"All\", duration = c(4, 100), fail_rate = log(2) / 15, hr = c(1, .6), dropout_rate = .001 ), fh_test = fh_test, upper = gs_spending_combo, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025), lower = gs_spending_combo, lpar = list(sf = gsDesign::sfLDOF, total_spend = 0.2) ) x_combo %>% summary() #> # A tibble: 6 × 6 #> # Groups: Analysis [3] #> Analysis Bound Z `Nominal p` `Alternate hypothesis` `Null hypothesis` #> #> 1 Analysis: 1 … Futi… -2.72 0.997 0.0003 0.0033 #> 2 Analysis: 1 … Effi… 6.18 0 0 0 #> 3 Analysis: 2 … Futi… 0.65 0.257 0.0847 0.743 #> 4 Analysis: 2 … Effi… 2.8 0.0026 0.220 0.0026 #> 5 Analysis: 3 … Futi… 2.1 0.018 0.2 0.976 #> 6 Analysis: 3 … Effi… 2.1 0.018 0.8 0.0237 # } # Risk difference ---- # \\donttest{ gs_design_rd( p_c = tibble::tibble(stratum = \"All\", rate = .2), p_e = tibble::tibble(stratum = \"All\", rate = .15), info_frac = c(0.7, 1), rd0 = 0, alpha = .025, beta = .1, ratio = 1, stratum_prev = NULL, weight = \"unstratified\", upper = gs_b, lower = gs_b, upar = gsDesign::gsDesign( k = 3, test.type = 1, sfu = gsDesign::sfLDOF, sfupar = NULL )$upper$bound, lpar = c(qnorm(.1), rep(-Inf, 2)) ) %>% summary() #> # A tibble: 3 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z ~Risk difference at …¹ `Nominal p` `Alternate hypothesis` #> #> 1 Analysi… Futi… -1.28 -0.0201 0.9 0 #> 2 Analysi… Effi… 3.71 0.0582 0.0001 0.298 #> 3 Analysi… Effi… 2.51 0.033 0.006 0.9 #> # ℹ abbreviated name: ¹​`~Risk difference at bound` #> # ℹ 1 more variable: `Null hypothesis` # }"},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":null,"dir":"Reference","previous_headings":"","what":"Rounds sample size to an even number for equal design — to_integer","title":"Rounds sample size to an even number for equal design — to_integer","text":"Rounds sample size even number equal design","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Rounds sample size to an even number for equal design — to_integer","text":"","code":"to_integer(x, ...) # S3 method for class 'fixed_design' to_integer(x, sample_size = TRUE, ...) 
# S3 method for class 'gs_design' to_integer(x, sample_size = TRUE, ...)"},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rounds sample size to an even number for equal design — to_integer","text":"x An object returned by fixed_design_xxx() or gs_design_xxx(). ... Additional parameters (not used). sample_size Logical, indicating whether to ceiling the sample size to an even integer.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Rounds sample size to an even number for equal design — to_integer","text":"A list similar to the output of fixed_design_xxx() or gs_design_xxx(), except the sample size is an integer.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/to_integer.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Rounds sample size to an even number for equal design — to_integer","text":"","code":"library(dplyr) library(gsDesign2) # Average hazard ratio # \\donttest{ x <- fixed_design_ahr( alpha = .025, power = .9, enroll_rate = define_enroll_rate(duration = 18, rate = 1), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), study_duration = 36 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Average hazard ratio 464 325 35.9 1.96 0.025 0.900 # FH x <- fixed_design_fh( alpha = 0.025, power = 0.9, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), rho = 0.5, gamma = 0.5, study_duration = 36, ratio = 1 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Fleming-Harrington FH(0.5, 0.5) 378 264 35.8 1.96 0.025 0.900 # MB x <- fixed_design_mb( alpha = 0.025, power = 0.9, enroll_rate = define_enroll_rate(duration = 18, rate = 20), fail_rate = define_fail_rate( duration = c(4, 100), fail_rate = log(2) / 12, hr = c(1, .6), dropout_rate = .001 ), tau = 4, study_duration = 36, ratio = 1 ) x |> to_integer() |> summary() #> # A tibble: 1 × 7 #> Design N Events Time Bound alpha Power #> #> 1 Modestly weighted LR: tau = 4 430 302 36.1 1.96 0.025 0.901 # } # \\donttest{ # Example 1: Information fraction based spending gs_design_ahr( analysis_time = c(18, 30), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z `~HR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Tim… Effi… 2.57 0.696 0.005 0.288 #> 2 Analysis: 2 Tim… Effi… 1.99 0.799 0.0234 0.901 #> # ℹ 1 more variable: `Null hypothesis` gs_design_wlr( analysis_time = c(18, 30), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z `~wHR at bound` `Nominal p` `Alternate hypothesis` #> #> 1 Analysis: 1 Ti… Effi… 2.57 0.700 0.0051 0.289 #> 2 Analysis: 2 Ti… Effi… 1.99 0.802 0.0234 0.900 #> # ℹ 1 more variable: `Null hypothesis` gs_design_rd( p_c = tibble::tibble(stratum = c(\"A\", \"B\"), rate = c(.2, .3)), p_e = tibble::tibble(stratum = c(\"A\", \"B\"), rate = c(.15, .27)), 
weight = \"ss\", stratum_prev = tibble::tibble(stratum = c(\"A\", \"B\"), prevalence = c(.4, .6)), info_frac = c(0.7, 1), upper = gs_spending_bound, upar = list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() |> summary() #> # A tibble: 2 × 7 #> # Groups: Analysis [2] #> Analysis Bound Z ~Risk difference at …¹ `Nominal p` `Alternate hypothesis` #> #> 1 Analysi… Effi… 2.44 0.0339 0.0074 0.616 #> 2 Analysi… Effi… 2 0.0232 0.0228 0.9 #> # ℹ abbreviated name: ¹​`~Risk difference at bound` #> # ℹ 1 more variable: `Null hypothesis` # Example 2: Calendar based spending x <- gs_design_ahr( upper = gs_spending_bound, analysis_time = c(18, 30), upar = list( sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = c(18, 30) / 30 ), lower = gs_b, lpar = c(-Inf, -Inf) ) |> to_integer() # The IA nominal p-value is the same as the IA alpha spending x$bound$`nominal p`[1] #> [1] 0.003808063 gsDesign::sfLDOF(alpha = 0.025, t = 18 / 30)$spend #> [1] 0.003808063 # }"},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":null,"dir":"Reference","previous_headings":"","what":"Weight functions for weighted log-rank test — wlr_weight","title":"Weight functions for weighted log-rank test — wlr_weight","text":"wlr_weight_fh Fleming-Harrington, FH(rho, gamma) weight function. wlr_weight_1 constant log rank test. wlr_weight_power Gehan-Breslow Tarone-Ware weight function. wlr_weight_mb Magirr (2021) weight function.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Weight functions for weighted log-rank test — wlr_weight","text":"","code":"wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0, tau = NULL) wlr_weight_1(x, arm0, arm1) wlr_weight_n(x, arm0, arm1, power = 1) wlr_weight_mb(x, arm0, arm1, tau = NULL, w_max = Inf)"},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Weight functions for weighted log-rank test — wlr_weight","text":"x vector numeric values. arm0 arm object defined npsurvSS package. arm1 arm object defined npsurvSS package. rho scalar parameter controls type test. gamma scalar parameter controls type test. tau scalar parameter cut-time modest weighted log rank test. power scalar parameter controls power weight function. w_max scalar parameter cut-weight modest weighted log rank test.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Weight functions for weighted log-rank test — wlr_weight","text":"vector weights. vector weights. vector weights. 
vector weights.","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"specification","dir":"Reference","previous_headings":"","what":"Specification","title":"Weight functions for weighted log-rank test — wlr_weight","text":"contents section shown PDF user manual .","code":""},{"path":"https://merck.github.io/gsDesign2/reference/wlr_weight.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Weight functions for weighted log-rank test — wlr_weight","text":"","code":"enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_fh(1:3, arm0, arm1, rho = 0, gamma = 0, tau = NULL) #> [1] 1 1 1 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_1(1:3, arm0, arm1) #> [1] 1 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_n(1:3, arm0, arm1, power = 2) #> [1] 3.448634 2.973357 2.563657 enroll_rate <- define_enroll_rate( duration = c(2, 2, 10), rate = c(3, 6, 9) ) fail_rate <- define_fail_rate( duration = c(3, 100), fail_rate = log(2) / c(9, 18), hr = c(.9, .6), dropout_rate = .001 ) gs_arm <- gs_create_arm(enroll_rate, fail_rate, ratio = 1) arm0 <- gs_arm$arm0 arm1 <- gs_arm$arm1 wlr_weight_mb(1:3, arm0, arm1, tau = -1, w_max = 1.2) #> [1] 1.075901 1.157545 1.200000"},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-112","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.2","title":"gsDesign2 1.1.2","text":"CRAN release: 2024-04-09","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-2","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.2","text":"gs_update_ahr() function now available efficacy futility boundary update based blinded estimation treatment effect (#370).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-1-2","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.1.2","text":"Fix accrual parameters bugs gs_design_wlr() depending npsurvSS (#344, #356). Fix gs_design_ahr() incorporate information fraction driven design number analyses >= 4 (#358).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-2","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.2","text":"Zero failure rate intervals acceptable input (#360). Study duration > 100 units executable event accrual slow (#368).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"documentation-1-1-2","dir":"Changelog","previous_headings":"","what":"Documentation","title":"gsDesign2 1.1.2","text":"new vignette introducing boundary update available (#278, #364, #366). 
new vignette bridging gsDesign2 6 test types gsDesign available. pkgdown website re-organized providing better view users (#341).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"testing-1-1-2","dir":"Changelog","previous_headings":"","what":"Testing","title":"gsDesign2 1.1.2","text":"Independent testing as_gt() added (#337). Restructure tests make self-contained (#347).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-111","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.1","title":"gsDesign2 1.1.1","text":"CRAN release: 2024-02-09","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-1","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.1","text":"as_rtf() method now available fixed_design gs_design objects generating RTF table outputs (#278).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-1-1","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.1.1","text":"gs_power_wlr() to_integer() now check convert integer sample size rigorously (#322). gs_design_*() now handle exceptions explicitly hazard ratio set 1 throughout study (#301). fixed_design_rd() generate warnings due previous default value change h1_spending (#296).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-1","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.1","text":"gs_power_ahr() now runs twice fast using data.table performance optimizations (#295), enhanced similar improvements gs_info_ahr() pw_info() (#300). Enrollment failure rate input constructors validators refactored check format instead class. change reduces number warning messages catches real exceptions errors properly (#316). Nested functions refactored reusable internal functions, improve code rigor, avoid potential scoping pitfalls, facilitate debugging (#235). fixed designs, variable names table outputs to_integer() summary() updated (#292).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"documentation-1-1-1","dir":"Changelog","previous_headings":"","what":"Documentation","title":"gsDesign2 1.1.1","text":"Add new vignette statistical information null alternative hypothesis (#289). Improve define_enroll_rate() define_fail_rate() documentation adding detailed descriptions improving code examples (#302). function reference page now dedicated sections piecewise exponential distributions computing trial events (#258). Use four trailing dashes convention standardize code comment section format (#308).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"namespace-and-testing-1-1-1","dir":"Changelog","previous_headings":"","what":"Namespace and testing","title":"gsDesign2 1.1.1","text":"Tidy namespace removing rlang adding stats Imports (#307, #325). Qualify namespaces tests avoid library() calls (#332). Fortify GitHub Actions workflows limiting token usage necessary enabling manual trigger workflow runs (#326). 
Update GitHub Actions workflows latest versions upstream (#330).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-110","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.1.0","title":"gsDesign2 1.1.0","text":"CRAN release: 2023-08-23","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-1-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.1.0","text":"Split fixed_design() group fixed_design_*() functions enhanced modularity (#263). gs_design_rd() gs_power_rd() now updated options weighting stratified design (#276). ppwe() now accepts two arguments duration rate instead data frame fail_rate (#254). Unexport helper functions gridpts(), h1(), hupdate() (#253).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-1-0","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.1.0","text":"Introduce define_enroll_rate() define_fail_rate() new input constructor functions replace tibble inputs (#238). Add new function pw_info() calculates statistical information piecewise model (#262).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-1-0","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.1.0","text":"Add vignette showing canonical joint distribution Z-score B-values null alternative hypothesis AHR test (#246). Refactor expected_event() improve computational performance (@jdblischak, #250). Move source code legacy version inst/ tests/testthat/ developer tests (#269).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-109","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.9","title":"gsDesign2 1.0.9","text":"CRAN release: 2023-06-20","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-9","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.9","text":"Add CRAN download counts badge (#215). Update documentation gs_design_rd() (#220). Format footnote numbers using decimal notation (#222). Split C++ functions individual .cpp header files (#224).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-9","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.9","text":"Fix digits display summary() (#231).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-108","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.8","title":"gsDesign2 1.0.8","text":"CRAN release: 2023-05-01","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-8","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.8","text":"Update calculation upper/lower bounds final analysis MaxCombo tests (#217). Update fixed_design() function application stratified design using Lachin Foulkes method (#211). Correct fixed_design() function application rmst (#212). Rename info_scale argument options c(0, 1, 2) c(\"h0_h1_info\", \"h0_info\", \"h1_info\") informative make default value (\"h0_h1_info\") clear (#203). Add missing global functions/variables (#213). Fix outdated argument names use canonical style text elements README.md (#198). 
Add CRAN downloads badge README.md show monthly downloads (#216).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-8","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.8","text":"Fix calculation futility bounds gs_power_ahr() (#202).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-107","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.7","title":"gsDesign2 1.0.7","text":"CRAN release: 2023-03-20","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-7","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.7","text":"Move imported dependencies Suggests Imports. Remove redundant dependencies Suggests. Update GitHub Actions workflows latest versions upstream. Add rule .gitattributes GitHub Linguist keep repository’s language statistics accurate.","code":""},{"path":[]},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"improvements-1-0-6","dir":"Changelog","previous_headings":"","what":"Improvements","title":"gsDesign2 1.0.6","text":"Export functions gridpts(), h1(), hupdate(), gs_create_arm() avoid use ::: code examples. Fix write path issue moving test fixture generation script data-raw/ included package.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-105","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.5","title":"gsDesign2 1.0.5","text":"First submission CRAN March 2023.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-0-5","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.0.5","text":"Passes lintr check entire package (#150, #151, #171). Improve documentation (#161, #163, #168, #176).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-5","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.5","text":"check_fail_rate() 1 number fail_rate > 0 (#132). gs_power_ahr() study duration > 48 months (#141). fixed_design() event-based design (#143). gs_design_combo() test applies part analysis (#148). gs_info_rd() variance calculation (#153). summary() capitalized first letter summary header (#164).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-100","dir":"Changelog","previous_headings":"","what":"gsDesign2 1.0.0","title":"gsDesign2 1.0.0","text":"GitHub release December 2022.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"breaking-changes-1-0-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"gsDesign2 1.0.0","text":"Merges gsDesign2 v0.2.1 gsdmvn. Updates API follow new style guide vignette(\"style\"). See detailed mapping old API new API #84.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"new-features-1-0-0","dir":"Changelog","previous_headings":"","what":"New features","title":"gsDesign2 1.0.0","text":"Supports organized summary tables gt tables. Power/sample size calculation risk difference. Integer sample size support (#116, #125). Adds fixed_design() implement different methods power/sample size calculation. Adds info_scale arguments gs_design_*() gs_power_*(). 
Adds RMST milestone methods fixed design.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"bug-fixes-1-0-0","dir":"Changelog","previous_headings":"","what":"Bug fixes","title":"gsDesign2 1.0.0","text":"expected_accrual() stratified population. gs_spending_bound() IA close FA (#40). gs_power_bound() applied MaxCombo test (#62). gs_design_npe() type error (#59).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"minor-improvements-1-0-0","dir":"Changelog","previous_headings":"","what":"Minor improvements","title":"gsDesign2 1.0.0","text":"Adds re-organizes vignettes.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-021","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.2.1","title":"gsDesign2 0.2.1","text":"GitHub release August 2022. release merging Merck/gsdmvn.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-020","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.2.0","title":"gsDesign2 0.2.0","text":"GitHub release May 2022. Supports Biometrical Journal paper “unified framework weighted parametric group sequential design” Keaven M. Anderson, Zifang Guo, Jing Zhao, Linda Z. Sun.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-010","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.1.0","title":"gsDesign2 0.1.0","text":"GitHub release May 2021. Updated AHR vignette introduce average hazard ratio concept properly. Added arbitrary distribution vignette demonstrate s2pwe(). Corrected calculations AHR() using stratified population. Release Regulatory/Industry Symposium training.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009006","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9006","title":"gsDesign2 0.0.0.9006","text":"GitHub release December 2019. Added vignette eEvents_df() explaining methods thoroughly. Updated eEvents_df() simplify output option simple = FALSE.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009005","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9005","title":"gsDesign2 0.0.0.9005","text":"GitHub release December 2019. Updated docs/ directory correct reference materials website. Minor fixes eAccrual().","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009004","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9004","title":"gsDesign2 0.0.0.9004","text":"GitHub release November 2019. Moved new simulation functions simtrial package (simfix(), simfix2simPWSurv(), pMaxCombo()).","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0009003","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.9003","title":"gsDesign2 0.0.0.9003","text":"GitHub release November 2019. Tried make AHR() simfix() compatible . Improved vignette group sequential design. Added pkgdown website documentation vignettes. Added support functions support approximation using visualization piecewise model.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0002","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.2","title":"gsDesign2 0.0.0.2","text":"GitHub release October 2019. Update AHR() output trial duration, expected events average hazard ratio tibble. 
Vignette AHRvignette demonstrating sample size computations fixed design non-proportional hazards assumptions. Vignette gsNPH demonstrating sample size computations group sequential design non-proportional hazards assumptions. Initial implementation pMaxCombo() compute p-value MaxCombo test; pMaxComboVignette demonstrates capability.","code":""},{"path":"https://merck.github.io/gsDesign2/news/index.html","id":"gsdesign2-0001","dir":"Changelog","previous_headings":"","what":"gsDesign2 0.0.0.1","title":"gsDesign2 0.0.0.1","text":"GitHub release September 2019. Computations based piecewise constant enrollment piecewise exponential failure rate. Expected event count calculation different hazard ratios eEvents_df(). Average hazard ratio computation based expected event counts AHR(). Vignette demonstrating fixed sample size computation simulation verify power.","code":""}]