From e1c2883dbaf5e156f384c78249f8bd00a99ca986 Mon Sep 17 00:00:00 2001 From: Scott Chamberlain Date: Thu, 4 Apr 2024 21:49:40 -0700 Subject: [PATCH] add fxn six_file_upload #67 - as part of addressing six_file_upload, reworked s3 connection acquisition for paws and s3fs - rework tests to work with new s3 connection - rename random_str to random_db_id_str --- NAMESPACE | 2 + R/admin.R | 6 ++- R/bucket.R | 31 +++++++------ R/database-misc.R | 2 +- R/database-rds.R | 2 +- R/files.R | 78 +++++++++++++++++++++++++------- R/interface.R | 72 +++++++++++++++--------------- R/internal.R | 2 +- R/onload.R | 4 -- R/s3con.R | 39 ++++++++++++++++ man/aws_bucket_create.Rd | 3 +- man/aws_bucket_delete.Rd | 3 +- man/aws_bucket_download.Rd | 3 +- man/aws_bucket_exists.Rd | 3 +- man/aws_bucket_list_objects.Rd | 3 +- man/aws_bucket_tree.Rd | 3 +- man/aws_bucket_upload.Rd | 3 +- man/aws_buckets.Rd | 3 +- man/aws_file_attr.Rd | 3 +- man/aws_file_copy.Rd | 3 +- man/aws_file_delete.Rd | 3 +- man/aws_file_download.Rd | 3 +- man/aws_file_exists.Rd | 3 +- man/aws_file_rename.Rd | 3 +- man/aws_file_upload.Rd | 27 ++++-------- man/con_s3.Rd | 16 +++++++ man/con_s3fs.Rd | 17 +++++++ man/six_admin_setup.Rd | 2 + man/six_bucket_delete.Rd | 19 +++++--- man/six_file_upload.Rd | 81 ++++++++++++++++++++++++++++++++++ man/six_user_create.Rd | 2 + man/six_user_delete.Rd | 2 + tests/testthat/test-buckets.R | 25 +++++------ tests/testthat/test-files.R | 31 ++++++------- tests/testthat/test-s3.R | 5 +-- 35 files changed, 359 insertions(+), 148 deletions(-) create mode 100644 R/s3con.R create mode 100644 man/con_s3.Rd create mode 100644 man/con_s3fs.Rd create mode 100644 man/six_file_upload.Rd diff --git a/NAMESPACE b/NAMESPACE index 8131f80..333fec5 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -92,6 +92,7 @@ export(six_bucket_change_user) export(six_bucket_delete) export(six_bucket_permissions) export(six_bucket_remove_user) +export(six_file_upload) export(six_user_create) export(six_user_creds) export(six_user_delete) @@ -143,6 +144,7 @@ importFrom(purrr,flatten) importFrom(purrr,keep) importFrom(purrr,list_rbind) importFrom(purrr,map) +importFrom(purrr,map2_vec) importFrom(purrr,map_chr) importFrom(purrr,map_lgl) importFrom(purrr,pluck) diff --git a/R/admin.R b/R/admin.R index e5bb15b..ee16da6 100644 --- a/R/admin.R +++ b/R/admin.R @@ -37,8 +37,10 @@ group_policies_data <- list( #' group_policies("users") group_policies <- function(group) { stop_if_not(is.character(group), "group must be character") - stop_if_not(group %in% names(group_policies_data), - "group must be one of {names(group_policies_data)}") + stop_if_not( + group %in% names(group_policies_data), + "group must be one of {names(group_policies_data)}" + ) group_policies_data[[group]] } diff --git a/R/bucket.R b/R/bucket.R index aa619f4..f3cea1d 100644 --- a/R/bucket.R +++ b/R/bucket.R @@ -21,7 +21,7 @@ aws_bucket_exists <- function(bucket) { bucket_checks(bucket) res <- tryCatch( { - env64$s3$head_bucket(Bucket = bucket) + con_s3()$head_bucket(Bucket = bucket) }, error = function(e) e ) @@ -42,7 +42,7 @@ aws_bucket_exists <- function(bucket) { #' } aws_bucket_create <- function(bucket, ...) { bucket_checks(bucket) - env64$s3$create_bucket( + con_s3()$create_bucket( Bucket = bucket, CreateBucketConfiguration = list(LocationConstraint = env_var("AWS_REGION")), ... @@ -94,7 +94,7 @@ aws_bucket_delete <- function(bucket, force = FALSE, ...) { return(invisible()) } } - env64$s3$delete_bucket(Bucket = bucket, ...) + con_s3()$delete_bucket(Bucket = bucket, ...) 
return(invisible()) } @@ -105,12 +105,14 @@ aws_bucket_delete <- function(bucket, force = FALSE, ...) { #' #' @export #' @importFrom purrr safely -#' @inherit aws_bucket_delete +#' @inheritParams aws_bucket_delete #' @section What is magical: #' - Exits early if bucket does not exist #' - Checks for any objects in the bucket and deletes any present #' - Deletes bucket after deleting objects +#' @family buckets #' @family magicians +#' @return `NULL`, invisibly #' @examplesIf interactive() #' # bucket does not exist #' six_bucket_delete("notabucket") @@ -131,8 +133,10 @@ aws_bucket_delete <- function(bucket, force = FALSE, ...) { #' ) #' aws_file_upload( #' c(demo_rds_file, links_file), -#' s3_path(bucket, "newfolder", -#' c(basename(demo_rds_file), basename(links_file))) +#' s3_path( +#' bucket, "newfolder", +#' c(basename(demo_rds_file), basename(links_file)) +#' ) #' ) #' #' six_bucket_delete(bucket) @@ -274,12 +278,14 @@ aws_bucket_upload <- function( #' ) #' aws_bucket_list_objects(bucket = bucket_name) aws_bucket_list_objects <- function(bucket, ...) { - out <- env64$s3$list_objects(bucket, ...) - if (rlang::is_empty(out$Contents)) return(tibble()) + out <- con_s3()$list_objects(bucket, ...) + if (rlang::is_empty(out$Contents)) { + return(tibble()) + } as_tibble(jsonlite::fromJSON( - jsonlite::toJSON(out$Contents, auto_unbox = TRUE), - flatten = TRUE - )) %>% + jsonlite::toJSON(out$Contents, auto_unbox = TRUE), + flatten = TRUE + )) %>% mutate( bucket = bucket, uri = glue("s3://{bucket}/{Key}"), @@ -308,8 +314,7 @@ aws_bucket_list_objects <- function(bucket, ...) { #' aws_buckets() #' } aws_buckets <- function(...) { - s3fs_creds_refresh() - out <- s3fs::s3_dir_info(refresh = TRUE, ...) + out <- con_s3fs()$dir_info(refresh = TRUE, ...) if (is.data.frame(out) && NROW(out) > 0) { as_tibble(out) } else { diff --git a/R/database-misc.R b/R/database-misc.R index 50fddb0..3d66e93 100644 --- a/R/database-misc.R +++ b/R/database-misc.R @@ -35,6 +35,6 @@ which_driver <- function(engine) { ) } -random_str <- function(prefix = "-") { +random_db_id_str <- function(prefix = "-") { paste0(prefix, sub("-.+", "", uuid::UUIDgenerate())) } diff --git a/R/database-rds.R b/R/database-rds.R index 8b2456a..ba76b0b 100644 --- a/R/database-rds.R +++ b/R/database-rds.R @@ -149,7 +149,7 @@ aws_db_rds_create <- if (verbose) cli::cli_alert_info("Uploading user/pwd to secrets manager") x <- instance_con_info(id) aws_secrets_create( - name = paste0(id, random_str()), + name = paste0(id, random_db_id_str()), secret = construct_db_secret( engine = x$engine, host = x$host, diff --git a/R/files.R b/R/files.R index 94efc07..3524e15 100644 --- a/R/files.R +++ b/R/files.R @@ -13,23 +13,21 @@ equal_lengths <- function(x, y) { #' @export #' @importFrom fs file_exists #' @importFrom s3fs s3_file_copy -#' @inheritParams aws_file_copy +#' @importFrom purrr map2_vec #' @param path (character) a file path to read from. required #' @param remote_path (character) a remote path where the file #' should go. required #' @param ... 
named parameters passed on to [s3fs::s3_file_copy()]
 #' @return (character) a vector of remote s3 paths
-#' @details
-#' - For upload: if it does exist it will be created
-#' - For download: if it does not exist, function will return an error
-#'
-#' To upload a folder of files see [aws_bucket_upload()]
+#' @details To upload a folder of files see [aws_bucket_upload()]
 #' @family files
-#' @examples \dontrun{
+#' @examplesIf interactive()
+#' bucket <- random_string("bucket")
+#' aws_bucket_create(bucket)
 #' demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
 #' aws_file_upload(
 #'   demo_rds_file,
-#'   s3_path("s64-test-2", basename(demo_rds_file))
+#'   s3_path(bucket, basename(demo_rds_file))
 #' )
 #'
 #' ## many files at once
@@ -46,20 +44,68 @@ equal_lengths <- function(x, y) {
 #'
 #' # bucket doesn't exist
 #' aws_file_upload(demo_rds_file, "s3://not-a-bucket/eee.rds")
-#' }
 #'
-#' @examplesIf interactive()
 #' # path doesn't exist
 #' aws_file_upload(
 #'   "file_doesnt_exist.txt",
-#'   s3_path("s64-test-2", "file_doesnt_exist.txt")
-#' )
-aws_file_upload <- function(path, remote_path, force = FALSE, ...) {
+#'   s3_path("s64-test-2", "file_doesnt_exist.txt"))
+aws_file_upload <- function(path, remote_path, ...) {
   stopifnot(fs::file_exists(path))
   bucket <- path_s3_parse(remote_path)[[1]]$bucket
+  stop_if_not(aws_bucket_exists(bucket),
+    "bucket {.strong {bucket}} doesn't exist")
+  # s3fs_creds_refresh()
+  map2_vec(path, remote_path, con_s3fs()$file_copy, ...)
+}
+
+#' Magically upload a file
+#'
+#' @export
+#' @param path (character) one or more file paths to add to
+#' the `bucket`. required
+#' @inheritParams aws_file_copy
+#' @param ... named params passed on to
+#' [put_object](https://www.paws-r-sdk.com/docs/s3_put_object/)
+#' @section What is magical:
+#' - Exits early if files do not exist
+#' - Creates the bucket if it does not exist
+#' - Adds files to the bucket, figuring out the key to use from
+#' the supplied path
+#' - Function is vectorized for the `path` argument; you can
+#' pass in many file paths
+#' - xx
+#' @family files
+#' @family magicians
+#' @return (character) a vector of remote s3 paths where your
+#' files are located
+#' @examplesIf interactive()
+#' bucket <- random_string("bucket")
+#' demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
+#' six_file_upload(demo_rds_file, bucket)
+#'
+#' ## many files at once
+#' links_file <- file.path(system.file(), "Meta/links.rds")
+#' six_file_upload(c(demo_rds_file, links_file), bucket)
+#'
+#' # set expiration, expire 1 minute from now
+#' six_file_upload(demo_rds_file, bucket, Expires = Sys.time() + 60)
+#'
+#' # bucket doesn't exist
+#' six_file_upload(demo_rds_file, "not-a-buckets")
+#'
+#' # path doesn't exist
+#' # six_file_upload("file_doesnt_exist.txt", random_string("bucket"))
+six_file_upload <- function(path, bucket, force = FALSE, ...) {
+  stopifnot(fs::file_exists(path))
   bucket_create_if_not(bucket, force)
-  s3fs_creds_refresh()
-  purrr::map2_vec(path, remote_path, s3fs::s3_file_copy, ...)
+  if (!aws_bucket_exists(bucket)) {
+    cli_warning("bucket {.strong {bucket}} not created; exiting")
+    return(invisible())
+  }
+  map(path, \(p) {
+    con_s3()$put_object(Bucket = bucket, Key = basename(p), Body = p, ...)
+  })
+  s3_path(bucket, basename(path))
 }
 
 #' Download a file
@@ -146,7 +192,7 @@ aws_file_delete_one <- function(one_path, ...)
{ } else { path_parsed[[1]]$file } - env64$s3$delete_object( + con_s3()$delete_object( path_parsed[[1]]$bucket, glue("{key}{ifelse(trailing_slash, '/', '')}") ) diff --git a/R/interface.R b/R/interface.R index 1c4af45..cc27d53 100644 --- a/R/interface.R +++ b/R/interface.R @@ -28,20 +28,8 @@ set_s3_interface <- function(interface = "aws") { } # package paws - if (interface == "aws") { - s3con <- paws::s3() - } else if (interface == "localstack") { - s3con <- paws::s3( - credentials = list( - creds = list( - access_key_id = "NOTAREALKEY", - secret_access_key = "AREALLYFAKETOKEN" - ) - ), - endpoint = LOCALSTACK_ENDPOINT - ) - } else { - s3con <- paws::s3(config = list( + if (interface == "minio") { + paws::s3(config = list( credentials = list( creds = list( access_key_id = Sys.getenv("MINIO_USER"), @@ -50,33 +38,45 @@ set_s3_interface <- function(interface = "aws") { ), endpoint = Sys.getenv("MINIO_ENDPOINT") )) - } - - # package s3fs - if (interface == "aws") { - s3fs::s3_file_system( - aws_access_key_id = Sys.getenv("AWS_ACCESS_KEY_ID"), - aws_secret_access_key = Sys.getenv("AWS_SECRET_ACCESS_KEY"), - region_name = Sys.getenv("AWS_REGION"), - refresh = TRUE - ) } else if (interface == "localstack") { - s3fs::s3_file_system( - aws_access_key_id = "NOTAREALKEY", - aws_secret_access_key = "AREALLYFAKETOKEN", - endpoint = LOCALSTACK_ENDPOINT, - refresh = TRUE + paws::s3( + credentials = list( + creds = list( + access_key_id = "NOTAREALKEY", + secret_access_key = "AREALLYFAKETOKEN" + ) + ), + endpoint = LOCALSTACK_ENDPOINT ) } else { - s3fs::s3_file_system( - aws_access_key_id = Sys.getenv("MINIO_USER"), - aws_secret_access_key = Sys.getenv("MINIO_PWD"), - endpoint = Sys.getenv("MINIO_ENDPOINT"), - refresh = TRUE - ) + paws::s3() } - return(s3con) + # # package s3fs + # if (interface == "aws") { + # s3fs::s3_file_system( + # aws_access_key_id = Sys.getenv("AWS_ACCESS_KEY_ID"), + # aws_secret_access_key = Sys.getenv("AWS_SECRET_ACCESS_KEY"), + # region_name = Sys.getenv("AWS_REGION"), + # refresh = TRUE + # ) + # } else if (interface == "localstack") { + # s3fs::s3_file_system( + # aws_access_key_id = "NOTAREALKEY", + # aws_secret_access_key = "AREALLYFAKETOKEN", + # endpoint = LOCALSTACK_ENDPOINT, + # refresh = TRUE + # ) + # } else { + # s3fs::s3_file_system( + # aws_access_key_id = Sys.getenv("MINIO_USER"), + # aws_secret_access_key = Sys.getenv("MINIO_PWD"), + # endpoint = Sys.getenv("MINIO_ENDPOINT"), + # refresh = TRUE + # ) + # } + + # return(s3con) } #' Copy of `testthat::is_testing` diff --git a/R/internal.R b/R/internal.R index 75a6e72..f7f3e43 100644 --- a/R/internal.R +++ b/R/internal.R @@ -18,7 +18,7 @@ account_id <- memoise::memoise(function() { #' @return character string of bucket region; NULL if bucket not found bucket_region <- function(bucket) { res <- tryCatch( - env64$s3$get_bucket_location(bucket), + con_s3()$get_bucket_location(bucket), error = function(e) e ) if (rlang::is_error(res)) NULL else res$LocationConstraint diff --git a/R/onload.R b/R/onload.R index 8291e01..137f626 100644 --- a/R/onload.R +++ b/R/onload.R @@ -2,10 +2,6 @@ env64 <- new.env() .onLoad <- function(libname, pkgname) { - # sets creds for paws and s3fs for the S3 service - env64$s3 <- set_s3_interface("aws") - - # iam and costexplorer services env64$iam <- paws::iam() env64$costexplorer <- paws::costexplorer() env64$secretsmanager <- paws::secretsmanager() diff --git a/R/s3con.R b/R/s3con.R new file mode 100644 index 0000000..d51c335 --- /dev/null +++ b/R/s3con.R @@ -0,0 +1,39 @@ +LOCALSTACK_ENDPOINT 
<- "http://localhost.localstack.cloud:4566" # nolint + +#' Get the `paws` S3 client +#' @return a list with methods for interfacing with IAM; +#' see +#' @keywords internal +con_s3 <- function() { + set_s3_interface(Sys.getenv("AWS_PROFILE", "aws")) +} + +#' s3fs connection +#' @examplesIf interactive() +#' con <- con_s3fs() +#' file_copy <- con_s3fs()$file_copy +con_s3fs <- function() { + profile <- Sys.getenv("AWS_PROFILE") + if (profile == "minio") { + s3fs::s3_file_system( + aws_access_key_id = Sys.getenv("MINIO_USER"), + aws_secret_access_key = Sys.getenv("MINIO_PWD"), + endpoint = Sys.getenv("MINIO_ENDPOINT"), + refresh = TRUE + ) + } else if (profile == "localstack") { + s3fs::s3_file_system( + aws_access_key_id = "NOTAREALKEY", + aws_secret_access_key = "AREALLYFAKETOKEN", + endpoint = LOCALSTACK_ENDPOINT, + refresh = TRUE + ) + } else { + s3fs::s3_file_system( + aws_access_key_id = Sys.getenv("AWS_ACCESS_KEY_ID"), + aws_secret_access_key = Sys.getenv("AWS_SECRET_ACCESS_KEY"), + region_name = Sys.getenv("AWS_REGION"), + refresh = TRUE + ) + } +} diff --git a/man/aws_bucket_create.Rd b/man/aws_bucket_create.Rd index a205460..2bb41a6 100644 --- a/man/aws_bucket_create.Rd +++ b/man/aws_bucket_create.Rd @@ -34,6 +34,7 @@ Other buckets: \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_bucket_delete.Rd b/man/aws_bucket_delete.Rd index fe08a42..d7428d4 100644 --- a/man/aws_bucket_delete.Rd +++ b/man/aws_bucket_delete.Rd @@ -45,6 +45,7 @@ Other buckets: \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_bucket_download.Rd b/man/aws_bucket_download.Rd index 4ab5828..43214cf 100644 --- a/man/aws_bucket_download.Rd +++ b/man/aws_bucket_download.Rd @@ -43,6 +43,7 @@ Other buckets: \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_bucket_exists.Rd b/man/aws_bucket_exists.Rd index 3be1128..23e3ef1 100644 --- a/man/aws_bucket_exists.Rd +++ b/man/aws_bucket_exists.Rd @@ -35,6 +35,7 @@ Other buckets: \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_bucket_list_objects.Rd b/man/aws_bucket_list_objects.Rd index a4244d2..f4b2b17 100644 --- a/man/aws_bucket_list_objects.Rd +++ b/man/aws_bucket_list_objects.Rd @@ -49,6 +49,7 @@ Other buckets: \code{\link{aws_bucket_exists}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_bucket_tree.Rd b/man/aws_bucket_tree.Rd index e09b039..6234ccd 100644 --- a/man/aws_bucket_tree.Rd +++ b/man/aws_bucket_tree.Rd @@ -56,6 +56,7 @@ Other buckets: \code{\link{aws_bucket_exists}()}, \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_upload}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } 
\concept{buckets} diff --git a/man/aws_bucket_upload.Rd b/man/aws_bucket_upload.Rd index 1117b70..495b92a 100644 --- a/man/aws_bucket_upload.Rd +++ b/man/aws_bucket_upload.Rd @@ -68,6 +68,7 @@ Other buckets: \code{\link{aws_bucket_exists}()}, \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, -\code{\link{aws_buckets}()} +\code{\link{aws_buckets}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_buckets.Rd b/man/aws_buckets.Rd index 7ea4ebb..634ddc8 100644 --- a/man/aws_buckets.Rd +++ b/man/aws_buckets.Rd @@ -47,6 +47,7 @@ Other buckets: \code{\link{aws_bucket_exists}()}, \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, -\code{\link{aws_bucket_upload}()} +\code{\link{aws_bucket_upload}()}, +\code{\link{six_bucket_delete}()} } \concept{buckets} diff --git a/man/aws_file_attr.Rd b/man/aws_file_attr.Rd index f48fbdc..0e39fd7 100644 --- a/man/aws_file_attr.Rd +++ b/man/aws_file_attr.Rd @@ -36,6 +36,7 @@ Other files: \code{\link{aws_file_download}()}, \code{\link{aws_file_exists}()}, \code{\link{aws_file_rename}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_copy.Rd b/man/aws_file_copy.Rd index 1c1f210..8128923 100644 --- a/man/aws_file_copy.Rd +++ b/man/aws_file_copy.Rd @@ -49,6 +49,7 @@ Other files: \code{\link{aws_file_download}()}, \code{\link{aws_file_exists}()}, \code{\link{aws_file_rename}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_delete.Rd b/man/aws_file_delete.Rd index 011ddcf..a4e56e0 100644 --- a/man/aws_file_delete.Rd +++ b/man/aws_file_delete.Rd @@ -38,6 +38,7 @@ Other files: \code{\link{aws_file_download}()}, \code{\link{aws_file_exists}()}, \code{\link{aws_file_rename}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_download.Rd b/man/aws_file_download.Rd index 1dc1813..4c088f1 100644 --- a/man/aws_file_download.Rd +++ b/man/aws_file_download.Rd @@ -50,6 +50,7 @@ Other files: \code{\link{aws_file_delete}()}, \code{\link{aws_file_exists}()}, \code{\link{aws_file_rename}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_exists.Rd b/man/aws_file_exists.Rd index bc9eba9..868db23 100644 --- a/man/aws_file_exists.Rd +++ b/man/aws_file_exists.Rd @@ -30,6 +30,7 @@ Other files: \code{\link{aws_file_delete}()}, \code{\link{aws_file_download}()}, \code{\link{aws_file_rename}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_rename.Rd b/man/aws_file_rename.Rd index 9d266af..1d2470e 100644 --- a/man/aws_file_rename.Rd +++ b/man/aws_file_rename.Rd @@ -44,6 +44,7 @@ Other files: \code{\link{aws_file_delete}()}, \code{\link{aws_file_download}()}, \code{\link{aws_file_exists}()}, -\code{\link{aws_file_upload}()} +\code{\link{aws_file_upload}()}, +\code{\link{six_file_upload}()} } \concept{files} diff --git a/man/aws_file_upload.Rd b/man/aws_file_upload.Rd index 258d02c..c04f9ee 100644 --- a/man/aws_file_upload.Rd +++ b/man/aws_file_upload.Rd @@ -4,7 +4,7 @@ \alias{aws_file_upload} \title{Upload a file} \usage{ -aws_file_upload(path, remote_path, force = FALSE, ...) +aws_file_upload(path, remote_path, ...) 
}
 \arguments{
 \item{path}{(character) a file path to read from. required}
 
@@ -12,10 +12,6 @@ aws_file_upload(path, remote_path, force = FALSE, ...)
 \item{remote_path}{(character) a remote path where the file
 should go. required}
 
-\item{force}{(logical) force bucket creation without going through
-the prompt. default: \code{FALSE}. Should only be set to \code{TRUE} when
-required for non-interactive use.}
-
 \item{...}{named parameters passed on to \code{\link[s3fs:copy]{s3fs::s3_file_copy()}}}
 }
 \value{
@@ -25,19 +21,16 @@ required for non-interactive use.}
 (character) a vector of remote s3 paths
 }
 \description{
 Upload a file
 }
 \details{
-\itemize{
-\item For upload: if it does exist it will be created
-\item For download: if it does not exist, function will return an error
-}
-
-To upload a folder of files see \code{\link[=aws_bucket_upload]{aws_bucket_upload()}}
+To upload a folder of files see \code{\link[=aws_bucket_upload]{aws_bucket_upload()}}
 }
 \examples{
-\dontrun{
+\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+bucket <- random_string("bucket")
+aws_bucket_create(bucket)
 demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
 aws_file_upload(
   demo_rds_file,
-  s3_path("s64-test-2", basename(demo_rds_file))
+  s3_path(bucket, basename(demo_rds_file))
 )
 
 ## many files at once
@@ -54,14 +47,11 @@ aws_file_upload(demo_rds_file, s3_path("s64-test-2", "ddd.rds"),
 
 # bucket doesn't exist
 aws_file_upload(demo_rds_file, "s3://not-a-bucket/eee.rds")
-}
 
-\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
 # path doesn't exist
 aws_file_upload(
   "file_doesnt_exist.txt",
-  s3_path("s64-test-2", "file_doesnt_exist.txt")
-)
+  s3_path("s64-test-2", "file_doesnt_exist.txt"))
 \dontshow{\}) # examplesIf}
 }
 \seealso{
 Other files:
 \code{\link{aws_file_attr}()},
 \code{\link{aws_file_delete}()},
 \code{\link{aws_file_download}()},
 \code{\link{aws_file_exists}()},
-\code{\link{aws_file_rename}()}
+\code{\link{aws_file_rename}()},
+\code{\link{six_file_upload}()}
 }
 \concept{files}
diff --git a/man/con_s3.Rd b/man/con_s3.Rd
new file mode 100644
index 0000000..7015fa7
--- /dev/null
+++ b/man/con_s3.Rd
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/s3con.R
+\name{con_s3}
+\alias{con_s3}
+\title{Get the \code{paws} S3 client}
+\usage{
+con_s3()
+}
+\value{
+a list with methods for interfacing with S3;
+see \url{https://www.paws-r-sdk.com/docs/s3/}
+}
+\description{
+Get the \code{paws} S3 client
+}
+\keyword{internal}
diff --git a/man/con_s3fs.Rd b/man/con_s3fs.Rd
new file mode 100644
index 0000000..be9af6d
--- /dev/null
+++ b/man/con_s3fs.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/s3con.R
+\name{con_s3fs}
+\alias{con_s3fs}
+\title{s3fs connection}
+\usage{
+con_s3fs()
+}
+\description{
+s3fs connection
+}
+\examples{
+\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+con <- con_s3fs()
+file_copy <- con_s3fs()$file_copy
+\dontshow{\}) # examplesIf}
+}
diff --git a/man/six_admin_setup.Rd b/man/six_admin_setup.Rd
index 6573c3d..09a673b 100644
--- a/man/six_admin_setup.Rd
+++ b/man/six_admin_setup.Rd
@@ -29,6 +29,8 @@ AWS account setup for administrators
 \seealso{
 Other magicians:
+\code{\link{six_bucket_delete}()},
+\code{\link{six_file_upload}()},
 \code{\link{six_user_create}()},
 \code{\link{six_user_delete}()}
 }
diff --git a/man/six_bucket_delete.Rd b/man/six_bucket_delete.Rd
index e65ee6d..0edd985 100644
---
a/man/six_bucket_delete.Rd +++ b/man/six_bucket_delete.Rd @@ -23,10 +23,6 @@ non-interactive use.} Takes care of deleting bucket objects, so that the bucket itself can be deleted cleanly } -\note{ -Requires the env var \code{AWS_REGION}. This function prompts you to make -sure that you want to delete the bucket. -} \section{What is magical}{ \itemize{ @@ -57,8 +53,10 @@ aws_file_upload( ) aws_file_upload( c(demo_rds_file, links_file), - s3_path(bucket, "newfolder", - c(basename(demo_rds_file), basename(links_file))) + s3_path( + bucket, "newfolder", + c(basename(demo_rds_file), basename(links_file)) + ) ) six_bucket_delete(bucket) @@ -67,10 +65,19 @@ six_bucket_delete(bucket) \seealso{ Other buckets: \code{\link{aws_bucket_create}()}, +\code{\link{aws_bucket_delete}()}, \code{\link{aws_bucket_download}()}, \code{\link{aws_bucket_exists}()}, \code{\link{aws_bucket_list_objects}()}, \code{\link{aws_bucket_tree}()}, \code{\link{aws_bucket_upload}()}, \code{\link{aws_buckets}()} + +Other magicians: +\code{\link{six_admin_setup}()}, +\code{\link{six_file_upload}()}, +\code{\link{six_user_create}()}, +\code{\link{six_user_delete}()} } +\concept{buckets} +\concept{magicians} diff --git a/man/six_file_upload.Rd b/man/six_file_upload.Rd new file mode 100644 index 0000000..6f1fdd3 --- /dev/null +++ b/man/six_file_upload.Rd @@ -0,0 +1,81 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{six_file_upload} +\alias{six_file_upload} +\title{Magically upload a file} +\usage{ +six_file_upload(path, bucket, force = FALSE, ...) +} +\arguments{ +\item{path}{(character) one or more file paths to add to +the \code{bucket}. required} + +\item{bucket}{(character) bucket to copy files to. required. +if the bucket does not exist we prompt you asking if you'd like +the bucket to be created} + +\item{force}{(logical) force bucket creation without going through +the prompt. default: \code{FALSE}. 
Should only be set to \code{TRUE} when
+required for non-interactive use.}
+
+\item{...}{named params passed on to
+\href{https://www.paws-r-sdk.com/docs/s3_put_object/}{put_object}}
+}
+\value{
+(character) a vector of remote s3 paths where your
+files are located
+}
+\description{
+Magically upload a file
+}
+\section{What is magical}{
+
+\itemize{
+\item Exits early if files do not exist
+\item Creates the bucket if it does not exist
+\item Adds files to the bucket, figuring out the key to use from
+the supplied path
+\item Function is vectorized for the \code{path} argument; you can
+pass in many file paths
+\item xx
+}
+}
+
+\examples{
+\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+bucket <- random_string("bucket")
+demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
+six_file_upload(demo_rds_file, bucket)
+
+## many files at once
+links_file <- file.path(system.file(), "Meta/links.rds")
+six_file_upload(c(demo_rds_file, links_file), bucket)
+
+# set expiration, expire 1 minute from now
+six_file_upload(demo_rds_file, bucket, Expires = Sys.time() + 60)
+
+# bucket doesn't exist
+six_file_upload(demo_rds_file, "not-a-buckets")
+
+# path doesn't exist
+# six_file_upload("file_doesnt_exist.txt", random_string("bucket"))
+\dontshow{\}) # examplesIf}
+}
+\seealso{
+Other files:
+\code{\link{aws_file_attr}()},
+\code{\link{aws_file_copy}()},
+\code{\link{aws_file_delete}()},
+\code{\link{aws_file_download}()},
+\code{\link{aws_file_exists}()},
+\code{\link{aws_file_rename}()},
+\code{\link{aws_file_upload}()}
+
+Other magicians:
+\code{\link{six_admin_setup}()},
+\code{\link{six_bucket_delete}()},
+\code{\link{six_user_create}()},
+\code{\link{six_user_delete}()}
+}
+\concept{files}
+\concept{magicians}
diff --git a/man/six_user_create.Rd b/man/six_user_create.Rd
index 15fbc67..fbb67f9 100644
--- a/man/six_user_create.Rd
+++ b/man/six_user_create.Rd
@@ -60,6 +60,8 @@ Other users:
 
 Other magicians:
 \code{\link{six_admin_setup}()},
+\code{\link{six_bucket_delete}()},
+\code{\link{six_file_upload}()},
 \code{\link{six_user_delete}()}
 }
 \concept{magicians}
diff --git a/man/six_user_delete.Rd b/man/six_user_delete.Rd
index 5e0bf22..eb9e101 100644
--- a/man/six_user_delete.Rd
+++ b/man/six_user_delete.Rd
@@ -50,6 +50,8 @@ Other users:
 
 Other magicians:
 \code{\link{six_admin_setup}()},
+\code{\link{six_bucket_delete}()},
+\code{\link{six_file_upload}()},
 \code{\link{six_user_create}()}
 }
 \concept{magicians}
diff --git a/tests/testthat/test-buckets.R b/tests/testthat/test-buckets.R
index 65a8f21..330fd82 100644
--- a/tests/testthat/test-buckets.R
+++ b/tests/testthat/test-buckets.R
@@ -1,11 +1,6 @@
 skip_if_not(minio_available(), "Minio Not Available")
 
-invisible(env64$s3 <- set_s3_interface("minio"))
-s3fs::s3_file_system(
-  aws_access_key_id = "minioadmin",
-  aws_secret_access_key = "minioadmin",
-  endpoint = "http://localhost:9000"
-)
+Sys.setenv(AWS_PROFILE = "minio")
 buckets_empty()
 
 demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
@@ -16,7 +11,7 @@ test_that("aws_bucket_create", {
   expect_error(aws_bucket_create(5))
   expect_error(aws_bucket_create(letters))
 
-  bucket <- random_str("bucket")
+  bucket <- random_string("bucket")
   expect_false(aws_bucket_exists(bucket))
   aws_bucket_create(bucket)
   expect_true(aws_bucket_exists(bucket))
@@ -27,7 +22,7 @@ test_that("aws_bucket_exists", {
   expect_error(aws_bucket_exists(5))
   expect_error(aws_bucket_exists(letters))
 
-  bucket <- random_str("bucket")
+  bucket <- random_string("bucket")
 
   # bucket
DOES NOT exist, gives FALSE expect_false(aws_bucket_exists(bucket)) @@ -44,7 +39,7 @@ test_that("aws_bucket_delete", { expect_error(aws_bucket_delete(5)) expect_error(aws_bucket_delete(letters)) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) expect_true(aws_bucket_exists(bucket)) res <- aws_bucket_delete(bucket, force = TRUE) @@ -56,7 +51,7 @@ test_that("aws_bucket_download", { expect_error(aws_bucket_download()) expect_error(aws_bucket_download("")) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) aws_file_upload(demo_rds_file, s3_path(bucket, basename(demo_rds_file))) @@ -92,7 +87,7 @@ test_that("aws_bucket_list_objects", { expect_error(aws_bucket_list_objects()) expect_error(aws_bucket_list_objects(5)) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) ffs <- list.files(file.path(system.file(), "Meta"), full.names = TRUE) for (f in ffs) aws_file_upload(f, s3_path(bucket, basename(f))) @@ -107,12 +102,12 @@ test_that("aws_bucket_list_objects", { }) test_that("aws_buckets", { - for (i in replicate(100, random_str("bucket"))) aws_bucket_create(i) + for (i in replicate(10, random_string("bucket"))) aws_bucket_create(i) res <- aws_buckets() expect_s3_class(res, "tbl") - expect_gt(NROW(res), 50) + expect_gt(NROW(res), 5) buckets_empty() }) @@ -121,7 +116,7 @@ test_that("aws_bucket_tree", { expect_error(aws_bucket_tree()) expect_error(aws_bucket_tree("", 5)) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) ffs <- list.files(file.path(system.file(), "Meta"), full.names = TRUE) for (f in ffs) aws_file_upload(f, s3_path(bucket, basename(f))) @@ -140,4 +135,4 @@ test_that("aws_bucket_tree", { # cleanup buckets_empty() -invisible(env64$s3 <- set_s3_interface("aws")) +Sys.unsetenv("AWS_PROFILE") diff --git a/tests/testthat/test-files.R b/tests/testthat/test-files.R index a6acc03..c78eeae 100644 --- a/tests/testthat/test-files.R +++ b/tests/testthat/test-files.R @@ -1,11 +1,6 @@ skip_if_not(minio_available(), "Minio Not Available") -env64$s3 <- set_s3_interface("minio") -s3fs::s3_file_system( - aws_access_key_id = "minioadmin", - aws_secret_access_key = "minioadmin", - endpoint = "http://localhost:9000" -) +Sys.setenv(AWS_PROFILE = "minio") buckets_empty() demo_rds_file <- file.path(system.file(), "Meta/demo.rds") @@ -26,10 +21,11 @@ test_that("aws_file_upload - error behavior", { }) test_that("aws_file_upload - 1 file", { + bucket <- random_string("bucket") + aws_bucket_create(bucket) res <- aws_file_upload( demo_rds_file, - s3_path("s64-test-2", basename(demo_rds_file)), - force = TRUE + s3_path(bucket, basename(demo_rds_file)) ) expect_type(res, "character") @@ -87,14 +83,14 @@ test_that("aws_file_download - error behavior", { test_that("aws_file_download - many files", { aws_bucket_create("download") - the_files <- replicate(50, tempfile(fileext = ".txt")) + the_files <- replicate(10, tempfile(fileext = ".txt")) for (f in the_files) cat(letters, "\n", file = f) res <- aws_file_upload( the_files, s3_path("download", basename(the_files)) ) - downloaded_files <- replicate(50, tempfile(fileext = ".txt")) + downloaded_files <- replicate(10, tempfile(fileext = ".txt")) out <- aws_file_download( s3_path("download", basename(the_files)), downloaded_files @@ -186,7 +182,7 @@ test_that("aws_file_attr", { test_that("aws_file_exists - error behavior", { expect_error(aws_file_exists()) - bucket <- random_str("bucket") + bucket 
<- random_string("bucket") # bucket DOES NOT exist, just FALSE expect_false(aws_file_exists(s3_path(bucket, "TESTING123"))) @@ -199,10 +195,10 @@ test_that("aws_file_exists - error behavior", { }) test_that("aws_file_exists", { - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) - files <- replicate(25, tempfile(fileext = ".txt")) + files <- replicate(5, tempfile(fileext = ".txt")) for (i in files) { cat("Hello World!\n\n", file = i) remote_path <- s3_path(bucket, basename(i)) @@ -225,7 +221,7 @@ test_that("aws_file_rename", { expect_error(aws_file_rename()) expect_error(aws_file_rename("")) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) aws_file_upload(links_file, s3_path(bucket, basename(links_file))) @@ -247,12 +243,12 @@ test_that("aws_file_copy", { expect_error(aws_file_copy()) expect_error(aws_file_copy("")) - bucket <- random_str("bucket") + bucket <- random_string("bucket") aws_bucket_create(bucket) aws_file_upload(links_file, s3_path(bucket, basename(links_file))) - bucket_2 <- random_str("bucket") + bucket_2 <- random_string("bucket") aws_bucket_create(bucket_2) expect_false(aws_file_exists(s3_path(bucket_2, "links.rds"))) @@ -271,5 +267,4 @@ test_that("aws_file_copy", { # cleanup buckets_empty() -s3fs::s3_file_system() -invisible(env64$s3 <- set_s3_interface("aws")) +Sys.unsetenv("AWS_PROFILE") diff --git a/tests/testthat/test-s3.R b/tests/testthat/test-s3.R index 50e94a4..1c86a62 100644 --- a/tests/testthat/test-s3.R +++ b/tests/testthat/test-s3.R @@ -1,7 +1,6 @@ -skip_on_ci() skip_if_not(localstack_available(), "LocalStack Not Available") -invisible(env64$s3 <- set_s3_interface("localstack")) +Sys.setenv(AWS_PROFILE = "localstack") buckets_empty() test_that("aws_s3_policy_doc_create", { @@ -238,4 +237,4 @@ test_that("six_bucket_remove_user", { # cleanup buckets_empty() -invisible(env64$s3 <- set_s3_interface("aws")) +Sys.unsetenv("AWS_PROFILE")
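The sketch below walks through the workflow this patch enables, mirroring the
test setup above. It is illustrative only: the package name (sixtyfour, per
the env64/s64 naming), a running local Minio with MINIO_USER, MINIO_PWD, and
MINIO_ENDPOINT set, and the random_string() helper used in the test suite are
assumptions, not things the patch itself guarantees.

library(sixtyfour) # assumed package name

# con_s3() and con_s3fs() read AWS_PROFILE on every call, so switching
# backends is just an env var change; no S3 client is cached at load
# time anymore (the old env64$s3 set in .onLoad is gone)
Sys.setenv(AWS_PROFILE = "minio")

# six_file_upload() creates the bucket if needed (force = TRUE skips the
# prompt), calls put_object() once per file with basename(path) as the
# key, and returns the remote s3 paths
bucket <- random_string("bucket")
demo_rds_file <- file.path(system.file(), "Meta/demo.rds")
six_file_upload(demo_rds_file, bucket, force = TRUE)

# With the profile unset, con_s3() falls back to paws::s3() and
# con_s3fs() to the default AWS credentials, so the same calls hit AWS
Sys.unsetenv("AWS_PROFILE")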