Skip to content
This repository has been archived by the owner on Oct 29, 2024. It is now read-only.

Commit

Permalink
Adds the option of specifying table names
Browse files Browse the repository at this point in the history
Allows us to target specific tables, or back up everything.
  • Loading branch information
jadudm committed Apr 22, 2024
1 parent c135326 commit c1a7fe4
Show file tree
Hide file tree
Showing 2 changed files with 75 additions and 32 deletions.
98 changes: 66 additions & 32 deletions cmd/dumpDbToS3.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,10 @@ package cmd
import (
"database/sql"
"fmt"
"net/url"
"os"
"path/filepath"
"slices"

_ "github.com/lib/pq"

Expand All @@ -19,7 +22,9 @@ import (

// Flag and derived values shared by the dumpDbToS3 command.
var (
	db     string // --db flag: name of the source aws-rds service instance
	bucket string // S3 bucket name, parsed from the host part of --s3path
	s3path string // --s3path flag: full s3://bucket/prefix destination
	key    string // object key prefix, parsed from the path part of --s3path
)

func get_table_and_schema_names(source_creds vcap.Credentials) map[string]string {
Expand All @@ -28,14 +33,14 @@ func get_table_and_schema_names(source_creds vcap.Credentials) map[string]string
if err != nil {
logging.Logger.Println("DUMPDBTOS3 could not connect to DB for table-by-table dump")
logging.Logger.Printf("DUMPDBTOS3 %s\n", err)
os.Exit(-1)
os.Exit(logging.DB_SCHEMA_SCAN_FAILURE)
}

tables, err := db.Query("SELECT schemaname, tablename FROM pg_tables WHERE schemaname = 'public'")
if err != nil {
logging.Logger.Println("DUMPDBTOS3 could not get table names for table-by-table dump")
logging.Logger.Printf("DUMPDBTOS3 %s\n", err)
os.Exit(-1)
os.Exit(logging.DB_SCHEMA_SCAN_FAILURE)
}

table_names := make(map[string]string, 0)
Expand All @@ -45,84 +50,112 @@ func get_table_and_schema_names(source_creds vcap.Credentials) map[string]string
var schema string
if err := tables.Scan(&schema, &table); err != nil {
logging.Logger.Println("DUMPDBTOS3 could not scan table names in SELECT")
os.Exit(-1)
os.Exit(logging.DB_SCHEMA_SCAN_FAILURE)
}
table_names[table] = schema
}

return table_names
}
func bucket_local_tables(source_creds vcap.Credentials, up vcap.Credentials) {
func bucket_local_tables(source_creds vcap.Credentials, up vcap.Credentials, table_names []string) {
var BACKUP_ALL = len(table_names) == 0

logging.Logger.Printf("DUMPDBTOS3 backing up from %s to %s\n",
source_creds.Get("name").String(),
up.Get("name").String(),
)
table_to_schema := get_table_and_schema_names(source_creds)

for table, schema := range table_to_schema {
mc_pipe := pipes.Mc(
pipes.PG_Dump_Table(source_creds, schema, table),
up,
fmt.Sprintf("%s/%s-%s.dump", s3path, schema, table),
)
mc_pipe.Wait()
if err := mc_pipe.Error(); err != nil {
logging.Logger.Println("DUMPDBTOS3 `dump | mc` pipe failed")
os.Exit(-1)
// Back up tables under two conditions:
// 1. When it is in a list of names we want backed up, or
// 2. When there are no names in the list (backup all).
if slices.Contains(table_names, table) || BACKUP_ALL {
mc_pipe := pipes.Mc(
pipes.PG_Dump_Table(source_creds, schema, table),
up,
fmt.Sprintf("%s%s/%s-%s.dump", bucket, key, schema, table),
)
mc_pipe.Wait()
if err := mc_pipe.Error(); err != nil {
logging.Logger.Println("DUMPDBTOS3 `dump | mc` pipe failed")
os.Exit(logging.PIPE_FAILURE)
}
}
}

}

func bucket_cgov_tables(source_creds vcap.Credentials, up vcap.Credentials) {
func bucket_cgov_tables(source_creds vcap.Credentials, up vcap.Credentials, table_names []string) {
var BACKUP_ALL = len(table_names) == 0

table_to_schema := get_table_and_schema_names(source_creds)
for table, schema := range table_to_schema {
s3_pipe := pipes.S3(
pipes.PG_Dump_Table(source_creds, schema, table),
up,
fmt.Sprintf("%s/%s-%s.dump", s3path, schema, table),
)
s3_pipe.Wait()
if err := s3_pipe.Error(); err != nil {
logging.Logger.Println("DUMPDBTOS3 `dump | s3` pipe failed")
os.Exit(-1)
if slices.Contains(table_names, table) || BACKUP_ALL {
s3_pipe := pipes.S3(
pipes.PG_Dump_Table(source_creds, schema, table),
up,
fmt.Sprintf("%s%s/%s-%s.dump", bucket, key, schema, table),
)
s3_pipe.Wait()
if err := s3_pipe.Error(); err != nil {
logging.Logger.Println("DUMPDBTOS3 `dump | s3` pipe failed")
os.Exit(logging.PIPE_FAILURE)
}
}
}
}

// dumpDbToS3Cmd represents the dumpDbToS3 command
var dumpDbToS3Cmd = &cobra.Command{
Use: "dumpDbToS3",
Args: cobra.ArbitraryArgs,
Short: "Dumps a full database to a file in S3",
Long: `Dumps a full database to a file in S3`,
Run: func(cmd *cobra.Command, args []string) {
Long: `Dumps a full database to a file in S3
Takes 0 or more table names as arguments. If no arguments are
provided, all tables are backed up.
`,
Run: func(cmd *cobra.Command, table_names []string) {
u, err := url.Parse(s3path)
if err != nil {
logging.Logger.Printf("DUMPDBTOS3 could not parse s3 path: %s", s3path)
os.Exit(logging.S3_PATH_PARSE_ERROR)
}
if u.Scheme != "s3" {
logging.Logger.Printf("DUMPDBTOS3 does not look like an S3 path (e.g. `s3://`): %s", s3path)
os.Exit(logging.S3_PATH_PARSE_ERROR)
}
bucket = filepath.Clean(u.Host)
key = filepath.Clean(u.Path)

// Check that we can get credentials.
db_creds, err := vcap.VCS.GetCredentials("aws-rds", db)
if err != nil {
logging.Logger.Printf("DUMPDBTOS3 could not get DB credentials for %s", db)
os.Exit(-1)
os.Exit(logging.COULD_NOT_FIND_CREDENTIALS)
}

switch os.Getenv("ENV") {
case "LOCAL":
fallthrough
case "TESTING":
up, err := vcap.VCS.GetCredentials("user-provided", "backups")
up, err := vcap.VCS.GetCredentials("user-provided", bucket)
if err != nil {
logging.Logger.Printf("DUMPDBTOS3 could not get minio credentials")
os.Exit(-1)
os.Exit(logging.COULD_NOT_FIND_CREDENTIALS)
}
bucket_local_tables(db_creds, up)
bucket_local_tables(db_creds, up, table_names)
case "DEV":
fallthrough
case "STAGING":
fallthrough
case "PRODUCTION":
up, err := vcap.VCS.GetCredentials("aws-rds", s3path)
up, err := vcap.VCS.GetCredentials("aws-rds", bucket)
if err != nil {
logging.Logger.Printf("DUMPDBTOS3 could not get s3 credentials")
os.Exit(-1)
os.Exit(logging.COULD_NOT_FIND_CREDENTIALS)
}
bucket_cgov_tables(db_creds, up)
bucket_cgov_tables(db_creds, up, table_names)

}
},
Expand All @@ -134,6 +167,7 @@ func init() {
dumpDbToS3Cmd.Flags().StringVarP(&s3path, "s3path", "", "", "destination path")

dumpDbToS3Cmd.MarkFlagRequired("db")
dumpDbToS3Cmd.MarkFlagRequired("bucket")
dumpDbToS3Cmd.MarkFlagRequired("s3path")

}
9 changes: 9 additions & 0 deletions internal/logging/error_codes.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
package logging

// Process exit codes passed to os.Exit by the backup commands. Each failure
// category gets a distinct value. Note that on POSIX systems the status the
// shell observes is the value modulo 256, so these negative codes appear as
// 246, 236, 226, and 216 respectively.
//
// Names are kept in SCREAMING_SNAKE_CASE (rather than idiomatic Go MixedCaps)
// for compatibility with existing callers.
const (
	S3_PATH_PARSE_ERROR        = -10 // --s3path was not a parseable s3:// URL
	COULD_NOT_FIND_CREDENTIALS = -20 // VCAP service credential lookup failed
	PIPE_FAILURE               = -30 // a dump|upload pipeline reported an error
	DB_SCHEMA_SCAN_FAILURE     = -40 // querying or scanning pg_tables failed
)

0 comments on commit c1a7fe4

Please sign in to comment.