diff --git a/cmd/icingadb-migrate/convert.go b/cmd/icingadb-migrate/convert.go index 5cfa7bdcb..0a875e5c1 100644 --- a/cmd/icingadb-migrate/convert.go +++ b/cmd/icingadb-migrate/convert.go @@ -3,11 +3,12 @@ package main import ( "database/sql" _ "embed" - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingadb/v1/history" - icingadbTypes "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/jmoiron/sqlx" "github.com/pkg/errors" "strconv" @@ -48,10 +49,10 @@ type commentRow = struct { } func convertCommentRows( - env string, envId icingadbTypes.Binary, + env string, envId types.Binary, _ func(interface{}, string, ...interface{}), _ *sqlx.Tx, idoRows []commentRow, ) (stages []icingaDbOutputStage, checkpoint any) { - var commentHistory, acknowledgementHistory, allHistoryComment, allHistoryAck []contracts.Entity + var commentHistory, acknowledgementHistory, allHistoryComment, allHistoryAck []database.Entity for _, row := range idoRows { checkpoint = row.CommenthistoryId @@ -81,14 +82,14 @@ func convertCommentRows( }, CommentHistoryUpserter: history.CommentHistoryUpserter{ RemoveTime: removeTime, - HasBeenRemoved: icingadbTypes.Bool{Bool: !removeTime.Time().IsZero(), Valid: true}, + HasBeenRemoved: types.Bool{Bool: !removeTime.Time().IsZero(), Valid: true}, }, EntryTime: entryTime, Author: row.AuthorName, Comment: row.CommentData, EntryType: icingadbTypes.CommentType(row.EntryType), - IsPersistent: icingadbTypes.Bool{Bool: row.IsPersistent != 0, Valid: true}, - IsSticky: icingadbTypes.Bool{Bool: false, Valid: true}, + IsPersistent: types.Bool{Bool: row.IsPersistent != 0, Valid: true}, + IsSticky: types.Bool{Bool: false, Valid: true}, ExpireTime: expireTime, }) @@ -150,10 +151,10 @@ func convertCommentRows( }, AckHistoryUpserter: history.AckHistoryUpserter{ClearTime: clearTime}, SetTime: setTime, - Author: icingadbTypes.MakeString(row.AuthorName), - Comment: icingadbTypes.MakeString(row.CommentData), + Author: types.MakeString(row.AuthorName), + Comment: types.MakeString(row.CommentData), ExpireTime: convertTime(row.ExpirationTime, 0), - IsPersistent: icingadbTypes.Bool{ + IsPersistent: types.Bool{ Bool: row.IsPersistent != 0, Valid: true, }, @@ -234,10 +235,10 @@ type downtimeRow = struct { } func convertDowntimeRows( - env string, envId icingadbTypes.Binary, + env string, envId types.Binary, _ func(interface{}, string, ...interface{}), _ *sqlx.Tx, idoRows []downtimeRow, ) (stages []icingaDbOutputStage, checkpoint any) { - var downtimeHistory, allHistory, sla []contracts.Entity + var downtimeHistory, allHistory, sla []database.Entity for _, row := range idoRows { checkpoint = row.DowntimehistoryId @@ -255,10 +256,10 @@ func convertDowntimeRows( triggerTime := convertTime(row.TriggerTime, 0) actualStart := convertTime(row.ActualStartTime, row.ActualStartTimeUsec) actualEnd := convertTime(row.ActualEndTime, row.ActualEndTimeUsec) - var startTime, endTime, cancelTime icingadbTypes.UnixMilli + var startTime, endTime, cancelTime types.UnixMilli if scheduledEnd.Time().IsZero() { - scheduledEnd = icingadbTypes.UnixMilli(scheduledStart.Time().Add(time.Duration(row.Duration) * time.Second)) + scheduledEnd = types.UnixMilli(scheduledStart.Time().Add(time.Duration(row.Duration) * 
time.Second)) } if actualStart.Time().IsZero() { @@ -290,14 +291,14 @@ func convertDowntimeRows( ServiceId: serviceId, }, DowntimeHistoryUpserter: history.DowntimeHistoryUpserter{ - HasBeenCancelled: icingadbTypes.Bool{Bool: row.WasCancelled != 0, Valid: true}, + HasBeenCancelled: types.Bool{Bool: row.WasCancelled != 0, Valid: true}, CancelTime: cancelTime, }, TriggeredById: calcObjectId(env, row.TriggeredBy), EntryTime: convertTime(row.EntryTime, 0), Author: row.AuthorName, Comment: row.CommentData, - IsFlexible: icingadbTypes.Bool{Bool: row.IsFixed == 0, Valid: true}, + IsFlexible: types.Bool{Bool: row.IsFixed == 0, Valid: true}, FlexibleDuration: uint64(row.Duration) * 1000, ScheduledStartTime: scheduledStart, ScheduledEndTime: scheduledEnd, @@ -336,7 +337,7 @@ func convertDowntimeRows( StartTime: startTime, CancelTime: cancelTime, EndTime: endTime, - HasBeenCancelled: icingadbTypes.Bool{Bool: row.WasCancelled != 0, Valid: true}, + HasBeenCancelled: types.Bool{Bool: row.WasCancelled != 0, Valid: true}, } h2.EventTime.History = h2 @@ -352,7 +353,7 @@ func convertDowntimeRows( ServiceId: serviceId, }, DowntimeStart: startTime, - HasBeenCancelled: icingadbTypes.Bool{Bool: row.WasCancelled != 0, Valid: true}, + HasBeenCancelled: types.Bool{Bool: row.WasCancelled != 0, Valid: true}, CancelTime: cancelTime, EndTime: endTime, } @@ -383,7 +384,7 @@ type flappingRow = struct { } func convertFlappingRows( - env string, envId icingadbTypes.Binary, + env string, envId types.Binary, selectCache func(dest interface{}, query string, args ...interface{}), _ *sqlx.Tx, idoRows []flappingRow, ) (stages []icingaDbOutputStage, checkpoint any) { if len(idoRows) < 1 { @@ -401,12 +402,12 @@ func convertFlappingRows( ) // Needed for start time (see below). - cachedById := make(map[uint64]icingadbTypes.UnixMilli, len(cached)) + cachedById := make(map[uint64]types.UnixMilli, len(cached)) for _, c := range cached { cachedById[c.HistoryId] = convertTime(c.EventTime, c.EventTimeUsec) } - var flappingHistory, flappingHistoryUpserts, allHistory []contracts.Entity + var flappingHistory, flappingHistoryUpserts, allHistory []database.Entity for _, row := range idoRows { checkpoint = row.FlappinghistoryId @@ -417,7 +418,7 @@ func convertFlappingRows( ts := convertTime(row.EventTime.Int64, row.EventTimeUsec) // Needed for ID (see below). 
- var start icingadbTypes.UnixMilli + var start types.UnixMilli if row.EventType == 1001 { // end var ok bool start, ok = cachedById[row.FlappinghistoryId] @@ -454,7 +455,7 @@ func convertFlappingRows( }, FlappingHistoryUpserter: history.FlappingHistoryUpserter{ EndTime: ts, - PercentStateChangeEnd: icingadbTypes.Float{NullFloat64: row.PercentStateChange}, + PercentStateChangeEnd: types.Float{NullFloat64: row.PercentStateChange}, FlappingThresholdLow: float32(row.LowThreshold), FlappingThresholdHigh: float32(row.HighThreshold), }, @@ -495,7 +496,7 @@ func convertFlappingRows( FlappingThresholdHigh: float32(row.HighThreshold), }, StartTime: start, - PercentStateChangeStart: icingadbTypes.Float{NullFloat64: row.PercentStateChange}, + PercentStateChangeStart: types.Float{NullFloat64: row.PercentStateChange}, }) h := &history.HistoryFlapping{ @@ -541,7 +542,7 @@ type notificationRow = struct { } func convertNotificationRows( - env string, envId icingadbTypes.Binary, + env string, envId types.Binary, selectCache func(dest interface{}, query string, args ...interface{}), ido *sqlx.Tx, idoRows []notificationRow, ) (stages []icingaDbOutputStage, checkpoint any) { if len(idoRows) < 1 { @@ -590,7 +591,7 @@ func convertNotificationRows( perId[contact.Name1] = struct{}{} } - var notificationHistory, userNotificationHistory, allHistory []contracts.Entity + var notificationHistory, userNotificationHistory, allHistory []database.Entity for _, row := range idoRows { checkpoint = row.NotificationId @@ -646,7 +647,7 @@ func convertNotificationRows( SendTime: ts, State: row.State, PreviousHardState: previousHardState, - Text: icingadbTypes.MakeString(text), + Text: types.MakeString(text), UsersNotified: row.ContactsNotified, }) @@ -737,7 +738,7 @@ type stateRow = struct { } func convertStateRows( - env string, envId icingadbTypes.Binary, + env string, envId types.Binary, selectCache func(dest interface{}, query string, args ...interface{}), _ *sqlx.Tx, idoRows []stateRow, ) (stages []icingaDbOutputStage, checkpoint any) { if len(idoRows) < 1 { @@ -758,7 +759,7 @@ func convertStateRows( cachedById[c.HistoryId] = c.PreviousHardState } - var stateHistory, allHistory, sla []contracts.Entity + var stateHistory, allHistory, sla []database.Entity for _, row := range idoRows { checkpoint = row.StatehistoryId @@ -799,10 +800,10 @@ func convertStateRows( PreviousSoftState: row.LastState, PreviousHardState: previousHardState, CheckAttempt: row.CurrentCheckAttempt, - Output: icingadbTypes.String{NullString: row.Output}, - LongOutput: icingadbTypes.String{NullString: row.LongOutput}, + Output: types.String{NullString: row.Output}, + LongOutput: types.String{NullString: row.LongOutput}, MaxCheckAttempts: row.MaxCheckAttempts, - CheckSource: icingadbTypes.String{NullString: row.CheckSource}, + CheckSource: types.String{NullString: row.CheckSource}, }) allHistory = append(allHistory, &history.HistoryState{ diff --git a/cmd/icingadb-migrate/main.go b/cmd/icingadb-migrate/main.go index c089e78a7..15faddd08 100644 --- a/cmd/icingadb-migrate/main.go +++ b/cmd/icingadb-migrate/main.go @@ -8,11 +8,11 @@ import ( "fmt" "github.com/creasty/defaults" "github.com/goccy/go-yaml" - "github.com/icinga/icingadb/pkg/config" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" "github.com/icinga/icingadb/pkg/icingadb" - "github.com/icinga/icingadb/pkg/logging" - icingadbTypes 
"github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/jessevdk/go-flags" "github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx/reflectx" @@ -41,11 +41,11 @@ type Flags struct { // Config defines the YAML config structure. type Config struct { IDO struct { - config.Database `yaml:"-,inline"` + database.Config `yaml:"-,inline"` From int32 `yaml:"from"` To int32 `yaml:"to" default:"2147483647"` } `yaml:"ido"` - IcingaDB config.Database `yaml:"icingadb"` + IcingaDB database.Config `yaml:"icingadb"` // Icinga2 specifies information the IDO doesn't provide. Icinga2 struct { // Env specifies the environment ID, hex. @@ -78,7 +78,7 @@ func main() { ido, idb := connectAll(c) - if err := idb.CheckSchema(context.Background()); err != nil { + if err := icingadb.CheckSchema(context.Background(), idb); err != nil { log.Fatalf("%+v", err) } @@ -135,7 +135,7 @@ func parseConfig(f *Flags) (_ *Config, exit int) { var nonWords = regexp.MustCompile(`\W+`) // mkCache ensures /.sqlite3 files are present and contain their schema -// and initializes types[*].cache. (On non-recoverable errors the whole program exits.) +// and initializes typesToMigrate[*].cache. (On non-recoverable errors the whole program exits.) func mkCache(f *Flags, c *Config, mapper *reflectx.Mapper) { log.Info("Preparing cache") @@ -143,7 +143,7 @@ func mkCache(f *Flags, c *Config, mapper *reflectx.Mapper) { log.With("dir", f.Cache).Fatalf("%+v", errors.Wrap(err, "can't create directory")) } - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { if ht.cacheSchema == "" { return } @@ -170,12 +170,12 @@ func mkCache(f *Flags, c *Config, mapper *reflectx.Mapper) { } // connectAll connects to ido and idb (Icinga DB) as c specifies. (On non-recoverable errors the whole program exits.) -func connectAll(c *Config) (ido, idb *icingadb.DB) { +func connectAll(c *Config) (ido, idb *database.DB) { log.Info("Connecting to databases") eg, _ := errgroup.WithContext(context.Background()) eg.Go(func() error { - ido = connect("IDO", &c.IDO.Database) + ido = connect("IDO", &c.IDO.Config) return nil }) @@ -189,8 +189,12 @@ func connectAll(c *Config) (ido, idb *icingadb.DB) { } // connect connects to which DB as cfg specifies. (On non-recoverable errors the whole program exits.) -func connect(which string, cfg *config.Database) *icingadb.DB { - db, err := cfg.Open(logging.NewLogger(zap.NewNop().Sugar(), 20*time.Second)) +func connect(which string, cfg *database.Config) *database.DB { + db, err := database.NewDbFromConfig( + cfg, + logging.NewLogger(zap.NewNop().Sugar(), 20*time.Second), + database.RetryConnectorCallbacks{}, + ) if err != nil { log.With("backend", which).Fatalf("%+v", errors.Wrap(err, "can't connect to database")) } @@ -202,10 +206,10 @@ func connect(which string, cfg *config.Database) *icingadb.DB { return db } -// startIdoTx initializes types[*].snapshot with new repeatable-read-isolated ido transactions. +// startIdoTx initializes typesToMigrate[*].snapshot with new repeatable-read-isolated ido transactions. // (On non-recoverable errors the whole program exits.) 
-func startIdoTx(ido *icingadb.DB) { - types.forEach(func(ht *historyType) { +func startIdoTx(ido *database.DB) { + typesToMigrate.forEach(func(ht *historyType) { tx, err := ido.BeginTxx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}) if err != nil { log.Fatalf("%+v", errors.Wrap(err, "can't begin snapshot transaction")) @@ -215,10 +219,10 @@ func startIdoTx(ido *icingadb.DB) { }) } -// computeIdRange initializes types[*].fromId and types[*].toId. +// computeIdRange initializes typesToMigrate[*].fromId and typesToMigrate[*].toId. // (On non-recoverable errors the whole program exits.) func computeIdRange(c *Config) { - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { getBorderId := func(id *uint64, timeColumns []string, compOperator string, borderTime int32, sortOrder string) { deZeroFied := make([]string, 0, len(timeColumns)) for _, column := range timeColumns { @@ -257,15 +261,15 @@ func computeIdRange(c *Config) { //go:embed embed/ido_migration_progress_schema.sql var idoMigrationProgressSchema string -// computeProgress initializes types[*].lastId, types[*].total and types[*].done. +// computeProgress initializes typesToMigrate[*].lastId, typesToMigrate[*].total and typesToMigrate[*].done. // (On non-recoverable errors the whole program exits.) -func computeProgress(c *Config, idb *icingadb.DB, envId []byte) { +func computeProgress(c *Config, idb *database.DB, envId []byte) { if _, err := idb.Exec(idoMigrationProgressSchema); err != nil { log.Fatalf("%+v", errors.Wrap(err, "can't create table ido_migration_progress")) } envIdHex := hex.EncodeToString(envId) - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { var query = idb.Rebind( "SELECT last_ido_id FROM ido_migration_progress" + " WHERE environment_id=? AND history_type=? AND from_ts=? AND to_ts=?", @@ -279,7 +283,7 @@ func computeProgress(c *Config, idb *icingadb.DB, envId []byte) { } }) - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { if ht.cacheFiller != nil { err := ht.snapshot.Get( &ht.cacheTotal, @@ -297,7 +301,7 @@ func computeProgress(c *Config, idb *icingadb.DB, envId []byte) { } }) - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { var rows []struct { Migrated uint8 Cnt int64 @@ -330,16 +334,16 @@ func computeProgress(c *Config, idb *icingadb.DB, envId []byte) { }) } -// fillCache fills /.sqlite3 (actually types[*].cacheFiller does). +// fillCache fills /.sqlite3 (actually typesToMigrate[*].cacheFiller does). func fillCache() { progress := mpb.New() - for _, ht := range types { + for _, ht := range typesToMigrate { if ht.cacheFiller != nil { ht.setupBar(progress, ht.cacheTotal) } } - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { if ht.cacheFiller != nil { ht.cacheFiller(ht) } @@ -349,13 +353,13 @@ func fillCache() { } // migrate does the actual migration. -func migrate(c *Config, idb *icingadb.DB, envId []byte) { +func migrate(c *Config, idb *database.DB, envId []byte) { progress := mpb.New() - for _, ht := range types { + for _, ht := range typesToMigrate { ht.setupBar(progress, ht.total) } - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { ht.migrate(c, idb, envId, ht) }) @@ -364,8 +368,8 @@ func migrate(c *Config, idb *icingadb.DB, envId []byte) { // migrate does the actual migration for one history type. 
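On startIdoTx above: the snapshot transactions use REPEATABLE READ so that every chunked read of one history type sees the same IDO state, keeping the precomputed ID range valid while Icinga 2 keeps writing. A minimal sketch of the same pattern (error handling condensed; assumes a non-empty icinga_commenthistory table):

```go
package example

import (
	"context"
	"database/sql"

	"github.com/jmoiron/sqlx"
)

// snapshotRange reads a stable ID window inside one repeatable-read
// transaction, the isolation level startIdoTx requests.
func snapshotRange(ctx context.Context, db *sqlx.DB) (min, max uint64, err error) {
	tx, err := db.BeginTxx(ctx, &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
	if err != nil {
		return 0, 0, err
	}
	defer func() { _ = tx.Rollback() }() // read-only snapshot: rollback is fine

	// Both queries observe the same snapshot, even with concurrent writers.
	if err = tx.GetContext(ctx, &min, "SELECT MIN(commenthistory_id) FROM icinga_commenthistory"); err != nil {
		return 0, 0, err
	}
	err = tx.GetContext(ctx, &max, "SELECT MAX(commenthistory_id) FROM icinga_commenthistory")

	return min, max, err
}
```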
func migrateOneType[IdoRow any]( - c *Config, idb *icingadb.DB, envId []byte, ht *historyType, - convertRows func(env string, envId icingadbTypes.Binary, + c *Config, idb *database.DB, envId []byte, ht *historyType, + convertRows func(env string, envId types.Binary, selectCache func(dest interface{}, query string, args ...interface{}), ido *sqlx.Tx, idoRows []IdoRow) (stages []icingaDbOutputStage, checkpoint any), ) { @@ -431,7 +435,7 @@ func migrateOneType[IdoRow any]( ch := utils.ChanFromSlice(stage.insert) if err := idb.CreateIgnoreStreamed(context.Background(), ch); err != nil { - log.With("backend", "Icinga DB", "op", "INSERT IGNORE", "table", utils.TableName(stage.insert[0])). + log.With("backend", "Icinga DB", "op", "INSERT IGNORE", "table", database.TableName(stage.insert[0])). Fatalf("%+v", errors.Wrap(err, "can't perform DML")) } } @@ -440,7 +444,7 @@ func migrateOneType[IdoRow any]( ch := utils.ChanFromSlice(stage.upsert) if err := idb.UpsertStreamed(context.Background(), ch); err != nil { - log.With("backend", "Icinga DB", "op", "UPSERT", "table", utils.TableName(stage.upsert[0])). + log.With("backend", "Icinga DB", "op", "UPSERT", "table", database.TableName(stage.upsert[0])). Fatalf("%+v", errors.Wrap(err, "can't perform DML")) } } @@ -468,7 +472,7 @@ func migrateOneType[IdoRow any]( // cleanupCache removes /.sqlite3 files. func cleanupCache(f *Flags) { - types.forEach(func(ht *historyType) { + typesToMigrate.forEach(func(ht *historyType) { if ht.cacheFile != "" { if err := ht.cache.Close(); err != nil { log.With("file", ht.cacheFile).Warnf("%+v", errors.Wrap(err, "can't close SQLite database")) diff --git a/cmd/icingadb-migrate/misc.go b/cmd/icingadb-migrate/misc.go index b8d358fff..3d33ef8c2 100644 --- a/cmd/icingadb-migrate/misc.go +++ b/cmd/icingadb-migrate/misc.go @@ -3,10 +3,9 @@ package main import ( "context" "crypto/sha1" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/icingadb" - "github.com/icinga/icingadb/pkg/icingadb/objectpacker" - icingadbTypes "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/objectpacker" + "github.com/icinga/icinga-go-library/types" "github.com/jmoiron/sqlx" "github.com/pkg/errors" "github.com/vbauerster/mpb/v6" @@ -36,8 +35,8 @@ type IdoMigrationProgress struct { // Assert interface compliance. var ( - _ contracts.Upserter = (*IdoMigrationProgressUpserter)(nil) - _ contracts.Upserter = (*IdoMigrationProgress)(nil) + _ database.Upserter = (*IdoMigrationProgressUpserter)(nil) + _ database.Upserter = (*IdoMigrationProgress)(nil) ) // log is the root logger. @@ -64,12 +63,12 @@ func hashAny(in interface{}) []byte { } // convertTime converts *nix timestamps from the IDO for Icinga DB. -func convertTime(ts int64, tsUs uint32) icingadbTypes.UnixMilli { +func convertTime(ts int64, tsUs uint32) types.UnixMilli { if ts == 0 && tsUs == 0 { - return icingadbTypes.UnixMilli{} + return types.UnixMilli{} } - return icingadbTypes.UnixMilli(time.Unix(ts, int64(tsUs)*int64(time.Microsecond/time.Nanosecond))) + return types.UnixMilli(time.Unix(ts, int64(tsUs)*int64(time.Microsecond/time.Nanosecond))) } // calcObjectId calculates the ID of the config object named name1 for Icinga DB. 
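migrateOneType above pushes each stage's entities through channels into the streaming DML helpers. utils.ChanFromSlice itself is not shown in this diff; the sketch below assumes it simply exposes a slice as a pre-filled, closed channel, which is all the call sites need:

```go
package example

import (
	"context"

	"github.com/icinga/icinga-go-library/database"
)

// chanFromSlice is an assumed stand-in for utils.ChanFromSlice: a buffered,
// already-closed channel the streaming helpers can drain.
func chanFromSlice[T any](in []T) <-chan T {
	ch := make(chan T, len(in))
	for _, v := range in {
		ch <- v
	}
	close(ch)

	return ch
}

// writeStage mirrors the two staged writes in migrateOneType.
func writeStage(ctx context.Context, idb *database.DB, stageInsert, stageUpsert []database.Entity) error {
	if len(stageInsert) > 0 {
		if err := idb.CreateIgnoreStreamed(ctx, chanFromSlice(stageInsert)); err != nil {
			return err
		}
	}

	if len(stageUpsert) > 0 {
		if err := idb.UpsertStreamed(ctx, chanFromSlice(stageUpsert)); err != nil {
			return err
		}
	}

	return nil
}
```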
@@ -109,7 +108,7 @@ func sliceIdoHistory[Row any]( args["checkpoint"] = checkpoint args["bulk"] = 20000 - if ht.snapshot.DriverName() != icingadb.MySQL { + if ht.snapshot.DriverName() != database.MySQL { query = strings.ReplaceAll(query, " USE INDEX (PRIMARY)", "") } @@ -179,7 +178,7 @@ type historyType struct { // migrationQuery SELECTs source data for actual migration. migrationQuery string // migrate does the actual migration. - migrate func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) + migrate func(c *Config, idb *database.DB, envId []byte, ht *historyType) // cacheFile locates .sqlite3. cacheFile string @@ -237,10 +236,10 @@ func (hts historyTypes) forEach(f func(*historyType)) { } type icingaDbOutputStage struct { - insert, upsert []contracts.Entity + insert, upsert []database.Entity } -var types = historyTypes{ +var typesToMigrate = historyTypes{ { name: "ack & comment", idoTable: "icinga_commenthistory", @@ -249,7 +248,7 @@ var types = historyTypes{ // Manual deletion time wins vs. time of expiration which never happens due to manual deletion. idoEndColumns: []string{"deletion_time", "expiration_time"}, migrationQuery: commentMigrationQuery, - migrate: func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) { + migrate: func(c *Config, idb *database.DB, envId []byte, ht *historyType) { migrateOneType(c, idb, envId, ht, convertCommentRows) }, }, @@ -261,7 +260,7 @@ var types = historyTypes{ idoStartColumns: []string{"actual_start_time", "scheduled_start_time"}, idoEndColumns: []string{"actual_end_time", "scheduled_end_time"}, migrationQuery: downtimeMigrationQuery, - migrate: func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) { + migrate: func(c *Config, idb *database.DB, envId []byte, ht *historyType) { migrateOneType(c, idb, envId, ht, convertDowntimeRows) }, }, @@ -279,7 +278,7 @@ var types = historyTypes{ }) }, migrationQuery: flappingMigrationQuery, - migrate: func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) { + migrate: func(c *Config, idb *database.DB, envId []byte, ht *historyType) { migrateOneType(c, idb, envId, ht, convertFlappingRows) }, }, @@ -297,7 +296,7 @@ var types = historyTypes{ }, cacheLimitQuery: "SELECT MAX(history_id) FROM previous_hard_state", migrationQuery: notificationMigrationQuery, - migrate: func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) { + migrate: func(c *Config, idb *database.DB, envId []byte, ht *historyType) { migrateOneType(c, idb, envId, ht, convertNotificationRows) }, }, @@ -313,7 +312,7 @@ var types = historyTypes{ }, cacheLimitQuery: "SELECT MAX(history_id) FROM previous_hard_state", migrationQuery: stateMigrationQuery, - migrate: func(c *Config, idb *icingadb.DB, envId []byte, ht *historyType) { + migrate: func(c *Config, idb *database.DB, envId []byte, ht *historyType) { migrateOneType(c, idb, envId, ht, convertStateRows) }, }, diff --git a/cmd/icingadb/main.go b/cmd/icingadb/main.go index 4e165eb91..d5fe38f1a 100644 --- a/cmd/icingadb/main.go +++ b/cmd/icingadb/main.go @@ -3,6 +3,9 @@ package main import ( "context" "fmt" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/utils" "github.com/icinga/icingadb/internal" "github.com/icinga/icingadb/internal/command" "github.com/icinga/icingadb/pkg/common" @@ -12,11 +15,8 @@ import ( v1 "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingaredis" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - 
"github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/utils" "github.com/okzk/sdnotify" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "go.uber.org/zap" "golang.org/x/sync/errgroup" "os" @@ -39,16 +39,12 @@ func main() { func run() int { cmd := command.New() - logs, err := logging.NewLogging( - utils.AppName(), - cmd.Config.Logging.Level, - cmd.Config.Logging.Output, - cmd.Config.Logging.Options, - cmd.Config.Logging.Interval, - ) + + logs, err := logging.NewLoggingFromConfig(utils.AppName(), cmd.Config.Logging) if err != nil { - utils.Fatal(errors.Wrap(err, "can't configure logging")) + utils.PrintErrorThenExit(err, ExitFailure) } + // When started by systemd, NOTIFY_SOCKET is set by systemd for Type=notify supervised services, which is the // default setting for the Icinga DB service. So we notify that Icinga DB finished starting up. _ = sdnotify.Ready() @@ -64,14 +60,14 @@ func run() int { } defer db.Close() { - logger.Infof("Connecting to database at '%s'", utils.JoinHostPort(cmd.Config.Database.Host, cmd.Config.Database.Port)) + logger.Infof("Connecting to database at '%s'", db.GetAddr()) err := db.Ping() if err != nil { logger.Fatalf("%+v", errors.Wrap(err, "can't connect to database")) } } - if err := db.CheckSchema(context.Background()); err != nil { + if err := icingadb.CheckSchema(context.Background(), db); err != nil { logger.Fatalf("%+v", err) } @@ -80,7 +76,7 @@ func run() int { logger.Fatalf("%+v", errors.Wrap(err, "can't create Redis client from config")) } { - logger.Infof("Connecting to Redis at '%s'", utils.JoinHostPort(cmd.Config.Redis.Host, cmd.Config.Redis.Port)) + logger.Infof("Connecting to Redis at '%s'", rc.GetAddr()) _, err := rc.Ping(context.Background()).Result() if err != nil { logger.Fatalf("%+v", errors.Wrap(err, "can't connect to Redis")) @@ -356,7 +352,7 @@ func run() int { } // monitorRedisSchema monitors rc's icinga:schema version validity. -func monitorRedisSchema(logger *logging.Logger, rc *icingaredis.Client, pos string) { +func monitorRedisSchema(logger *logging.Logger, rc *redis.Client, pos string) { for { var err error pos, err = checkRedisSchema(logger, rc, pos) @@ -368,7 +364,7 @@ func monitorRedisSchema(logger *logging.Logger, rc *icingaredis.Client, pos stri } // checkRedisSchema verifies rc's icinga:schema version. 
-func checkRedisSchema(logger *logging.Logger, rc *icingaredis.Client, pos string) (newPos string, err error) { +func checkRedisSchema(logger *logging.Logger, rc *redis.Client, pos string) (newPos string, err error) { if pos == "0-0" { defer time.AfterFunc(3*time.Second, func() { logger.Info("Waiting for Icinga 2 to write into Redis, please make sure you have started Icinga 2 and the Icinga DB feature is enabled") diff --git a/go.mod b/go.mod index c510ed745..30df81be9 100644 --- a/go.mod +++ b/go.mod @@ -4,21 +4,18 @@ go 1.22 require ( github.com/creasty/defaults v1.7.0 - github.com/go-sql-driver/mysql v1.8.1 github.com/goccy/go-yaml v1.11.3 + github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 + github.com/icinga/icinga-go-library v0.1.0 github.com/jessevdk/go-flags v1.5.0 github.com/jmoiron/sqlx v1.4.0 - github.com/lib/pq v1.10.9 github.com/mattn/go-sqlite3 v1.14.22 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd github.com/pkg/errors v0.9.1 - github.com/redis/go-redis/v9 v9.5.1 - github.com/ssgreg/journald v1.0.0 github.com/stretchr/testify v1.9.0 github.com/vbauerster/mpb/v6 v6.0.4 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d golang.org/x/sync v0.7.0 ) @@ -29,14 +26,19 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.12 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/redis/go-redis/v9 v9.5.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect - go.uber.org/multierr v1.10.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + github.com/ssgreg/journald v1.0.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 306c502b7..71d43e6d8 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ 
-28,10 +28,12 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I= github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/icinga/icinga-go-library v0.1.0 h1:CKnEBsxl65Ik0F0yHn2N7dX77mIFanPye927lsqu9yI= +github.com/icinga/icinga-go-library v0.1.0/go.mod h1:YN7XJN3W0FodD+j4kirO89zk2tgvanXWt1RMV8UgOLo= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -40,13 +42,11 @@ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= @@ -70,26 +70,24 @@ github.com/vbauerster/mpb/v6 v6.0.4 h1:h6J5zM/2wimP5Hj00unQuV8qbo5EPcj6wbkCqgj7K github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d h1:vtUKgx8dahOomfFzLREU8nSv25YHnTgLBn4rDnWZdU0= -golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/command/command.go b/internal/command/command.go index a4c76ab8d..3521bedf2 100644 --- a/internal/command/command.go +++ b/internal/command/command.go @@ -1,33 +1,35 @@ package command import ( - "fmt" + "github.com/icinga/icinga-go-library/config" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/utils" "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/config" - "github.com/icinga/icingadb/pkg/icingadb" - "github.com/icinga/icingadb/pkg/icingaredis" - "github.com/icinga/icingadb/pkg/logging" - goflags "github.com/jessevdk/go-flags" + icingadbconfig "github.com/icinga/icingadb/internal/config" + "github.com/icinga/icingadb/pkg/icingaredis/telemetry" "github.com/pkg/errors" "os" + "time" ) // Command provides factories for creating Redis and Database connections from Config. 
type Command struct { - Flags *config.Flags - Config *config.Config + Flags icingadbconfig.Flags + Config icingadbconfig.Config } -// New creates and returns a new Command, parses CLI flags and YAML the config, and initializes the logger. +// New parses CLI flags and the YAML configuration and returns a new Command. +// New prints any error during parsing to [os.Stderr] and exits. func New() *Command { - flags, err := config.ParseFlags() - if err != nil { - var cliErr *goflags.Error - if errors.As(err, &cliErr) && cliErr.Type == goflags.ErrHelp { - os.Exit(0) + var flags icingadbconfig.Flags + if err := config.ParseFlags(&flags); err != nil { + if errors.Is(err, config.ErrInvalidArgument) { + panic(err) } - os.Exit(2) + utils.PrintErrorThenExit(err, 2) } if flags.Version { @@ -35,10 +37,13 @@ func New() *Command { os.Exit(0) } - cfg, err := config.FromYAMLFile(flags.Config) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(2) + var cfg icingadbconfig.Config + if err := config.FromYAMLFile(flags.Config, &cfg); err != nil { + if errors.Is(err, config.ErrInvalidArgument) { + panic(err) + } + + utils.PrintErrorThenExit(err, 1) } return &Command{ @@ -48,11 +53,18 @@ func New() *Command { } // Database creates and returns a new icingadb.DB connection from config.Config. -func (c Command) Database(l *logging.Logger) (*icingadb.DB, error) { - return c.Config.Database.Open(l) +func (c Command) Database(l *logging.Logger) (*database.DB, error) { + return database.NewDbFromConfig(&c.Config.Database, l, database.RetryConnectorCallbacks{ + OnRetryableError: func(_ time.Duration, _ uint64, err, _ error) { + telemetry.UpdateCurrentDbConnErr(err) + }, + OnSuccess: func(_ time.Duration, _ uint64, _ error) { + telemetry.UpdateCurrentDbConnErr(nil) + }, + }) } // Redis creates and returns a new icingaredis.Client connection from config.Config. -func (c Command) Redis(l *logging.Logger) (*icingaredis.Client, error) { - return c.Config.Redis.NewClient(l) +func (c Command) Redis(l *logging.Logger) (*redis.Client, error) { + return redis.NewClientFromConfig(&c.Config.Redis, l) } diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 000000000..0d7a07a92 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,77 @@ +package config + +import ( + "github.com/creasty/defaults" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icingadb/pkg/icingadb/history" + "github.com/pkg/errors" + "time" +) + +// Config defines Icinga DB config. +type Config struct { + Database database.Config `yaml:"database"` + Redis redis.Config `yaml:"redis"` + Logging logging.Config `yaml:"logging"` + Retention RetentionConfig `yaml:"retention"` +} + +func (c *Config) SetDefaults() { + // Since SetDefaults() is called after the default values of the struct's fields have been evaluated, + // setting the default port only works here because + // the embedded Redis config struct itself does not provide a default value. + if defaults.CanUpdate(c.Redis.Port) { + c.Redis.Port = 6380 + } +} + +// Validate checks constraints in the supplied configuration and returns an error if they are violated. 
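The SetDefaults comment above is worth unpacking: creasty/defaults first evaluates `default:"…"` struct tags, then invokes SetDefaults() on any type implementing its Setter interface, so defaults.CanUpdate guards against clobbering a user-supplied value. A self-contained illustration (type and values hypothetical):

```go
package main

import (
	"fmt"

	"github.com/creasty/defaults"
)

type redisConf struct {
	Host string `default:"localhost"`
	Port int    // no tag: the default is supplied programmatically below
}

// SetDefaults runs after tag-based defaults were applied (defaults.Setter).
func (c *redisConf) SetDefaults() {
	if defaults.CanUpdate(c.Port) { // still the zero value?
		c.Port = 6380
	}
}

func main() {
	c := redisConf{Port: 6379} // a user-supplied port must survive
	if err := defaults.Set(&c); err != nil {
		panic(err)
	}

	fmt.Println(c.Host, c.Port) // localhost 6379
}
```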
+func (c *Config) Validate() error { + if err := c.Database.Validate(); err != nil { + return err + } + if err := c.Redis.Validate(); err != nil { + return err + } + if err := c.Logging.Validate(); err != nil { + return err + } + if err := c.Retention.Validate(); err != nil { + return err + } + + return nil +} + +// Flags defines CLI flags. +type Flags struct { + // Version decides whether to just print the version and exit. + Version bool `long:"version" description:"print version and exit"` + // Config is the path to the config file + Config string `short:"c" long:"config" description:"path to config file" required:"true" default:"/etc/icingadb/config.yml"` +} + +// RetentionConfig defines configuration for history retention. +type RetentionConfig struct { + HistoryDays uint64 `yaml:"history-days"` + SlaDays uint64 `yaml:"sla-days"` + Interval time.Duration `yaml:"interval" default:"1h"` + Count uint64 `yaml:"count" default:"5000"` + Options history.RetentionOptions `yaml:"options"` +} + +// Validate checks constraints in the supplied retention configuration and +// returns an error if they are violated. +func (r *RetentionConfig) Validate() error { + if r.Interval <= 0 { + return errors.New("retention interval must be positive") + } + + if r.Count == 0 { + return errors.New("count must be greater than zero") + } + + return r.Options.Validate() +} diff --git a/pkg/config/config_test.go b/internal/config/config_test.go similarity index 83% rename from pkg/config/config_test.go rename to internal/config/config_test.go index 94e377355..692ea9209 100644 --- a/pkg/config/config_test.go +++ b/internal/config/config_test.go @@ -2,7 +2,8 @@ package config import ( "github.com/creasty/defaults" - "github.com/icinga/icingadb/pkg/logging" + "github.com/icinga/icinga-go-library/config" + "github.com/icinga/icinga-go-library/logging" "github.com/stretchr/testify/require" "os" "testing" @@ -58,11 +59,12 @@ redis: require.NoError(t, os.WriteFile(tempFile.Name(), []byte(st.input), 0o600)) - if actual, err := FromYAMLFile(tempFile.Name()); st.output == nil { + var actual Config + if err := config.FromYAMLFile(tempFile.Name(), &actual); st.output == nil { require.Error(t, err) } else { require.NoError(t, err) - require.Equal(t, st.output, actual) + require.Equal(t, *st.output, actual) } }) } diff --git a/internal/internal.go b/internal/internal.go deleted file mode 100644 index 7352d4092..000000000 --- a/internal/internal.go +++ /dev/null @@ -1,48 +0,0 @@ -package internal - -import ( - "encoding/json" - "github.com/pkg/errors" -) - -// CantDecodeHex wraps the given error with the given string that cannot be hex-decoded. -func CantDecodeHex(err error, s string) error { - return errors.Wrapf(err, "can't decode hex %q", s) -} - -// CantParseFloat64 wraps the given error with the specified string that cannot be parsed into float64. -func CantParseFloat64(err error, s string) error { - return errors.Wrapf(err, "can't parse %q into float64", s) -} - -// CantParseInt64 wraps the given error with the specified string that cannot be parsed into int64. -func CantParseInt64(err error, s string) error { - return errors.Wrapf(err, "can't parse %q into int64", s) -} - -// CantParseUint64 wraps the given error with the specified string that cannot be parsed into uint64. -func CantParseUint64(err error, s string) error { - return errors.Wrapf(err, "can't parse %q into uint64", s) -} - -// CantPerformQuery wraps the given error with the specified query that cannot be executed. 
-func CantPerformQuery(err error, q string) error { - return errors.Wrapf(err, "can't perform %q", q) -} - -// CantUnmarshalYAML wraps the given error with the designated value, which cannot be unmarshalled into. -func CantUnmarshalYAML(err error, v interface{}) error { - return errors.Wrapf(err, "can't unmarshal YAML into %T", v) -} - -// MarshalJSON calls json.Marshal and wraps any resulting errors. -func MarshalJSON(v interface{}) ([]byte, error) { - b, err := json.Marshal(v) - - return b, errors.Wrapf(err, "can't marshal JSON from %T", v) -} - -// UnmarshalJSON calls json.Unmarshal and wraps any resulting errors. -func UnmarshalJSON(data []byte, v interface{}) error { - return errors.Wrapf(json.Unmarshal(data, v), "can't unmarshal JSON into %T", v) -} diff --git a/internal/version.go b/internal/version.go index 70e1c565f..d1aab39f8 100644 --- a/internal/version.go +++ b/internal/version.go @@ -1,7 +1,7 @@ package internal import ( - "github.com/icinga/icingadb/pkg/version" + "github.com/icinga/icinga-go-library/version" ) // Version contains version and Git commit information. diff --git a/pkg/backoff/backoff.go b/pkg/backoff/backoff.go deleted file mode 100644 index e79a1ee7d..000000000 --- a/pkg/backoff/backoff.go +++ /dev/null @@ -1,43 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -// Backoff returns the backoff duration for a specific retry attempt. -type Backoff func(uint64) time.Duration - -// NewExponentialWithJitter returns a backoff implementation that -// exponentially increases the backoff duration for each retry from min, -// never exceeding max. Some randomization is added to the backoff duration. -// It panics if min >= max. -func NewExponentialWithJitter(min, max time.Duration) Backoff { - if min <= 0 { - min = 100 * time.Millisecond - } - if max <= 0 { - max = 10 * time.Second - } - if min >= max { - panic("max must be larger than min") - } - - return func(attempt uint64) time.Duration { - e := min << attempt - if e <= 0 || e > max { - e = max - } - - return time.Duration(jitter(int64(e))) - } -} - -// jitter returns a random integer distributed in the range [n/2..n). -func jitter(n int64) int64 { - if n == 0 { - return 0 - } - - return n/2 + rand.Int63n(n/2) -} diff --git a/pkg/com/atomic.go b/pkg/com/atomic.go deleted file mode 100644 index 316413dfd..000000000 --- a/pkg/com/atomic.go +++ /dev/null @@ -1,38 +0,0 @@ -package com - -import "sync/atomic" - -// Atomic is a type-safe wrapper around atomic.Value. -type Atomic[T any] struct { - v atomic.Value -} - -func (a *Atomic[T]) Load() (_ T, ok bool) { - if v, ok := a.v.Load().(box[T]); ok { - return v.v, true - } - - return -} - -func (a *Atomic[T]) Store(v T) { - a.v.Store(box[T]{v}) -} - -func (a *Atomic[T]) Swap(new T) (old T, ok bool) { - if old, ok := a.v.Swap(box[T]{new}).(box[T]); ok { - return old.v, true - } - - return -} - -func (a *Atomic[T]) CompareAndSwap(old, new T) (swapped bool) { - return a.v.CompareAndSwap(box[T]{old}, box[T]{new}) -} - -// box allows, for the case T is an interface, nil values and values of different specific types implementing T -// to be stored in Atomic[T]#v (bypassing atomic.Value#Store()'s policy) by wrapping it (into a non-interface). 
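Context for the box comment above (the type itself follows below): sync/atomic.Value panics when successive Stores use different concrete types, or nil, which is awkward when T is an interface such as error. Wrapping every value in box[T] means each Store sees the same concrete struct type. The underlying restriction, demonstrated:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"sync/atomic"
)

func main() {
	var v atomic.Value

	v.Store(errors.New("boom")) // concrete type *errors.errorString

	// Storing a value of a different concrete type panics with
	// "sync/atomic: store of inconsistently typed value into Value".
	defer func() { fmt.Println("recovered:", recover()) }()
	v.Store(fmt.Errorf("wrapped: %w", io.EOF)) // concrete type *fmt.wrapError
}
```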
-type box[T any] struct { - v T -} diff --git a/pkg/com/bulker.go b/pkg/com/bulker.go deleted file mode 100644 index 5e8de5287..000000000 --- a/pkg/com/bulker.go +++ /dev/null @@ -1,187 +0,0 @@ -package com - -import ( - "context" - "github.com/icinga/icingadb/pkg/contracts" - "golang.org/x/sync/errgroup" - "sync" - "time" -) - -// BulkChunkSplitPolicy is a state machine which tracks the items of a chunk a bulker assembles. -// A call takes an item for the current chunk into account. -// Output true indicates that the state machine was reset first and the bulker -// shall finish the current chunk now (not e.g. once $size is reached) without the given item. -type BulkChunkSplitPolicy[T any] func(T) bool - -type BulkChunkSplitPolicyFactory[T any] func() BulkChunkSplitPolicy[T] - -// NeverSplit returns a pseudo state machine which never demands splitting. -func NeverSplit[T any]() BulkChunkSplitPolicy[T] { - return neverSplit[T] -} - -// SplitOnDupId returns a state machine which tracks the inputs' IDs. -// Once an already seen input arrives, it demands splitting. -func SplitOnDupId[T contracts.IDer]() BulkChunkSplitPolicy[T] { - seenIds := map[string]struct{}{} - - return func(ider T) bool { - id := ider.ID().String() - - _, ok := seenIds[id] - if ok { - seenIds = map[string]struct{}{id: {}} - } else { - seenIds[id] = struct{}{} - } - - return ok - } -} - -func neverSplit[T any](T) bool { - return false -} - -// Bulker reads all values from a channel and streams them in chunks into a Bulk channel. -type Bulker[T any] struct { - ch chan []T - ctx context.Context - mu sync.Mutex -} - -// NewBulker returns a new Bulker and starts streaming. -func NewBulker[T any]( - ctx context.Context, ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T], -) *Bulker[T] { - b := &Bulker[T]{ - ch: make(chan []T), - ctx: ctx, - mu: sync.Mutex{}, - } - - go b.run(ch, count, splitPolicyFactory) - - return b -} - -// Bulk returns the channel on which the bulks are delivered. -func (b *Bulker[T]) Bulk() <-chan []T { - return b.ch -} - -func (b *Bulker[T]) run(ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T]) { - defer close(b.ch) - - bufCh := make(chan T, count) - splitPolicy := splitPolicyFactory() - g, ctx := errgroup.WithContext(b.ctx) - - g.Go(func() error { - defer close(bufCh) - - for { - select { - case v, ok := <-ch: - if !ok { - return nil - } - - bufCh <- v - case <-ctx.Done(): - return ctx.Err() - } - } - }) - - g.Go(func() error { - for done := false; !done; { - buf := make([]T, 0, count) - timeout := time.After(256 * time.Millisecond) - - for drain := true; drain && len(buf) < count; { - select { - case v, ok := <-bufCh: - if !ok { - drain = false - done = true - - break - } - - if splitPolicy(v) { - if len(buf) > 0 { - b.ch <- buf - buf = make([]T, 0, count) - } - - timeout = time.After(256 * time.Millisecond) - } - - buf = append(buf, v) - case <-timeout: - drain = false - case <-ctx.Done(): - return ctx.Err() - } - } - - if len(buf) > 0 { - b.ch <- buf - } - - splitPolicy = splitPolicyFactory() - } - - return nil - }) - - // We don't expect an error here. - // We only use errgroup for the encapsulated use of sync.WaitGroup. - _ = g.Wait() -} - -// Bulk reads all values from a channel and streams them in chunks into a returned channel. 
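Stepping back briefly to pkg/backoff, removed a few hunks above: callers received a Backoff closure and indexed it by attempt number. Usage looked like this (loop bounds illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/icinga/icingadb/pkg/backoff" // pre-removal import path
)

func main() {
	b := backoff.NewExponentialWithJitter(100*time.Millisecond, 10*time.Second)

	for attempt := uint64(0); attempt < 8; attempt++ {
		// Roughly doubles per attempt with randomization, capped at 10s.
		fmt.Println(b(attempt))
	}
}
```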
-func Bulk[T any]( - ctx context.Context, ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T], -) <-chan []T { - if count <= 1 { - return oneBulk(ctx, ch) - } - - return NewBulker(ctx, ch, count, splitPolicyFactory).Bulk() -} - -// oneBulk operates just as NewBulker(ctx, ch, 1, splitPolicy).Bulk(), -// but without the overhead of the actual bulk creation with a buffer channel, timeout and BulkChunkSplitPolicy. -func oneBulk[T any](ctx context.Context, ch <-chan T) <-chan []T { - out := make(chan []T) - go func() { - defer close(out) - - for { - select { - case item, ok := <-ch: - if !ok { - return - } - - select { - case out <- []T{item}: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return out -} - -var ( - _ BulkChunkSplitPolicyFactory[struct{}] = NeverSplit[struct{}] - _ BulkChunkSplitPolicyFactory[contracts.Entity] = SplitOnDupId[contracts.Entity] -) diff --git a/pkg/com/com.go b/pkg/com/com.go deleted file mode 100644 index 9e0a69831..000000000 --- a/pkg/com/com.go +++ /dev/null @@ -1,82 +0,0 @@ -package com - -import ( - "context" - "github.com/icinga/icingadb/pkg/contracts" - "golang.org/x/sync/errgroup" -) - -// WaitAsync calls Wait() on the passed Waiter in a new goroutine and -// sends the first non-nil error (if any) to the returned channel. -// The returned channel is always closed when the Waiter is done. -func WaitAsync(w contracts.Waiter) <-chan error { - errs := make(chan error, 1) - - go func() { - defer close(errs) - - if e := w.Wait(); e != nil { - errs <- e - } - }() - - return errs -} - -// ErrgroupReceive adds a goroutine to the specified group that -// returns the first non-nil error (if any) from the specified channel. -// If the channel is closed, it will return nil. -func ErrgroupReceive(g *errgroup.Group, err <-chan error) { - g.Go(func() error { - if e := <-err; e != nil { - return e - } - - return nil - }) -} - -// CopyFirst asynchronously forwards all items from input to forward and synchronously returns the first item. -func CopyFirst( - ctx context.Context, input <-chan contracts.Entity, -) (first contracts.Entity, forward <-chan contracts.Entity, err error) { - var ok bool - select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - case first, ok = <-input: - } - - if !ok { - return - } - - // Buffer of one because we receive an entity and send it back immediately. - fwd := make(chan contracts.Entity, 1) - fwd <- first - - forward = fwd - - go func() { - defer close(fwd) - - for { - select { - case <-ctx.Done(): - return - case e, ok := <-input: - if !ok { - return - } - - select { - case <-ctx.Done(): - return - case fwd <- e: - } - } - } - }() - - return -} diff --git a/pkg/com/cond.go b/pkg/com/cond.go deleted file mode 100644 index 72ba347c5..000000000 --- a/pkg/com/cond.go +++ /dev/null @@ -1,90 +0,0 @@ -package com - -import ( - "context" - "github.com/pkg/errors" -) - -// Cond implements a channel-based synchronization for goroutines that wait for signals or send them. -// Internally based on a controller loop that handles the synchronization of new listeners and signal propagation, -// which is only started when NewCond is called. Thus the zero value cannot be used. -type Cond struct { - broadcast chan struct{} - done chan struct{} - cancel context.CancelFunc - listeners chan chan struct{} -} - -// NewCond returns a new Cond and starts the controller loop. 
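Before the Cond code continues below, a usage reference for the bulker just removed (import path as it was before this PR):

```go
package main

import (
	"context"
	"fmt"

	"github.com/icinga/icingadb/pkg/com" // pre-removal import path
)

func main() {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < 10; i++ {
			ch <- i
		}
	}()

	// count=4 with the NeverSplit policy yields [0 1 2 3] [4 5 6 7] [8 9];
	// the short tail flushes on channel close (or the 256ms timeout).
	for chunk := range com.Bulk(context.Background(), ch, 4, com.NeverSplit[int]) {
		fmt.Println(chunk)
	}
}
```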
-func NewCond(ctx context.Context) *Cond { - ctx, cancel := context.WithCancel(ctx) - - c := &Cond{ - broadcast: make(chan struct{}), - cancel: cancel, - done: make(chan struct{}), - listeners: make(chan chan struct{}), - } - - go c.controller(ctx) - - return c -} - -// Broadcast sends a signal to all current listeners by closing the previously returned channel from Wait. -// Panics if the controller loop has already ended. -func (c *Cond) Broadcast() { - select { - case c.broadcast <- struct{}{}: - case <-c.done: - panic(errors.New("condition closed")) - } -} - -// Close stops the controller loop, waits for it to finish, and returns an error if any. -// Implements the io.Closer interface. -func (c *Cond) Close() error { - c.cancel() - <-c.done - - return nil -} - -// Done returns a channel that will be closed when the controller loop has ended. -func (c *Cond) Done() <-chan struct{} { - return c.done -} - -// Wait returns a channel that is closed with the next signal. -// Panics if the controller loop has already ended. -func (c *Cond) Wait() <-chan struct{} { - select { - case l := <-c.listeners: - return l - case <-c.done: - panic(errors.New("condition closed")) - } -} - -// controller loop. -func (c *Cond) controller(ctx context.Context) { - defer close(c.done) - - // Note that the notify channel does not close when the controller loop ends - // in order not to notify pending listeners. - notify := make(chan struct{}) - - for { - select { - case <-c.broadcast: - // Close channel to notify all current listeners. - close(notify) - // Create a new channel for the next listeners. - notify = make(chan struct{}) - case c.listeners <- notify: - // A new listener received the channel. - case <-ctx.Done(): - return - } - } -} diff --git a/pkg/com/counter.go b/pkg/com/counter.go deleted file mode 100644 index 52f9f7ff2..000000000 --- a/pkg/com/counter.go +++ /dev/null @@ -1,48 +0,0 @@ -package com - -import ( - "sync" - "sync/atomic" -) - -// Counter implements an atomic counter. -type Counter struct { - value uint64 - mu sync.Mutex // Protects total. - total uint64 -} - -// Add adds the given delta to the counter. -func (c *Counter) Add(delta uint64) { - atomic.AddUint64(&c.value, delta) -} - -// Inc increments the counter by one. -func (c *Counter) Inc() { - c.Add(1) -} - -// Reset resets the counter to 0 and returns its previous value. -// Does not reset the total value returned from Total. -func (c *Counter) Reset() uint64 { - c.mu.Lock() - defer c.mu.Unlock() - - v := atomic.SwapUint64(&c.value, 0) - c.total += v - - return v -} - -// Total returns the total counter value. -func (c *Counter) Total() uint64 { - c.mu.Lock() - defer c.mu.Unlock() - - return c.total + c.Val() -} - -// Val returns the current counter value. -func (c *Counter) Val() uint64 { - return atomic.LoadUint64(&c.value) -} diff --git a/pkg/common/sync_subject.go b/pkg/common/sync_subject.go index a39d6df6d..96fd50c74 100644 --- a/pkg/common/sync_subject.go +++ b/pkg/common/sync_subject.go @@ -1,25 +1,26 @@ package common import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/utils" ) // SyncSubject defines information about entities to be synchronized. 
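And one for the deleted Cond, whose channel-based contract differs from sync.Cond: Wait hands out a channel that the next Broadcast closes, so subscribing must happen before signaling (import path pre-removal):

```go
package main

import (
	"context"
	"fmt"

	"github.com/icinga/icingadb/pkg/com" // pre-removal import path
)

func main() {
	c := com.NewCond(context.Background())
	defer func() { _ = c.Close() }()

	ready := c.Wait() // subscribe first; the next Broadcast closes this channel

	go c.Broadcast()

	<-ready
	fmt.Println("signaled")
}
```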
type SyncSubject struct { - entity contracts.Entity - factory contracts.EntityFactoryFunc + entity database.Entity + factory database.EntityFactoryFunc withChecksum bool } // NewSyncSubject returns a new SyncSubject. -func NewSyncSubject(factoryFunc contracts.EntityFactoryFunc) *SyncSubject { +func NewSyncSubject(factoryFunc database.EntityFactoryFunc) *SyncSubject { e := factoryFunc() - var factory contracts.EntityFactoryFunc + var factory database.EntityFactoryFunc if _, ok := e.(contracts.Initer); ok { - factory = func() contracts.Entity { + factory = func() database.Entity { e := factoryFunc() e.(contracts.Initer).Init() @@ -39,12 +40,12 @@ func NewSyncSubject(factoryFunc contracts.EntityFactoryFunc) *SyncSubject { } // Entity returns one value from the factory. Always returns the same entity. -func (s SyncSubject) Entity() contracts.Entity { +func (s SyncSubject) Entity() database.Entity { return s.entity } // Factory returns the entity factory function that calls Init() on the created contracts.Entity if applicable. -func (s SyncSubject) Factory() contracts.EntityFactoryFunc { +func (s SyncSubject) Factory() database.EntityFactoryFunc { return s.factory } @@ -52,7 +53,7 @@ func (s SyncSubject) Factory() contracts.EntityFactoryFunc { // In the latter case it returns a factory for EntityWithChecksum instead. // Rationale: Sync#ApplyDelta() uses its input entities which are WithChecksum() only for the delta itself // and not for insertion into the database, so EntityWithChecksum is enough. And it consumes less memory. -func (s SyncSubject) FactoryForDelta() contracts.EntityFactoryFunc { +func (s SyncSubject) FactoryForDelta() database.EntityFactoryFunc { if s.withChecksum { return v1.NewEntityWithChecksum } @@ -62,7 +63,7 @@ func (s SyncSubject) FactoryForDelta() contracts.EntityFactoryFunc { // Name returns the declared name of the entity. func (s SyncSubject) Name() string { - return utils.Name(s.entity) + return types.Name(s.entity) } // WithChecksum returns whether entities from the factory implement contracts.Checksumer. diff --git a/pkg/config/config.go b/pkg/config/config.go deleted file mode 100644 index 744f4c31f..000000000 --- a/pkg/config/config.go +++ /dev/null @@ -1,134 +0,0 @@ -package config - -import ( - "crypto/tls" - "crypto/x509" - "github.com/creasty/defaults" - "github.com/goccy/go-yaml" - "github.com/jessevdk/go-flags" - "github.com/pkg/errors" - "os" -) - -// Config defines Icinga DB config. -type Config struct { - Database Database `yaml:"database"` - Redis Redis `yaml:"redis"` - Logging Logging `yaml:"logging"` - Retention Retention `yaml:"retention"` -} - -// Validate checks constraints in the supplied configuration and returns an error if they are violated. -func (c *Config) Validate() error { - if err := c.Database.Validate(); err != nil { - return err - } - if err := c.Redis.Validate(); err != nil { - return err - } - if err := c.Logging.Validate(); err != nil { - return err - } - if err := c.Retention.Validate(); err != nil { - return err - } - - return nil -} - -// Flags defines CLI flags. -type Flags struct { - // Version decides whether to just print the version and exit. - Version bool `long:"version" description:"print version and exit"` - // Config is the path to the config file - Config string `short:"c" long:"config" description:"path to config file" required:"true" default:"/etc/icingadb/config.yml"` -} - -// FromYAMLFile returns a new Config value created from the given YAML config file. 
-func FromYAMLFile(name string) (*Config, error) { - f, err := os.Open(name) - if err != nil { - return nil, errors.Wrap(err, "can't open YAML file "+name) - } - defer f.Close() - - c := &Config{} - d := yaml.NewDecoder(f, yaml.DisallowUnknownField()) - - if err := defaults.Set(c); err != nil { - return nil, errors.Wrap(err, "can't set config defaults") - } - - if err := d.Decode(c); err != nil { - return nil, errors.Wrap(err, "can't parse YAML file "+name) - } - - if err := c.Validate(); err != nil { - return nil, errors.Wrap(err, "invalid configuration") - } - - return c, nil -} - -// ParseFlags parses CLI flags and -// returns a Flags value created from them. -func ParseFlags() (*Flags, error) { - f := &Flags{} - parser := flags.NewParser(f, flags.Default) - - if _, err := parser.Parse(); err != nil { - return nil, errors.Wrap(err, "can't parse CLI flags") - } - - return f, nil -} - -// TLS provides TLS configuration options for Redis and Database. -type TLS struct { - Enable bool `yaml:"tls"` - Cert string `yaml:"cert"` - Key string `yaml:"key"` - Ca string `yaml:"ca"` - Insecure bool `yaml:"insecure"` -} - -// MakeConfig assembles a tls.Config from t and serverName. -func (t *TLS) MakeConfig(serverName string) (*tls.Config, error) { - if !t.Enable { - return nil, nil - } - - tlsConfig := &tls.Config{} - if t.Cert == "" { - if t.Key != "" { - return nil, errors.New("private key given, but client certificate missing") - } - } else if t.Key == "" { - return nil, errors.New("client certificate given, but private key missing") - } else { - crt, err := tls.LoadX509KeyPair(t.Cert, t.Key) - if err != nil { - return nil, errors.Wrap(err, "can't load X.509 key pair") - } - - tlsConfig.Certificates = []tls.Certificate{crt} - } - - if t.Insecure { - tlsConfig.InsecureSkipVerify = true - } else if t.Ca != "" { - raw, err := os.ReadFile(t.Ca) - if err != nil { - return nil, errors.Wrap(err, "can't read CA file") - } - - tlsConfig.RootCAs = x509.NewCertPool() - if !tlsConfig.RootCAs.AppendCertsFromPEM(raw) { - return nil, errors.New("can't parse CA file") - } - } - - tlsConfig.ServerName = serverName - - return tlsConfig, nil -} diff --git a/pkg/config/database.go b/pkg/config/database.go deleted file mode 100644 index 0895d26c9..000000000 --- a/pkg/config/database.go +++ /dev/null @@ -1,216 +0,0 @@ -package config - -import ( - "context" - "database/sql" - "database/sql/driver" - "fmt" - "github.com/go-sql-driver/mysql" - "github.com/icinga/icingadb/pkg/icingadb" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/utils" - "github.com/jmoiron/sqlx" - "github.com/jmoiron/sqlx/reflectx" - "github.com/lib/pq" - "github.com/pkg/errors" - "net" - "net/url" - "strconv" - "strings" - "time" -) - -// Database defines database client configuration. -type Database struct { - Type string `yaml:"type" default:"mysql"` - Host string `yaml:"host"` - Port int `yaml:"port"` - Database string `yaml:"database"` - User string `yaml:"user"` - Password string `yaml:"password"` - TlsOptions TLS `yaml:",inline"` - Options icingadb.Options `yaml:"options"` -} - -// Open prepares the DSN string and driver configuration, -// calls sqlx.Open, but returns *icingadb.DB. 
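Usage sketch for MakeConfig as deleted above (not part of the diff; file paths illustrative, the TLS struct assumed in scope):

package config

import "log"

func tlsExample() {
	t := &TLS{
		Enable: true,
		Cert:   "/etc/icingadb/tls.crt",
		Key:    "/etc/icingadb/tls.key",
		Ca:     "/etc/icingadb/ca.crt",
	}

	cfg, err := t.MakeConfig("db.example.com")
	if err != nil {
		log.Fatal(err)
	}

	// cfg is nil when Enable is false; otherwise ServerName is set and,
	// unless Insecure, RootCAs holds the parsed CA bundle.
	log.Printf("TLS configured: %v", cfg != nil)
}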
-func (d *Database) Open(logger *logging.Logger) (*icingadb.DB, error) { - var db *sqlx.DB - switch d.Type { - case "mysql": - config := mysql.NewConfig() - - config.User = d.User - config.Passwd = d.Password - config.Logger = icingadb.MysqlFuncLogger(logger.Debug) - - if d.isUnixAddr() { - config.Net = "unix" - config.Addr = d.Host - } else { - config.Net = "tcp" - port := d.Port - if port == 0 { - port = 3306 - } - config.Addr = net.JoinHostPort(d.Host, fmt.Sprint(port)) - } - - config.DBName = d.Database - config.Timeout = time.Minute - config.Params = map[string]string{"sql_mode": "'TRADITIONAL,ANSI_QUOTES'"} - - tlsConfig, err := d.TlsOptions.MakeConfig(d.Host) - if err != nil { - return nil, err - } - - if tlsConfig != nil { - config.TLSConfig = "icingadb" - if err := mysql.RegisterTLSConfig(config.TLSConfig, tlsConfig); err != nil { - return nil, errors.Wrap(err, "can't register TLS config") - } - } - - c, err := mysql.NewConnector(config) - if err != nil { - return nil, errors.Wrap(err, "can't open mysql database") - } - - wsrepSyncWait := int64(d.Options.WsrepSyncWait) - setWsrepSyncWait := func(ctx context.Context, conn driver.Conn) error { - return setGaleraOpts(ctx, conn, wsrepSyncWait) - } - - db = sqlx.NewDb(sql.OpenDB(icingadb.NewConnector(c, logger, setWsrepSyncWait)), icingadb.MySQL) - case "pgsql": - uri := &url.URL{ - Scheme: "postgres", - User: url.UserPassword(d.User, d.Password), - Path: "/" + url.PathEscape(d.Database), - } - - query := url.Values{ - "connect_timeout": {"60"}, - "binary_parameters": {"yes"}, - - // Host and port can alternatively be specified in the query string. lib/pq can't parse the connection URI - // if a Unix domain socket path is specified in the host part of the URI, therefore always use the query - // string. See also https://github.com/lib/pq/issues/796 - "host": {d.Host}, - } - if d.Port != 0 { - query["port"] = []string{strconv.FormatInt(int64(d.Port), 10)} - } - - if _, err := d.TlsOptions.MakeConfig(d.Host); err != nil { - return nil, err - } - - if d.TlsOptions.Enable { - if d.TlsOptions.Insecure { - query["sslmode"] = []string{"require"} - } else { - query["sslmode"] = []string{"verify-full"} - } - - if d.TlsOptions.Cert != "" { - query["sslcert"] = []string{d.TlsOptions.Cert} - } - - if d.TlsOptions.Key != "" { - query["sslkey"] = []string{d.TlsOptions.Key} - } - - if d.TlsOptions.Ca != "" { - query["sslrootcert"] = []string{d.TlsOptions.Ca} - } - } else { - query["sslmode"] = []string{"disable"} - } - - uri.RawQuery = query.Encode() - - connector, err := pq.NewConnector(uri.String()) - if err != nil { - return nil, errors.Wrap(err, "can't open pgsql database") - } - - db = sqlx.NewDb(sql.OpenDB(icingadb.NewConnector(connector, logger, nil)), icingadb.PostgreSQL) - default: - return nil, unknownDbType(d.Type) - } - - db.SetMaxIdleConns(d.Options.MaxConnections / 3) - db.SetMaxOpenConns(d.Options.MaxConnections) - - db.Mapper = reflectx.NewMapperFunc("db", func(s string) string { - return utils.Key(s, '_') - }) - - return icingadb.NewDb(db, logger, &d.Options), nil -} - -// Validate checks constraints in the supplied database configuration and returns an error if they are violated. 
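Sketch of how the deleted Open was driven (not part of the diff; settings illustrative, in production the struct comes from FromYAMLFile, which also applies the defaults):

package config

import (
	"github.com/icinga/icingadb/pkg/icingadb"
	"github.com/icinga/icingadb/pkg/logging"
)

func openExample(logger *logging.Logger) (*icingadb.DB, error) {
	d := &Database{
		Type:     "mysql",
		Host:     "localhost", // a leading "/" selects the unix-socket branch
		Database: "icingadb",
		User:     "icingadb",
		Password: "secret",
		Options: icingadb.Options{
			MaxConnections:         16,
			MaxConnectionsPerTable: 8,
			WsrepSyncWait:          7,
		},
	}

	// The mysql branch registers setGaleraOpts as the connector's init
	// hook, so each session runs SET SESSION wsrep_sync_wait=7; plain
	// single-node MySQL rejects that with error 1193, which is tolerated.
	return d.Open(logger)
}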
-func (d *Database) Validate() error { - switch d.Type { - case "mysql", "pgsql": - default: - return unknownDbType(d.Type) - } - - if d.Host == "" { - return errors.New("database host missing") - } - - if d.User == "" { - return errors.New("database user missing") - } - - if d.Database == "" { - return errors.New("database name missing") - } - - return d.Options.Validate() -} - -func (d *Database) isUnixAddr() bool { - return strings.HasPrefix(d.Host, "/") -} - -func unknownDbType(t string) error { - return errors.Errorf(`unknown database type %q, must be one of: "mysql", "pgsql"`, t) -} - -// setGaleraOpts sets the "wsrep_sync_wait" variable for each session ensures that causality checks are performed -// before execution and that each statement is executed on a fully synchronized node. Doing so prevents foreign key -// violation when inserting into dependent tables on different MariaDB/MySQL nodes. When using MySQL single nodes, -// the "SET SESSION" command will fail with "Unknown system variable (1193)" and will therefore be silently dropped. -// -// https://mariadb.com/kb/en/galera-cluster-system-variables/#wsrep_sync_wait -func setGaleraOpts(ctx context.Context, conn driver.Conn, wsrepSyncWait int64) error { - const galeraOpts = "SET SESSION wsrep_sync_wait=?" - - stmt, err := conn.(driver.ConnPrepareContext).PrepareContext(ctx, galeraOpts) - if err != nil { - if errors.Is(err, &mysql.MySQLError{Number: 1193}) { // Unknown system variable - return nil - } - - return errors.Wrap(err, "cannot prepare "+galeraOpts) - } - // This is just for an unexpected exit and any returned error can safely be ignored and in case - // of the normal function exit, the stmt is closed manually, and its error is handled gracefully. - defer func() { _ = stmt.Close() }() - - _, err = stmt.(driver.StmtExecContext).ExecContext(ctx, []driver.NamedValue{{Value: wsrepSyncWait}}) - if err != nil { - return errors.Wrap(err, "cannot execute "+galeraOpts) - } - - if err = stmt.Close(); err != nil { - return errors.Wrap(err, "cannot close prepared statement "+galeraOpts) - } - - return nil -} diff --git a/pkg/config/history_retention.go b/pkg/config/history_retention.go deleted file mode 100644 index d4373b708..000000000 --- a/pkg/config/history_retention.go +++ /dev/null @@ -1,30 +0,0 @@ -package config - -import ( - "github.com/icinga/icingadb/pkg/icingadb/history" - "github.com/pkg/errors" - "time" -) - -// Retention defines configuration for history retention. -type Retention struct { - HistoryDays uint64 `yaml:"history-days"` - SlaDays uint64 `yaml:"sla-days"` - Interval time.Duration `yaml:"interval" default:"1h"` - Count uint64 `yaml:"count" default:"5000"` - Options history.RetentionOptions `yaml:"options"` -} - -// Validate checks constraints in the supplied retention configuration and -// returns an error if they are violated. -func (r *Retention) Validate() error { - if r.Interval <= 0 { - return errors.New("retention interval must be positive") - } - - if r.Count == 0 { - return errors.New("count must be greater than zero") - } - - return r.Options.Validate() -} diff --git a/pkg/config/logging.go b/pkg/config/logging.go deleted file mode 100644 index 9ccd35e01..000000000 --- a/pkg/config/logging.go +++ /dev/null @@ -1,44 +0,0 @@ -package config - -import ( - "github.com/icinga/icingadb/pkg/logging" - "github.com/pkg/errors" - "go.uber.org/zap/zapcore" - "os" - "time" -) - -// Logging defines Logger configuration. -type Logging struct { - // zapcore.Level at 0 is for info level. 
- Level zapcore.Level `yaml:"level" default:"0"` - Output string `yaml:"output"` - // Interval for periodic logging. - Interval time.Duration `yaml:"interval" default:"20s"` - - logging.Options `yaml:"options"` -} - -// Validate checks constraints in the supplied Logging configuration and returns an error if they are violated. -// Also configures the log output if it is not configured: -// systemd-journald is used when Icinga DB is running under systemd, otherwise stderr. -func (l *Logging) Validate() error { - if l.Interval <= 0 { - return errors.New("periodic logging interval must be positive") - } - - if l.Output == "" { - if _, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { - // When started by systemd, NOTIFY_SOCKET is set by systemd for Type=notify supervised services, - // which is the default setting for the Icinga DB service. - // This assumes that Icinga DB is running under systemd, so set output to systemd-journald. - l.Output = logging.JOURNAL - } else { - // Otherwise set it to console, i.e. write log messages to stderr. - l.Output = logging.CONSOLE - } - } - - // To be on the safe side, always call AssertOutput. - return logging.AssertOutput(l.Output) -} diff --git a/pkg/config/redis.go b/pkg/config/redis.go deleted file mode 100644 index ad8b31a60..000000000 --- a/pkg/config/redis.go +++ /dev/null @@ -1,116 +0,0 @@ -package config - -import ( - "context" - "crypto/tls" - "fmt" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/icinga/icingadb/pkg/icingaredis" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/retry" - "github.com/icinga/icingadb/pkg/utils" - "github.com/pkg/errors" - "github.com/redis/go-redis/v9" - "go.uber.org/zap" - "net" - "strings" - "time" -) - -// Redis defines Redis client configuration. -type Redis struct { - Host string `yaml:"host"` - Port int `yaml:"port" default:"6380"` - Password string `yaml:"password"` - TlsOptions TLS `yaml:",inline"` - Options icingaredis.Options `yaml:"options"` -} - -type ctxDialerFunc = func(ctx context.Context, network, addr string) (net.Conn, error) - -// NewClient prepares Redis client configuration, -// calls redis.NewClient, but returns *icingaredis.Client. -func (r *Redis) NewClient(logger *logging.Logger) (*icingaredis.Client, error) { - tlsConfig, err := r.TlsOptions.MakeConfig(r.Host) - if err != nil { - return nil, err - } - - var dialer ctxDialerFunc - dl := &net.Dialer{Timeout: 15 * time.Second} - - if tlsConfig == nil { - dialer = dl.DialContext - } else { - dialer = (&tls.Dialer{NetDialer: dl, Config: tlsConfig}).DialContext - } - - options := &redis.Options{ - Dialer: dialWithLogging(dialer, logger), - Password: r.Password, - DB: 0, // Use default DB, - ReadTimeout: r.Options.Timeout, - TLSConfig: tlsConfig, - } - - if strings.HasPrefix(r.Host, "/") { - options.Network = "unix" - options.Addr = r.Host - } else { - options.Network = "tcp" - options.Addr = net.JoinHostPort(r.Host, fmt.Sprint(r.Port)) - } - - c := redis.NewClient(options) - - opts := c.Options() - opts.PoolSize = utils.MaxInt(32, opts.PoolSize) - opts.MaxRetries = opts.PoolSize + 1 // https://github.com/go-redis/redis/issues/1737 - c = redis.NewClient(opts) - - return icingaredis.NewClient(c, logger, &r.Options), nil -} - -// dialWithLogging returns a Redis Dialer with logging capabilities. -func dialWithLogging(dialer ctxDialerFunc, logger *logging.Logger) ctxDialerFunc { - // dial behaves like net.Dialer#DialContext, - // but re-tries on common errors that are considered retryable. 
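Counterpart sketch for NewClient above (not part of the diff; values illustrative):

package config

import (
	"github.com/icinga/icingadb/pkg/icingaredis"
	"github.com/icinga/icingadb/pkg/logging"
)

func redisExample(logger *logging.Logger) (*icingaredis.Client, error) {
	r := &Redis{
		Host:     "localhost", // a leading "/" switches to a unix socket
		Port:     6380,
		Password: "secret",
	}

	// NewClient forces the pool to at least 32 connections and sets
	// MaxRetries to pool size + 1 (go-redis issue 1737); dial errors are
	// retried with jittered backoff via dialWithLogging below.
	return r.NewClient(logger)
}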
- return func(ctx context.Context, network, addr string) (conn net.Conn, err error) { - err = retry.WithBackoff( - ctx, - func(ctx context.Context) (err error) { - conn, err = dialer(ctx, network, addr) - return - }, - retry.Retryable, - backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), - retry.Settings{ - Timeout: retry.DefaultTimeout, - OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) { - if lastErr == nil || err.Error() != lastErr.Error() { - logger.Warnw("Can't connect to Redis. Retrying", zap.Error(err)) - } - }, - OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) { - if attempt > 1 { - logger.Infow("Reconnected to Redis", - zap.Duration("after", elapsed), zap.Uint64("attempts", attempt)) - } - }, - }, - ) - - err = errors.Wrap(err, "can't connect to Redis") - - return - } -} - -// Validate checks constraints in the supplied Redis configuration and returns an error if they are violated. -func (r *Redis) Validate() error { - if r.Host == "" { - return errors.New("Redis host missing") - } - - return r.Options.Validate() -} diff --git a/pkg/contracts/contracts.go b/pkg/contracts/contracts.go index a8b420121..424d059f2 100644 --- a/pkg/contracts/contracts.go +++ b/pkg/contracts/contracts.go @@ -1,39 +1,7 @@ package contracts -// Entity is implemented by every type Icinga DB should synchronize. -type Entity interface { - Fingerprinter - IDer -} - -// Fingerprinter is implemented by every entity that uniquely identifies itself. -type Fingerprinter interface { - // Fingerprint returns the value that uniquely identifies the entity. - Fingerprint() Fingerprinter -} - -// ID is a unique identifier of an entity. -type ID interface { - // String returns the string representation form of the ID. - // The String method is used to use the ID in functions - // where it needs to be compared or hashed. - String() string -} - -// IDer is implemented by every entity that uniquely identifies itself. -type IDer interface { - ID() ID // ID returns the ID. - SetID(ID) // SetID sets the ID. -} - -// Equaler is implemented by every type that is comparable. -type Equaler interface { - Equal(Equaler) bool // Equal checks for equality. -} - // Checksum is a unique identifier of an entity. type Checksum interface { - Equaler // String returns the string representation form of the Checksum. // The String method is used to use the Checksum in functions // where it needs to be compared or hashed. @@ -46,45 +14,16 @@ type Checksumer interface { SetChecksum(Checksum) // SetChecksum sets the Checksum. } -// EntityFactoryFunc knows how to create an Entity. -type EntityFactoryFunc func() Entity - -// Waiter implements the Wait method, -// which blocks until execution is complete. -type Waiter interface { - Wait() error // Wait waits for execution to complete. -} - -// The WaiterFunc type is an adapter to allow the use of ordinary functions as Waiter. -// If f is a function with the appropriate signature, WaiterFunc(f) is a Waiter that calls f. -type WaiterFunc func() error - -// Wait implements the Waiter interface. -func (f WaiterFunc) Wait() error { - return f() -} - // Initer implements the Init method, // which initializes the object in addition to zeroing. type Initer interface { Init() // Init initializes the object. } -// Upserter implements the Upsert method, -// which returns a part of the object for ON DUPLICATE KEY UPDATE. -type Upserter interface { - Upsert() interface{} // Upsert partitions the object. 
-} - -// TableNamer implements the TableName method, -// which returns the table of the object. -type TableNamer interface { - TableName() string // TableName tells the table. -} - -// Scoper implements the Scope method, -// which returns a struct specifying the WHERE conditions that -// entities must satisfy in order to be SELECTed. -type Scoper interface { - Scope() interface{} +// SafeInit attempts to initialize the passed argument by calling its Init method, +// but only if the argument implements the [Initer] interface. +func SafeInit(v any) { + if initer, ok := v.(Initer); ok { + initer.Init() + } } diff --git a/pkg/flatten/flatten.go b/pkg/flatten/flatten.go deleted file mode 100644 index 698eff178..000000000 --- a/pkg/flatten/flatten.go +++ /dev/null @@ -1,46 +0,0 @@ -package flatten - -import ( - "fmt" - "github.com/icinga/icingadb/pkg/types" - "strconv" -) - -// Flatten creates flat, one-dimensional maps from arbitrarily nested values, e.g. JSON. -func Flatten(value interface{}, prefix string) map[string]types.String { - var flatten func(string, interface{}) - flattened := make(map[string]types.String) - - flatten = func(key string, value interface{}) { - switch value := value.(type) { - case map[string]interface{}: - if len(value) == 0 { - flattened[key] = types.String{} - break - } - - for k, v := range value { - flatten(key+"."+k, v) - } - case []interface{}: - if len(value) == 0 { - flattened[key] = types.String{} - break - } - - for i, v := range value { - flatten(key+"["+strconv.Itoa(i)+"]", v) - } - case nil: - flattened[key] = types.MakeString("null") - case float64: - flattened[key] = types.MakeString(strconv.FormatFloat(value, 'f', -1, 64)) - default: - flattened[key] = types.MakeString(fmt.Sprintf("%v", value)) - } - } - - flatten(prefix, value) - - return flattened -} diff --git a/pkg/flatten/flatten_test.go b/pkg/flatten/flatten_test.go deleted file mode 100644 index f84b8d9ec..000000000 --- a/pkg/flatten/flatten_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package flatten - -import ( - "github.com/icinga/icingadb/pkg/types" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestFlatten(t *testing.T) { - for _, st := range []struct { - name string - prefix string - value any - output map[string]types.String - }{ - {"nil", "a", nil, map[string]types.String{"a": types.MakeString("null")}}, - {"bool", "b", true, map[string]types.String{"b": types.MakeString("true")}}, - {"int", "c", 42, map[string]types.String{"c": types.MakeString("42")}}, - {"float", "d", 77.7, map[string]types.String{"d": types.MakeString("77.7")}}, - {"large_float", "e", 1e23, map[string]types.String{"e": types.MakeString("100000000000000000000000")}}, - {"string", "f", "\x00", map[string]types.String{"f": types.MakeString("\x00")}}, - {"nil_slice", "g", []any(nil), map[string]types.String{"g": {}}}, - {"empty_slice", "h", []any{}, map[string]types.String{"h": {}}}, - {"slice", "i", []any{nil}, map[string]types.String{"i[0]": types.MakeString("null")}}, - {"nil_map", "j", map[string]any(nil), map[string]types.String{"j": {}}}, - {"empty_map", "k", map[string]any{}, map[string]types.String{"k": {}}}, - {"map", "l", map[string]any{" ": nil}, map[string]types.String{"l. 
": types.MakeString("null")}}, - {"map_with_slice", "m", map[string]any{"\t": []any{"ä", "ö", "ü"}, "ß": "s"}, map[string]types.String{ - "m.\t[0]": types.MakeString("ä"), - "m.\t[1]": types.MakeString("ö"), - "m.\t[2]": types.MakeString("ü"), - "m.ß": types.MakeString("s"), - }}, - {"slice_with_map", "n", []any{map[string]any{"ä": "a", "ö": "o", "ü": "u"}, "ß"}, map[string]types.String{ - "n[0].ä": types.MakeString("a"), - "n[0].ö": types.MakeString("o"), - "n[0].ü": types.MakeString("u"), - "n[1]": types.MakeString("ß"), - }}, - } { - t.Run(st.name, func(t *testing.T) { - assert.Equal(t, st.output, Flatten(st.value, st.prefix)) - }) - } -} diff --git a/pkg/icingadb/cleanup.go b/pkg/icingadb/cleanup.go index 22bf02d6e..754e5a33d 100644 --- a/pkg/icingadb/cleanup.go +++ b/pkg/icingadb/cleanup.go @@ -3,11 +3,11 @@ package icingadb import ( "context" "fmt" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/retry" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/backoff" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/retry" + "github.com/icinga/icinga-go-library/types" "time" ) @@ -18,34 +18,18 @@ type CleanupStmt struct { Column string } -// Build assembles the cleanup statement for the specified database driver with the given limit. -func (stmt *CleanupStmt) Build(driverName string, limit uint64) string { - switch driverName { - case MySQL: - return fmt.Sprintf(`DELETE FROM %[1]s WHERE environment_id = :environment_id AND %[2]s < :time -ORDER BY %[2]s LIMIT %[3]d`, stmt.Table, stmt.Column, limit) - case PostgreSQL: - return fmt.Sprintf(`WITH rows AS ( -SELECT %[1]s FROM %[2]s WHERE environment_id = :environment_id AND %[3]s < :time ORDER BY %[3]s LIMIT %[4]d -) -DELETE FROM %[2]s WHERE %[1]s IN (SELECT %[1]s FROM rows)`, stmt.PK, stmt.Table, stmt.Column, limit) - default: - panic(fmt.Sprintf("invalid database type %s", driverName)) - } -} - // CleanupOlderThan deletes all rows with the specified statement that are older than the given time. // Deletes a maximum of as many rows per round as defined in count. Actually deleted rows will be passed to onSuccess. // Returns the total number of rows deleted. 
-func (db *DB) CleanupOlderThan( - ctx context.Context, stmt CleanupStmt, envId types.Binary, - count uint64, olderThan time.Time, onSuccess ...OnSuccess[struct{}], +func (stmt *CleanupStmt) CleanupOlderThan( + ctx context.Context, db *database.DB, envId types.Binary, + count uint64, olderThan time.Time, onSuccess ...database.OnSuccess[struct{}], ) (uint64, error) { var counter com.Counter - q := db.Rebind(stmt.Build(db.DriverName(), count)) + q := db.Rebind(stmt.build(db.DriverName(), count)) - defer db.log(ctx, q, &counter).Stop() + defer db.Log(ctx, q, &counter).Stop() for { var rowsDeleted int64 @@ -58,7 +42,7 @@ func (db *DB) CleanupOlderThan( Time: types.UnixMilli(olderThan), }) if err != nil { - return internal.CantPerformQuery(err, q) + return database.CantPerformQuery(err, q) } rowsDeleted, err = rs.RowsAffected() @@ -67,7 +51,7 @@ func (db *DB) CleanupOlderThan( }, retry.Retryable, backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), - db.getDefaultRetrySettings(), + db.GetDefaultRetrySettings(), ) if err != nil { return 0, err @@ -89,6 +73,22 @@ func (db *DB) CleanupOlderThan( return counter.Total(), nil } +// build assembles the cleanup statement for the specified database driver with the given limit. +func (stmt *CleanupStmt) build(driverName string, limit uint64) string { + switch driverName { + case database.MySQL: + return fmt.Sprintf(`DELETE FROM %[1]s WHERE environment_id = :environment_id AND %[2]s < :time +ORDER BY %[2]s LIMIT %[3]d`, stmt.Table, stmt.Column, limit) + case database.PostgreSQL: + return fmt.Sprintf(`WITH rows AS ( +SELECT %[1]s FROM %[2]s WHERE environment_id = :environment_id AND %[3]s < :time ORDER BY %[3]s LIMIT %[4]d +) +DELETE FROM %[2]s WHERE %[1]s IN (SELECT %[1]s FROM rows)`, stmt.PK, stmt.Table, stmt.Column, limit) + default: + panic(fmt.Sprintf("invalid database type %s", driverName)) + } +} + type cleanupWhere struct { EnvironmentId types.Binary Time types.UnixMilli diff --git a/pkg/icingadb/db.go b/pkg/icingadb/db.go deleted file mode 100644 index 47940af9e..000000000 --- a/pkg/icingadb/db.go +++ /dev/null @@ -1,713 +0,0 @@ -package icingadb - -import ( - "context" - "fmt" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/retry" - "github.com/icinga/icingadb/pkg/utils" - "github.com/jmoiron/sqlx" - "github.com/pkg/errors" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" - "reflect" - "strings" - "sync" - "time" -) - -// DB is a wrapper around sqlx.DB with bulk execution, -// statement building, streaming and logging capabilities. -type DB struct { - *sqlx.DB - - Options *Options - - logger *logging.Logger - tableSemaphores map[string]*semaphore.Weighted - tableSemaphoresMu sync.Mutex -} - -// Options define user configurable database options. -type Options struct { - // Maximum number of open connections to the database. - MaxConnections int `yaml:"max_connections" default:"16"` - - // Maximum number of connections per table, - // regardless of what the connection is actually doing, - // e.g. INSERT, UPDATE, DELETE. - MaxConnectionsPerTable int `yaml:"max_connections_per_table" default:"8"` - - // MaxPlaceholdersPerStatement defines the maximum number of placeholders in an - // INSERT, UPDATE or DELETE statement. 
Theoretically, MySQL can handle up to 2^16-1 placeholders, - // but this increases the execution time of queries and thus reduces the number of queries - // that can be executed in parallel in a given time. - // The default is 2^13, which in our tests showed the best performance in terms of execution time and parallelism. - MaxPlaceholdersPerStatement int `yaml:"max_placeholders_per_statement" default:"8192"` - - // MaxRowsPerTransaction defines the maximum number of rows per transaction. - // The default is 2^13, which in our tests showed the best performance in terms of execution time and parallelism. - MaxRowsPerTransaction int `yaml:"max_rows_per_transaction" default:"8192"` - - // WsrepSyncWait enforces Galera cluster nodes to perform strict cluster-wide causality checks - // before executing specific SQL queries determined by the number you provided. - // Please refer to the below link for a detailed description. - // https://icinga.com/docs/icinga-db/latest/doc/03-Configuration/#galera-cluster - WsrepSyncWait int `yaml:"wsrep_sync_wait" default:"7"` -} - -// Validate checks constraints in the supplied database options and returns an error if they are violated. -func (o *Options) Validate() error { - if o.MaxConnections == 0 { - return errors.New("max_connections cannot be 0. Configure a value greater than zero, or use -1 for no connection limit") - } - if o.MaxConnectionsPerTable < 1 { - return errors.New("max_connections_per_table must be at least 1") - } - if o.MaxPlaceholdersPerStatement < 1 { - return errors.New("max_placeholders_per_statement must be at least 1") - } - if o.MaxRowsPerTransaction < 1 { - return errors.New("max_rows_per_transaction must be at least 1") - } - if o.WsrepSyncWait < 0 || o.WsrepSyncWait > 15 { - return errors.New("wsrep_sync_wait can only be set to a number between 0 and 15") - } - - return nil -} - -// NewDb returns a new icingadb.DB wrapper for a pre-existing *sqlx.DB. -func NewDb(db *sqlx.DB, logger *logging.Logger, options *Options) *DB { - return &DB{ - DB: db, - logger: logger, - Options: options, - tableSemaphores: make(map[string]*semaphore.Weighted), - } -} - -const ( - expectedMysqlSchemaVersion = 5 - expectedPostgresSchemaVersion = 3 -) - -// CheckSchema asserts the database schema of the expected version being present. -func (db *DB) CheckSchema(ctx context.Context) error { - var expectedDbSchemaVersion uint16 - switch db.DriverName() { - case MySQL: - expectedDbSchemaVersion = expectedMysqlSchemaVersion - case PostgreSQL: - expectedDbSchemaVersion = expectedPostgresSchemaVersion - } - - var version uint16 - - err := retry.WithBackoff( - ctx, - func(ctx context.Context) (err error) { - query := "SELECT version FROM icingadb_schema ORDER BY id DESC LIMIT 1" - err = db.QueryRowxContext(ctx, query).Scan(&version) - if err != nil { - err = internal.CantPerformQuery(err, query) - } - return - }, - retry.Retryable, - backoff.NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute), - db.getDefaultRetrySettings()) - if err != nil { - return errors.Wrap(err, "can't check database schema version") - } - - if version != expectedDbSchemaVersion { - // Since these error messages are trivial and mostly caused by users, we don't need - // to print a stack trace here. However, since errors.Errorf() does this automatically, - // we need to use fmt instead. 
- return fmt.Errorf( - "unexpected database schema version: v%d (expected v%d), please make sure you have applied all database"+ - " migrations after upgrading Icinga DB", version, expectedDbSchemaVersion, - ) - } - - return nil -} - -// BuildColumns returns all columns of the given struct. -func (db *DB) BuildColumns(subject interface{}) []string { - fields := db.Mapper.TypeMap(reflect.TypeOf(subject)).Names - columns := make([]string, 0, len(fields)) - for _, f := range fields { - if f.Field.Tag == "" { - continue - } - columns = append(columns, f.Name) - } - - return columns -} - -// BuildDeleteStmt returns a DELETE statement for the given struct. -func (db *DB) BuildDeleteStmt(from interface{}) string { - return fmt.Sprintf( - `DELETE FROM "%s" WHERE id IN (?)`, - utils.TableName(from), - ) -} - -// BuildInsertStmt returns an INSERT INTO statement for the given struct. -func (db *DB) BuildInsertStmt(into interface{}) (string, int) { - columns := db.BuildColumns(into) - - return fmt.Sprintf( - `INSERT INTO "%s" ("%s") VALUES (%s)`, - utils.TableName(into), - strings.Join(columns, `", "`), - fmt.Sprintf(":%s", strings.Join(columns, ", :")), - ), len(columns) -} - -// BuildInsertIgnoreStmt returns an INSERT statement for the specified struct for -// which the database ignores rows that have already been inserted. -func (db *DB) BuildInsertIgnoreStmt(into interface{}) (string, int) { - table := utils.TableName(into) - columns := db.BuildColumns(into) - var clause string - - switch db.DriverName() { - case MySQL: - // MySQL treats UPDATE id = id as a no-op. - clause = fmt.Sprintf(`ON DUPLICATE KEY UPDATE "%s" = "%s"`, columns[0], columns[0]) - case PostgreSQL: - clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT pk_%s DO NOTHING", table) - } - - return fmt.Sprintf( - `INSERT INTO "%s" ("%s") VALUES (%s) %s`, - table, - strings.Join(columns, `", "`), - fmt.Sprintf(":%s", strings.Join(columns, ", :")), - clause, - ), len(columns) -} - -// BuildSelectStmt returns a SELECT query that creates the FROM part from the given table struct -// and the column list from the specified columns struct. -func (db *DB) BuildSelectStmt(table interface{}, columns interface{}) string { - q := fmt.Sprintf( - `SELECT "%s" FROM "%s"`, - strings.Join(db.BuildColumns(columns), `", "`), - utils.TableName(table), - ) - - if scoper, ok := table.(contracts.Scoper); ok { - where, _ := db.BuildWhere(scoper.Scope()) - q += ` WHERE ` + where - } - - return q -} - -// BuildUpdateStmt returns an UPDATE statement for the given struct. -func (db *DB) BuildUpdateStmt(update interface{}) (string, int) { - columns := db.BuildColumns(update) - set := make([]string, 0, len(columns)) - - for _, col := range columns { - set = append(set, fmt.Sprintf(`"%s" = :%s`, col, col)) - } - - return fmt.Sprintf( - `UPDATE "%s" SET %s WHERE id = :id`, - utils.TableName(update), - strings.Join(set, ", "), - ), len(columns) + 1 // +1 because of WHERE id = :id -} - -// BuildUpsertStmt returns an upsert statement for the given struct. 
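For reference, what the deleted builders emit for a toy struct (sketch, not part of the diff; assumes TableName derives "endpoint" from the type name):

package icingadb

type endpoint struct {
	Id   []byte `db:"id"`
	Name string `db:"name"`
}

// db.BuildInsertStmt(endpoint{}) returns, with 2 placeholders:
//
//	INSERT INTO "endpoint" ("id", "name") VALUES (:id, :name)
//
// db.BuildUpdateStmt(endpoint{}) returns, with 3 placeholders:
//
//	UPDATE "endpoint" SET "id" = :id, "name" = :name WHERE id = :id
//
// db.BuildInsertIgnoreStmt(endpoint{}) appends, per driver:
//
//	MySQL:      ON DUPLICATE KEY UPDATE "id" = "id"
//	PostgreSQL: ON CONFLICT ON CONSTRAINT pk_endpoint DO NOTHING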
-func (db *DB) BuildUpsertStmt(subject interface{}) (stmt string, placeholders int) { - insertColumns := db.BuildColumns(subject) - table := utils.TableName(subject) - var updateColumns []string - - if upserter, ok := subject.(contracts.Upserter); ok { - updateColumns = db.BuildColumns(upserter.Upsert()) - } else { - updateColumns = insertColumns - } - - var clause, setFormat string - switch db.DriverName() { - case MySQL: - clause = "ON DUPLICATE KEY UPDATE" - setFormat = `"%[1]s" = VALUES("%[1]s")` - case PostgreSQL: - clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT pk_%s DO UPDATE SET", table) - setFormat = `"%[1]s" = EXCLUDED."%[1]s"` - } - - set := make([]string, 0, len(updateColumns)) - - for _, col := range updateColumns { - set = append(set, fmt.Sprintf(setFormat, col)) - } - - return fmt.Sprintf( - `INSERT INTO "%s" ("%s") VALUES (%s) %s %s`, - table, - strings.Join(insertColumns, `", "`), - fmt.Sprintf(":%s", strings.Join(insertColumns, ",:")), - clause, - strings.Join(set, ","), - ), len(insertColumns) -} - -// BuildWhere returns a WHERE clause with named placeholder conditions built from the specified struct -// combined with the AND operator. -func (db *DB) BuildWhere(subject interface{}) (string, int) { - columns := db.BuildColumns(subject) - where := make([]string, 0, len(columns)) - for _, col := range columns { - where = append(where, fmt.Sprintf(`"%s" = :%s`, col, col)) - } - - return strings.Join(where, ` AND `), len(columns) -} - -// OnSuccess is a callback for successful (bulk) DML operations. -type OnSuccess[T any] func(ctx context.Context, affectedRows []T) (err error) - -func OnSuccessIncrement[T any](counter *com.Counter) OnSuccess[T] { - return func(_ context.Context, rows []T) error { - counter.Add(uint64(len(rows))) - return nil - } -} - -func OnSuccessSendTo[T any](ch chan<- T) OnSuccess[T] { - return func(ctx context.Context, rows []T) error { - for _, row := range rows { - select { - case ch <- row: - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - } -} - -// BulkExec bulk executes queries with a single slice placeholder in the form of `IN (?)`. -// Takes in up to the number of arguments specified in count from the arg stream, -// derives and expands a query and executes it with this set of arguments until the arg stream has been processed. -// The derived queries are executed in a separate goroutine with a weighting of 1 -// and can be executed concurrently to the extent allowed by the semaphore passed in sem. -// Arguments for which the query ran successfully will be passed to onSuccess. -func (db *DB) BulkExec( - ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan any, onSuccess ...OnSuccess[any], -) error { - var counter com.Counter - defer db.log(ctx, query, &counter).Stop() - - g, ctx := errgroup.WithContext(ctx) - // Use context from group. - bulk := com.Bulk(ctx, arg, count, com.NeverSplit[any]) - - g.Go(func() error { - g, ctx := errgroup.WithContext(ctx) - - for b := range bulk { - if err := sem.Acquire(ctx, 1); err != nil { - return errors.Wrap(err, "can't acquire semaphore") - } - - g.Go(func(b []interface{}) func() error { - return func() error { - defer sem.Release(1) - - return retry.WithBackoff( - ctx, - func(context.Context) error { - stmt, args, err := sqlx.In(query, b) - if err != nil { - return errors.Wrapf(err, "can't build placeholders for %q", query) - } - - stmt = db.Rebind(stmt) - _, err = db.ExecContext(ctx, stmt, args...) 
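// Sketch (not part of the diff): the OnSuccess callbacks defined above
// compose at call sites such as DeleteStreamed, names illustrative:
//
//	var deleted com.Counter
//	err := db.DeleteStreamed(ctx, entityType, ids,
//		OnSuccessIncrement[any](&deleted), // count affected rows
//		OnSuccessSendTo[any](flushCh),     // and stream them onwards
//	)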
- if err != nil { - return internal.CantPerformQuery(err, query) - } - - counter.Add(uint64(len(b))) - - for _, onSuccess := range onSuccess { - if err := onSuccess(ctx, b); err != nil { - return err - } - } - - return nil - }, - retry.Retryable, - backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), - db.getDefaultRetrySettings(), - ) - } - }(b)) - } - - return g.Wait() - }) - - return g.Wait() -} - -// NamedBulkExec bulk executes queries with named placeholders in a VALUES clause most likely -// in the format INSERT ... VALUES. Takes in up to the number of entities specified in count -// from the arg stream, derives and executes a new query with the VALUES clause expanded to -// this set of arguments, until the arg stream has been processed. -// The queries are executed in a separate goroutine with a weighting of 1 -// and can be executed concurrently to the extent allowed by the semaphore passed in sem. -// Entities for which the query ran successfully will be passed to onSuccess. -func (db *DB) NamedBulkExec( - ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan contracts.Entity, - splitPolicyFactory com.BulkChunkSplitPolicyFactory[contracts.Entity], onSuccess ...OnSuccess[contracts.Entity], -) error { - var counter com.Counter - defer db.log(ctx, query, &counter).Stop() - - g, ctx := errgroup.WithContext(ctx) - bulk := com.Bulk(ctx, arg, count, splitPolicyFactory) - - g.Go(func() error { - for { - select { - case b, ok := <-bulk: - if !ok { - return nil - } - - if err := sem.Acquire(ctx, 1); err != nil { - return errors.Wrap(err, "can't acquire semaphore") - } - - g.Go(func(b []contracts.Entity) func() error { - return func() error { - defer sem.Release(1) - - return retry.WithBackoff( - ctx, - func(ctx context.Context) error { - _, err := db.NamedExecContext(ctx, query, b) - if err != nil { - return internal.CantPerformQuery(err, query) - } - - counter.Add(uint64(len(b))) - - for _, onSuccess := range onSuccess { - if err := onSuccess(ctx, b); err != nil { - return err - } - } - - return nil - }, - retry.Retryable, - backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), - db.getDefaultRetrySettings(), - ) - } - }(b)) - case <-ctx.Done(): - return ctx.Err() - } - } - }) - - return g.Wait() -} - -// NamedBulkExecTx bulk executes queries with named placeholders in separate transactions. -// Takes in up to the number of entities specified in count from the arg stream and -// executes a new transaction that runs a new query for each entity in this set of arguments, -// until the arg stream has been processed. -// The transactions are executed in a separate goroutine with a weighting of 1 -// and can be executed concurrently to the extent allowed by the semaphore passed in sem. 
-func (db *DB) NamedBulkExecTx( - ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan contracts.Entity, -) error { - var counter com.Counter - defer db.log(ctx, query, &counter).Stop() - - g, ctx := errgroup.WithContext(ctx) - bulk := com.Bulk(ctx, arg, count, com.NeverSplit[contracts.Entity]) - - g.Go(func() error { - for { - select { - case b, ok := <-bulk: - if !ok { - return nil - } - - if err := sem.Acquire(ctx, 1); err != nil { - return errors.Wrap(err, "can't acquire semaphore") - } - - g.Go(func(b []contracts.Entity) func() error { - return func() error { - defer sem.Release(1) - - return retry.WithBackoff( - ctx, - func(ctx context.Context) error { - tx, err := db.BeginTxx(ctx, nil) - if err != nil { - return errors.Wrap(err, "can't start transaction") - } - - stmt, err := tx.PrepareNamedContext(ctx, query) - if err != nil { - return errors.Wrap(err, "can't prepare named statement with context in transaction") - } - - for _, arg := range b { - if _, err := stmt.ExecContext(ctx, arg); err != nil { - return errors.Wrap(err, "can't execute statement in transaction") - } - } - - if err := tx.Commit(); err != nil { - return errors.Wrap(err, "can't commit transaction") - } - - counter.Add(uint64(len(b))) - - return nil - }, - retry.Retryable, - backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), - db.getDefaultRetrySettings(), - ) - } - }(b)) - case <-ctx.Done(): - return ctx.Err() - } - } - }) - - return g.Wait() -} - -// BatchSizeByPlaceholders returns how often the specified number of placeholders fits -// into Options.MaxPlaceholdersPerStatement, but at least 1. -func (db *DB) BatchSizeByPlaceholders(n int) int { - s := db.Options.MaxPlaceholdersPerStatement / n - if s > 0 { - return s - } - - return 1 -} - -// YieldAll executes the query with the supplied scope, -// scans each resulting row into an entity returned by the factory function, -// and streams them into a returned channel. -func (db *DB) YieldAll(ctx context.Context, factoryFunc contracts.EntityFactoryFunc, query string, scope interface{}) (<-chan contracts.Entity, <-chan error) { - entities := make(chan contracts.Entity, 1) - g, ctx := errgroup.WithContext(ctx) - - g.Go(func() error { - var counter com.Counter - defer db.log(ctx, query, &counter).Stop() - defer close(entities) - - rows, err := db.NamedQueryContext(ctx, query, scope) - if err != nil { - return internal.CantPerformQuery(err, query) - } - defer rows.Close() - - for rows.Next() { - e := factoryFunc() - - if err := rows.StructScan(e); err != nil { - return errors.Wrapf(err, "can't store query result into a %T: %s", e, query) - } - - select { - case entities <- e: - counter.Inc() - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - }) - - return entities, com.WaitAsync(g) -} - -// CreateStreamed bulk creates the specified entities via NamedBulkExec. -// The insert statement is created using BuildInsertStmt with the first entity from the entities stream. -// Bulk size is controlled via Options.MaxPlaceholdersPerStatement and -// concurrency is controlled via Options.MaxConnectionsPerTable. -// Entities for which the query ran successfully will be passed to onSuccess. 
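Consuming YieldAll from above (sketch, not part of the diff); the error channel must be drained alongside the entities:

package icingadb

import (
	"context"

	"github.com/icinga/icingadb/pkg/contracts"
)

func yieldAllExample(ctx context.Context, db *DB, factory contracts.EntityFactoryFunc, query string, scope any) error {
	entities, errs := db.YieldAll(ctx, factory, query, scope)

	for e := range entities {
		_ = e // process each streamed entity; the channel closes on error or EOF
	}

	// com.WaitAsync yields the errgroup result once the producer is done.
	return <-errs
}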
-func (db *DB) CreateStreamed( - ctx context.Context, entities <-chan contracts.Entity, onSuccess ...OnSuccess[contracts.Entity], -) error { - first, forward, err := com.CopyFirst(ctx, entities) - if first == nil { - return errors.Wrap(err, "can't copy first entity") - } - - sem := db.GetSemaphoreForTable(utils.TableName(first)) - stmt, placeholders := db.BuildInsertStmt(first) - - return db.NamedBulkExec( - ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, - forward, com.NeverSplit[contracts.Entity], onSuccess..., - ) -} - -// CreateIgnoreStreamed bulk creates the specified entities via NamedBulkExec. -// The insert statement is created using BuildInsertIgnoreStmt with the first entity from the entities stream. -// Bulk size is controlled via Options.MaxPlaceholdersPerStatement and -// concurrency is controlled via Options.MaxConnectionsPerTable. -// Entities for which the query ran successfully will be passed to onSuccess. -func (db *DB) CreateIgnoreStreamed( - ctx context.Context, entities <-chan contracts.Entity, onSuccess ...OnSuccess[contracts.Entity], -) error { - first, forward, err := com.CopyFirst(ctx, entities) - if first == nil { - return errors.Wrap(err, "can't copy first entity") - } - - sem := db.GetSemaphoreForTable(utils.TableName(first)) - stmt, placeholders := db.BuildInsertIgnoreStmt(first) - - return db.NamedBulkExec( - ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, - forward, com.SplitOnDupId[contracts.Entity], onSuccess..., - ) -} - -// UpsertStreamed bulk upserts the specified entities via NamedBulkExec. -// The upsert statement is created using BuildUpsertStmt with the first entity from the entities stream. -// Bulk size is controlled via Options.MaxPlaceholdersPerStatement and -// concurrency is controlled via Options.MaxConnectionsPerTable. -// Entities for which the query ran successfully will be passed to onSuccess. -func (db *DB) UpsertStreamed( - ctx context.Context, entities <-chan contracts.Entity, onSuccess ...OnSuccess[contracts.Entity], -) error { - first, forward, err := com.CopyFirst(ctx, entities) - if first == nil { - return errors.Wrap(err, "can't copy first entity") - } - - sem := db.GetSemaphoreForTable(utils.TableName(first)) - stmt, placeholders := db.BuildUpsertStmt(first) - - return db.NamedBulkExec( - ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, - forward, com.SplitOnDupId[contracts.Entity], onSuccess..., - ) -} - -// UpdateStreamed bulk updates the specified entities via NamedBulkExecTx. -// The update statement is created using BuildUpdateStmt with the first entity from the entities stream. -// Bulk size is controlled via Options.MaxRowsPerTransaction and -// concurrency is controlled via Options.MaxConnectionsPerTable. -func (db *DB) UpdateStreamed(ctx context.Context, entities <-chan contracts.Entity) error { - first, forward, err := com.CopyFirst(ctx, entities) - if first == nil { - return errors.Wrap(err, "can't copy first entity") - } - sem := db.GetSemaphoreForTable(utils.TableName(first)) - stmt, _ := db.BuildUpdateStmt(first) - - return db.NamedBulkExecTx(ctx, stmt, db.Options.MaxRowsPerTransaction, sem, forward) -} - -// DeleteStreamed bulk deletes the specified ids via BulkExec. -// The delete statement is created using BuildDeleteStmt with the passed entityType. -// Bulk size is controlled via Options.MaxPlaceholdersPerStatement and -// concurrency is controlled via Options.MaxConnectionsPerTable. -// IDs for which the query ran successfully will be passed to onSuccess. 
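A minimal producer for the streamed writers in this file, against the pre-refactor signatures shown here (sketch, not part of the diff):

package icingadb

import (
	"context"

	"github.com/icinga/icingadb/pkg/contracts"
)

func upsertExample(ctx context.Context, db *DB, rows []contracts.Entity) error {
	entities := make(chan contracts.Entity, len(rows))
	for _, e := range rows {
		entities <- e
	}
	close(entities)

	// Batches by placeholder budget, splits batches on duplicate IDs and
	// runs them through the per-table semaphore.
	return db.UpsertStreamed(ctx, entities)
}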
-func (db *DB) DeleteStreamed( - ctx context.Context, entityType contracts.Entity, ids <-chan interface{}, onSuccess ...OnSuccess[any], -) error { - sem := db.GetSemaphoreForTable(utils.TableName(entityType)) - return db.BulkExec( - ctx, db.BuildDeleteStmt(entityType), db.Options.MaxPlaceholdersPerStatement, sem, ids, onSuccess..., - ) -} - -// Delete creates a channel from the specified ids and -// bulk deletes them by passing the channel along with the entityType to DeleteStreamed. -// IDs for which the query ran successfully will be passed to onSuccess. -func (db *DB) Delete( - ctx context.Context, entityType contracts.Entity, ids []interface{}, onSuccess ...OnSuccess[any], -) error { - idsCh := make(chan interface{}, len(ids)) - for _, id := range ids { - idsCh <- id - } - close(idsCh) - - return db.DeleteStreamed(ctx, entityType, idsCh, onSuccess...) -} - -func (db *DB) GetSemaphoreForTable(table string) *semaphore.Weighted { - db.tableSemaphoresMu.Lock() - defer db.tableSemaphoresMu.Unlock() - - if sem, ok := db.tableSemaphores[table]; ok { - return sem - } else { - sem = semaphore.NewWeighted(int64(db.Options.MaxConnectionsPerTable)) - db.tableSemaphores[table] = sem - return sem - } -} - -func (db *DB) getDefaultRetrySettings() retry.Settings { - return retry.Settings{ - Timeout: retry.DefaultTimeout, - OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) { - if lastErr == nil || err.Error() != lastErr.Error() { - db.logger.Warnw("Can't execute query. Retrying", zap.Error(err)) - } - }, - OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) { - if attempt > 1 { - db.logger.Infow("Query retried successfully after error", - zap.Duration("after", elapsed), - zap.Uint64("attempts", attempt), - zap.NamedError("recovered_error", lastErr)) - } - }, - } -} - -func (db *DB) log(ctx context.Context, query string, counter *com.Counter) periodic.Stopper { - return periodic.Start(ctx, db.logger.Interval(), func(tick periodic.Tick) { - if count := counter.Reset(); count > 0 { - db.logger.Debugf("Executed %q with %d rows", query, count) - } - }, periodic.OnStop(func(tick periodic.Tick) { - db.logger.Debugf("Finished executing %q with %d rows in %s", query, counter.Total(), tick.Elapsed) - })) -} diff --git a/pkg/icingadb/delta.go b/pkg/icingadb/delta.go index 4f6d09894..e370fd03a 100644 --- a/pkg/icingadb/delta.go +++ b/pkg/icingadb/delta.go @@ -3,10 +3,12 @@ package icingadb import ( "context" "fmt" + "github.com/google/go-cmp/cmp" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/common" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/utils" "go.uber.org/zap" "time" ) @@ -23,7 +25,7 @@ type Delta struct { // NewDelta creates a new Delta and starts calculating it. The caller must ensure // that no duplicate entities are sent to the same stream. 
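Driving the delta with its new database.Entity channels (sketch, not part of the diff; the Update/Delete fields are assigned in run below, a matching Create field is assumed alongside them):

package icingadb

import (
	"context"

	"github.com/icinga/icinga-go-library/database"
	"github.com/icinga/icinga-go-library/logging"
	"github.com/icinga/icingadb/pkg/common"
)

func deltaExample(ctx context.Context, actual, desired <-chan database.Entity, subject *common.SyncSubject, logger *logging.Logger) error {
	delta := NewDelta(ctx, actual, desired, subject, logger)
	if err := delta.Wait(); err != nil {
		return err
	}

	// EntitiesById sets, ready for update/delete fan-out.
	_, _ = delta.Update, delta.Delete
	return nil
}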
-func NewDelta(ctx context.Context, actual, desired <-chan contracts.Entity, subject *common.SyncSubject, logger *logging.Logger) *Delta { +func NewDelta(ctx context.Context, actual, desired <-chan database.Entity, subject *common.SyncSubject, logger *logging.Logger) *Delta { delta := &Delta{ Subject: subject, done: make(chan error, 1), @@ -40,7 +42,7 @@ func (delta *Delta) Wait() error { return <-delta.done } -func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan contracts.Entity) { +func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan database.Entity) { defer close(delta.done) start := time.Now() @@ -103,8 +105,8 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan contract delta.Update = update delta.Delete = actual - delta.logger.Debugw(fmt.Sprintf("Finished %s delta", utils.Name(delta.Subject.Entity())), - zap.String("subject", utils.Name(delta.Subject.Entity())), + delta.logger.Debugw(fmt.Sprintf("Finished %s delta", types.Name(delta.Subject.Entity())), + zap.String("subject", types.Name(delta.Subject.Entity())), zap.Duration("time_total", time.Since(start)), zap.Duration("time_actual", endActual.Sub(start)), zap.Duration("time_desired", endDesired.Sub(start)), @@ -117,8 +119,6 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan contract // checksumsMatch returns whether the checksums of two entities are the same. // Both entities must implement contracts.Checksumer. -func checksumsMatch(a, b contracts.Entity) bool { - c1 := a.(contracts.Checksumer).Checksum() - c2 := b.(contracts.Checksumer).Checksum() - return c1.Equal(c2) +func checksumsMatch(a, b database.Entity) bool { + return cmp.Equal(a.(contracts.Checksumer).Checksum(), b.(contracts.Checksumer).Checksum()) } diff --git a/pkg/icingadb/delta_test.go b/pkg/icingadb/delta_test.go index 909cc229e..5067ecd3a 100644 --- a/pkg/icingadb/delta_test.go +++ b/pkg/icingadb/delta_test.go @@ -3,11 +3,12 @@ package icingadb import ( "context" "encoding/binary" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/common" "github.com/icinga/icingadb/pkg/contracts" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -58,11 +59,11 @@ func TestDelta(t *testing.T) { // that only show depending on the order in which actual and desired values are processed for an ID. 
type SendOrder struct { Name string - Send func(id uint64, test TestData, chActual, chDesired chan<- contracts.Entity) + Send func(id uint64, test TestData, chActual, chDesired chan<- database.Entity) } sendOrders := []SendOrder{{ Name: "ActualFirst", - Send: func(id uint64, test TestData, chActual, chDesired chan<- contracts.Entity) { + Send: func(id uint64, test TestData, chActual, chDesired chan<- database.Entity) { if test.Actual != 0 { chActual <- makeEndpoint(id, test.Actual) } @@ -72,7 +73,7 @@ func TestDelta(t *testing.T) { }, }, { Name: "DesiredFirst", - Send: func(id uint64, test TestData, chActual, chDesired chan<- contracts.Entity) { + Send: func(id uint64, test TestData, chActual, chDesired chan<- database.Entity) { if test.Desired != 0 { chDesired <- makeEndpoint(id, test.Desired) } @@ -87,8 +88,8 @@ func TestDelta(t *testing.T) { for _, sendOrder := range sendOrders { t.Run(sendOrder.Name, func(t *testing.T) { id := uint64(0x42) - chActual := make(chan contracts.Entity) - chDesired := make(chan contracts.Entity) + chActual := make(chan database.Entity) + chDesired := make(chan database.Entity) subject := common.NewSyncSubject(v1.NewEndpoint) logger := logging.NewLogger(zaptest.NewLogger(t).Sugar(), time.Second) @@ -116,8 +117,8 @@ func TestDelta(t *testing.T) { } t.Run("Combined", func(t *testing.T) { - chActual := make(chan contracts.Entity) - chDesired := make(chan contracts.Entity) + chActual := make(chan database.Entity) + chDesired := make(chan database.Entity) subject := common.NewSyncSubject(v1.NewEndpoint) logger := logging.NewLogger(zaptest.NewLogger(t).Sugar(), time.Second) @@ -215,11 +216,11 @@ func BenchmarkDelta(b *testing.B) { } func benchmarkDelta(b *testing.B, numEntities int) { - chActual := make([]chan contracts.Entity, b.N) - chDesired := make([]chan contracts.Entity, b.N) + chActual := make([]chan database.Entity, b.N) + chDesired := make([]chan database.Entity, b.N) for i := 0; i < b.N; i++ { - chActual[i] = make(chan contracts.Entity, numEntities) - chDesired[i] = make(chan contracts.Entity, numEntities) + chActual[i] = make(chan database.Entity, numEntities) + chDesired[i] = make(chan database.Entity, numEntities) } makeEndpoint := func(id1, id2, checksum uint64) *v1.Endpoint { e := new(v1.Endpoint) @@ -232,7 +233,7 @@ func benchmarkDelta(b *testing.B, numEntities int) { } for i := 0; i < numEntities; i++ { // each iteration writes exactly one entity to each channel - var eActual, eDesired contracts.Entity + var eActual, eDesired database.Entity switch i % 3 { case 0: // distinct IDs eActual = makeEndpoint(1, uint64(i), uint64(i)) diff --git a/pkg/icingadb/driver.go b/pkg/icingadb/driver.go deleted file mode 100644 index d56491654..000000000 --- a/pkg/icingadb/driver.go +++ /dev/null @@ -1,90 +0,0 @@ -package icingadb - -import ( - "context" - "database/sql/driver" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/retry" - "github.com/pkg/errors" - "go.uber.org/zap" - "time" -) - -// Driver names as automatically registered in the database/sql package by themselves. -const ( - MySQL string = "mysql" - PostgreSQL string = "postgres" -) - -type InitConnFunc func(context.Context, driver.Conn) error - -// RetryConnector wraps driver.Connector with retry logic. -type RetryConnector struct { - driver.Connector - - logger *logging.Logger - - // initConn can be used to execute post Connect() arbitrary actions. 
- // It will be called after successfully initiated a new connection using the connector's Connect method. - initConn InitConnFunc -} - -// NewConnector creates a fully initialized RetryConnector from the given args. -func NewConnector(c driver.Connector, logger *logging.Logger, init InitConnFunc) *RetryConnector { - return &RetryConnector{Connector: c, logger: logger, initConn: init} -} - -// Connect implements part of the driver.Connector interface. -func (c RetryConnector) Connect(ctx context.Context) (driver.Conn, error) { - var conn driver.Conn - err := errors.Wrap(retry.WithBackoff( - ctx, - func(ctx context.Context) (err error) { - conn, err = c.Connector.Connect(ctx) - if err == nil && c.initConn != nil { - if err = c.initConn(ctx, conn); err != nil { - // We're going to retry this, so just don't bother whether Close() fails! - _ = conn.Close() - } - } - - return - }, - retry.Retryable, - backoff.NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute), - retry.Settings{ - Timeout: retry.DefaultTimeout, - OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) { - telemetry.UpdateCurrentDbConnErr(err) - - if lastErr == nil || err.Error() != lastErr.Error() { - c.logger.Warnw("Can't connect to database. Retrying", zap.Error(err)) - } - }, - OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) { - telemetry.UpdateCurrentDbConnErr(nil) - - if attempt > 1 { - c.logger.Infow("Reconnected to database", - zap.Duration("after", elapsed), zap.Uint64("attempts", attempt)) - } - }, - }, - ), "can't connect to database") - return conn, err -} - -// Driver implements part of the driver.Connector interface. -func (c RetryConnector) Driver() driver.Driver { - return c.Connector.Driver() -} - -// MysqlFuncLogger is an adapter that allows ordinary functions to be used as a logger for mysql.SetLogger. -type MysqlFuncLogger func(v ...interface{}) - -// Print implements the mysql.Logger interface. -func (log MysqlFuncLogger) Print(v ...interface{}) { - log(v) -} diff --git a/pkg/icingadb/dump_signals.go b/pkg/icingadb/dump_signals.go index 2f8b46e6b..89595f309 100644 --- a/pkg/icingadb/dump_signals.go +++ b/pkg/icingadb/dump_signals.go @@ -2,10 +2,9 @@ package icingadb import ( "context" - "github.com/icinga/icingadb/pkg/icingaredis" - "github.com/icinga/icingadb/pkg/logging" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "go.uber.org/zap" "sync" ) @@ -13,7 +12,7 @@ import ( // DumpSignals reads dump signals from a Redis stream via Listen. // Dump-done signals are passed on via Done channels, while InProgress must be checked for dump-wip signals. type DumpSignals struct { - redis *icingaredis.Client + redis *redis.Client logger *logging.Logger mutex sync.Mutex doneCh map[string]chan struct{} @@ -22,7 +21,7 @@ type DumpSignals struct { } // NewDumpSignals returns new DumpSignals. -func NewDumpSignals(redis *icingaredis.Client, logger *logging.Logger) *DumpSignals { +func NewDumpSignals(redis *redis.Client, logger *logging.Logger) *DumpSignals { return &DumpSignals{ redis: redis, logger: logger, diff --git a/pkg/icingadb/entitiesbyid.go b/pkg/icingadb/entitiesbyid.go index b40050e10..019e15f10 100644 --- a/pkg/icingadb/entitiesbyid.go +++ b/pkg/icingadb/entitiesbyid.go @@ -2,11 +2,11 @@ package icingadb import ( "context" - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" ) // EntitiesById is a map of key-contracts.Entity pairs. 
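// Sketch (not part of the diff): the deleted Database.Open earlier in this
// diff glued the RetryConnector in like so:
//
//	c, _ := mysql.NewConnector(cfg)
//	db := sqlx.NewDb(sql.OpenDB(icingadb.NewConnector(c, logger, initConn)), icingadb.MySQL)
//
// Connect() retries with jittered backoff, feeds telemetry, and closes
// half-initialized connections when the init hook fails.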
-type EntitiesById map[string]contracts.Entity +type EntitiesById map[string]database.Entity // Keys returns the keys. func (ebi EntitiesById) Keys() []string { @@ -22,15 +22,15 @@ func (ebi EntitiesById) Keys() []string { func (ebi EntitiesById) IDs() []interface{} { ids := make([]interface{}, 0, len(ebi)) for _, v := range ebi { - ids = append(ids, v.(contracts.IDer).ID()) + ids = append(ids, v.(database.IDer).ID()) } return ids } // Entities streams the entities on a returned channel. -func (ebi EntitiesById) Entities(ctx context.Context) <-chan contracts.Entity { - entities := make(chan contracts.Entity) +func (ebi EntitiesById) Entities(ctx context.Context) <-chan database.Entity { + entities := make(chan database.Entity) go func() { defer close(entities) diff --git a/pkg/icingadb/ha.go b/pkg/icingadb/ha.go index cc32a4b37..6460ac32d 100644 --- a/pkg/icingadb/ha.go +++ b/pkg/icingadb/ha.go @@ -6,16 +6,16 @@ import ( "database/sql" "encoding/hex" "github.com/google/uuid" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/icinga/icingadb/pkg/com" + "github.com/icinga/icinga-go-library/backoff" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/retry" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingaredis" icingaredisv1 "github.com/icinga/icingadb/pkg/icingaredis/v1" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/retry" - "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" "go.uber.org/zap" "sync" @@ -39,7 +39,7 @@ type HA struct { ctx context.Context cancelCtx context.CancelFunc instanceId types.Binary - db *DB + db *database.DB environmentMu sync.Mutex environment *v1.Environment heartbeat *icingaredis.Heartbeat @@ -54,7 +54,7 @@ type HA struct { } // NewHA returns a new HA and starts the controller loop. -func NewHA(ctx context.Context, db *DB, heartbeat *icingaredis.Heartbeat, logger *logging.Logger) *HA { +func NewHA(ctx context.Context, db *database.DB, heartbeat *icingaredis.Heartbeat, logger *logging.Logger) *HA { ctx, cancelCtx := context.WithCancel(ctx) instanceId := uuid.New() @@ -289,7 +289,7 @@ func (h *HA) realize( isoLvl := sql.LevelSerializable selectLock := "" - if h.db.DriverName() == MySQL { + if h.db.DriverName() == database.MySQL { // The RDBMS may actually be a Percona XtraDB Cluster which doesn't // support serializable transactions, but only their following equivalent: isoLvl = sql.LevelRepeatableRead @@ -338,7 +338,7 @@ func (h *HA) realize( } default: - return internal.CantPerformQuery(errQuery, query) + return database.CantPerformQuery(errQuery, query) } i := v1.IcingadbInstance{ @@ -365,7 +365,7 @@ func (h *HA) realize( stmt, _ := h.db.BuildUpsertStmt(i) if _, err := tx.NamedExecContext(ctx, stmt, i); err != nil { - return internal.CantPerformQuery(err, stmt) + return database.CantPerformQuery(err, stmt) } if takeover != "" { @@ -373,7 +373,7 @@ func (h *HA) realize( _, err := tx.ExecContext(ctx, stmt, "n", envId, h.instanceId) if err != nil { - return internal.CantPerformQuery(err, stmt) + return database.CantPerformQuery(err, stmt) } } @@ -441,7 +441,7 @@ func (h *HA) realize( func (h *HA) realizeLostHeartbeat() { stmt := h.db.Rebind("UPDATE icingadb_instance SET responsible = ? 
WHERE id = ?") if _, err := h.db.ExecContext(h.ctx, stmt, "n", h.instanceId); err != nil && !utils.IsContextCanceled(err) { - h.logger.Warnw("Can't update instance", zap.Error(internal.CantPerformQuery(err, stmt))) + h.logger.Warnw("Can't update instance", zap.Error(database.CantPerformQuery(err, stmt))) } } @@ -451,7 +451,7 @@ func (h *HA) insertEnvironment() error { stmt, _ := h.db.BuildInsertIgnoreStmt(h.environment) if _, err := h.db.NamedExecContext(h.ctx, stmt, h.environment); err != nil { - return internal.CantPerformQuery(err, stmt) + return database.CantPerformQuery(err, stmt) } return nil diff --git a/pkg/icingadb/history/retention.go b/pkg/icingadb/history/retention.go index ff217cdd5..f10048cf2 100644 --- a/pkg/icingadb/history/retention.go +++ b/pkg/icingadb/history/retention.go @@ -3,11 +3,12 @@ package history import ( "context" "fmt" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" "github.com/icinga/icingadb/pkg/icingadb" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" "github.com/pkg/errors" "go.uber.org/zap" "time" @@ -117,7 +118,7 @@ func (o RetentionOptions) Validate() error { // Retention deletes rows from history tables that exceed their configured retention period. type Retention struct { - db *icingadb.DB + db *database.DB logger *logging.Logger historyDays uint64 slaDays uint64 @@ -128,7 +129,7 @@ type Retention struct { // NewRetention returns a new Retention. func NewRetention( - db *icingadb.DB, historyDays uint64, slaDays uint64, interval time.Duration, + db *database.DB, historyDays uint64, slaDays uint64, interval time.Duration, count uint64, options RetentionOptions, logger *logging.Logger, ) *Retention { return &Retention{ @@ -186,9 +187,9 @@ func (r *Retention) Start(ctx context.Context) error { r.logger.Debugf("Cleaning up historical data for category %s from table %s older than %s", stmt.Category, stmt.Table, olderThan) - deleted, err := r.db.CleanupOlderThan( - ctx, stmt.CleanupStmt, e.Id, r.count, olderThan, - icingadb.OnSuccessIncrement[struct{}](&telemetry.Stats.HistoryCleanup), + deleted, err := stmt.CleanupOlderThan( + ctx, r.db, e.Id, r.count, olderThan, + database.OnSuccessIncrement[struct{}](&telemetry.Stats.HistoryCleanup), ) if err != nil { select { diff --git a/pkg/icingadb/history/sla.go b/pkg/icingadb/history/sla.go index 7c0849e61..caf96a561 100644 --- a/pkg/icingadb/history/sla.go +++ b/pkg/icingadb/history/sla.go @@ -1,14 +1,18 @@ package history import ( + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/structify" + "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icingadb/pkg/icingadb/types" "github.com/icinga/icingadb/pkg/icingadb/v1/history" - "github.com/icinga/icingadb/pkg/structify" - "github.com/icinga/icingadb/pkg/types" - "github.com/redis/go-redis/v9" "reflect" ) -var slaStateStructify = structify.MakeMapStructifier(reflect.TypeOf((*history.SlaHistoryState)(nil)).Elem(), "json") +var slaStateStructify = structify.MakeMapStructifier( + reflect.TypeOf((*history.SlaHistoryState)(nil)).Elem(), + "json", + contracts.SafeInit) func stateHistoryToSlaEntity(entry redis.XMessage) ([]history.UpserterEntity, error) { slaStateInterface, err := slaStateStructify(entry.Values) diff --git a/pkg/icingadb/history/sync.go b/pkg/icingadb/history/sync.go index 
4be0e71f9..5838391f6 100644 --- a/pkg/icingadb/history/sync.go +++ b/pkg/icingadb/history/sync.go @@ -2,21 +2,19 @@ package history import ( "context" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/com" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/structify" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/icingadb" v1types "github.com/icinga/icingadb/pkg/icingadb/v1" v1 "github.com/icinga/icingadb/pkg/icingadb/v1/history" - "github.com/icinga/icingadb/pkg/icingaredis" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/structify" - "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "golang.org/x/sync/errgroup" "reflect" "sync" @@ -24,13 +22,13 @@ import ( // Sync specifies the source and destination of a history sync. type Sync struct { - db *icingadb.DB - redis *icingaredis.Client + db *database.DB + redis *redis.Client logger *logging.Logger } // NewSync creates a new Sync. -func NewSync(db *icingadb.DB, redis *icingaredis.Client, logger *logging.Logger) *Sync { +func NewSync(db *database.DB, redis *redis.Client, logger *logging.Logger) *Sync { return &Sync{ db: db, redis: redis, @@ -156,7 +154,7 @@ func (s Sync) deleteFromRedis(ctx context.Context, key string, input <-chan redi cmd := s.redis.XDel(ctx, stream, ids...) if _, err := cmd.Result(); err != nil { - return icingaredis.WrapCmdErr(cmd) + return redis.WrapCmdErr(cmd) } counter.Add(uint64(len(ids))) @@ -180,7 +178,10 @@ type stageFunc func(ctx context.Context, s Sync, key string, in <-chan redis.XMe // For each history event it receives, it parses that event into a new instance of that entity type and writes it to // the database. It writes exactly one entity to the database for each history event. func writeOneEntityStage(structPtr interface{}) stageFunc { - structifier := structify.MakeMapStructifier(reflect.TypeOf(structPtr).Elem(), "json") + structifier := structify.MakeMapStructifier( + reflect.TypeOf(structPtr).Elem(), + "json", + contracts.SafeInit) return writeMultiEntityStage(func(entry redis.XMessage) ([]v1.UpserterEntity, error) { ptr, err := structifier(entry.Values) @@ -201,11 +202,11 @@ func writeMultiEntityStage(entryToEntities func(entry redis.XMessage) ([]v1.Upse } bufSize := s.db.Options.MaxPlaceholdersPerStatement - insert := make(chan contracts.Entity, bufSize) // Events sent to the database for insertion. - inserted := make(chan contracts.Entity) // Events returned by the database after successful insertion. - skipped := make(chan redis.XMessage) // Events skipping insert/inserted (no entities generated). - state := make(map[contracts.Entity]*State) // Shared state between all entities created by one event. - var stateMu sync.Mutex // Synchronizes concurrent access to state. + insert := make(chan database.Entity, bufSize) // Events sent to the database for insertion. + inserted := make(chan database.Entity) // Events returned by the database after successful insertion. 
+ skipped := make(chan redis.XMessage) // Events skipping insert/inserted (no entities generated). + state := make(map[database.Entity]*State) // Shared state between all entities created by one event. + var stateMu sync.Mutex // Synchronizes concurrent access to state. g, ctx := errgroup.WithContext(ctx) @@ -257,7 +258,7 @@ func writeMultiEntityStage(entryToEntities func(entry redis.XMessage) ([]v1.Upse g.Go(func() error { defer close(inserted) - return s.db.UpsertStreamed(ctx, insert, icingadb.OnSuccessSendTo[contracts.Entity](inserted)) + return s.db.UpsertStreamed(ctx, insert, database.OnSuccessSendTo[database.Entity](inserted)) }) g.Go(func() error { @@ -316,7 +317,10 @@ func userNotificationStage(ctx context.Context, s Sync, key string, in <-chan re UserIds types.String `structify:"users_notified_ids"` } - structifier := structify.MakeMapStructifier(reflect.TypeOf((*NotificationHistory)(nil)).Elem(), "structify") + structifier := structify.MakeMapStructifier( + reflect.TypeOf((*NotificationHistory)(nil)).Elem(), + "structify", + contracts.SafeInit) return writeMultiEntityStage(func(entry redis.XMessage) ([]v1.UpserterEntity, error) { rawNotificationHistory, err := structifier(entry.Values) @@ -330,7 +334,7 @@ func userNotificationStage(ctx context.Context, s Sync, key string, in <-chan re } var users []types.Binary - err = internal.UnmarshalJSON([]byte(notificationHistory.UserIds.String), &users) + err = types.UnmarshalJSON([]byte(notificationHistory.UserIds.String), &users) if err != nil { return nil, err } diff --git a/pkg/icingadb/objectpacker/objectpacker.go b/pkg/icingadb/objectpacker/objectpacker.go deleted file mode 100644 index 015274599..000000000 --- a/pkg/icingadb/objectpacker/objectpacker.go +++ /dev/null @@ -1,213 +0,0 @@ -package objectpacker - -import ( - "bytes" - "encoding/binary" - "fmt" - "github.com/pkg/errors" - "io" - "reflect" - "sort" -) - -// MustPackSlice calls PackAny using items and panics if there was an error. -func MustPackSlice(items ...interface{}) []byte { - var buf bytes.Buffer - - if err := PackAny(items, &buf); err != nil { - panic(err) - } - - return buf.Bytes() -} - -// PackAny packs any JSON-encodable value (ex. structs, also ignores interfaces like encoding.TextMarshaler) -// to a BSON-similar format suitable for consistent hashing. Spec: -// -// PackAny(nil) => 0x0 -// PackAny(false) => 0x1 -// PackAny(true) => 0x2 -// PackAny(float64(42)) => 0x3 ieee754_binary64_bigendian(42) -// PackAny("exämple") => 0x4 uint64_bigendian(len([]byte("exämple"))) []byte("exämple") -// PackAny([]uint8{0x42}) => 0x4 uint64_bigendian(len([]uint8{0x42})) []uint8{0x42} -// PackAny([1]uint8{0x42}) => 0x4 uint64_bigendian(len([1]uint8{0x42})) [1]uint8{0x42} -// PackAny([]T{x,y}) => 0x5 uint64_bigendian(len([]T{x,y})) PackAny(x) PackAny(y) -// PackAny(map[K]V{x:y}) => 0x6 uint64_bigendian(len(map[K]V{x:y})) len(map_key(x)) map_key(x) PackAny(y) -// PackAny((*T)(nil)) => 0x0 -// PackAny((*T)(0x42)) => PackAny(*(*T)(0x42)) -// PackAny(x) => panic() -// -// map_key([1]uint8{0x42}) => [1]uint8{0x42} -// map_key(x) => []byte(fmt.Sprint(x)) -func PackAny(in interface{}, out io.Writer) error { - return errors.Wrapf(packValue(reflect.ValueOf(in), out), "can't pack %#v", in) -} - -var tByte = reflect.TypeOf(byte(0)) -var tBytes = reflect.TypeOf([]uint8(nil)) - -// packValue does the actual job of packAny and just exists for recursion w/o unnecessary reflect.ValueOf calls. 
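The spec above is what guarantees hashing stability: map entries are packed in sorted byte order of their keys, so two semantically equal values always produce identical bytes. A small sketch against the removed API (which this series moves to icinga-go-library/objectpacker, as the customvar.go hunk further down imports):

func packExample() []byte {
	var buf bytes.Buffer

	// Per the spec: 0x6, big-endian uint64 length 2, then the entries for
	// keys "a" (value nil => 0x0) and "b" (value true => 0x2), in that
	// order, independent of Go's randomized map iteration.
	if err := PackAny(map[string]interface{}{"b": true, "a": nil}, &buf); err != nil {
		panic(err)
	}

	return buf.Bytes()
}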
-func packValue(in reflect.Value, out io.Writer) error { - switch kind := in.Kind(); kind { - case reflect.Invalid: // nil - _, err := out.Write([]byte{0}) - return err - case reflect.Bool: - if in.Bool() { - _, err := out.Write([]byte{2}) - return err - } else { - _, err := out.Write([]byte{1}) - return err - } - case reflect.Float64: - if _, err := out.Write([]byte{3}); err != nil { - return err - } - - return binary.Write(out, binary.BigEndian, in.Float()) - case reflect.Array, reflect.Slice: - if typ := in.Type(); typ.Elem() == tByte { - if kind == reflect.Array { - if !in.CanAddr() { - vNewElem := reflect.New(typ).Elem() - vNewElem.Set(in) - in = vNewElem - } - - in = in.Slice(0, in.Len()) - } - - // Pack []byte as string, not array of numbers. - return packString(in.Convert(tBytes). // Support types.Binary - Interface().([]uint8), out) - } - - if _, err := out.Write([]byte{5}); err != nil { - return err - } - - l := in.Len() - if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil { - return err - } - - for i := 0; i < l; i++ { - if err := packValue(in.Index(i), out); err != nil { - return err - } - } - - // If there aren't any values to pack, ... - if l < 1 { - // ... create one and pack it - panics on disallowed type. - _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard) - } - - return nil - case reflect.Interface: - return packValue(in.Elem(), out) - case reflect.Map: - type kv struct { - key []byte - value reflect.Value - } - - if _, err := out.Write([]byte{6}); err != nil { - return err - } - - l := in.Len() - if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil { - return err - } - - sorted := make([]kv, 0, l) - - { - iter := in.MapRange() - for iter.Next() { - var packedKey []byte - if key := iter.Key(); key.Kind() == reflect.Array { - if typ := key.Type(); typ.Elem() == tByte { - if !key.CanAddr() { - vNewElem := reflect.New(typ).Elem() - vNewElem.Set(key) - key = vNewElem - } - - packedKey = key.Slice(0, key.Len()).Interface().([]byte) - } else { - // Not just stringify the key (below), but also pack it (here) - panics on disallowed type. - _ = packValue(iter.Key(), io.Discard) - - packedKey = []byte(fmt.Sprint(key.Interface())) - } - } else { - // Not just stringify the key (below), but also pack it (here) - panics on disallowed type. - _ = packValue(iter.Key(), io.Discard) - - packedKey = []byte(fmt.Sprint(key.Interface())) - } - - sorted = append(sorted, kv{packedKey, iter.Value()}) - } - } - - sort.Slice(sorted, func(i, j int) bool { return bytes.Compare(sorted[i].key, sorted[j].key) < 0 }) - - for _, kv := range sorted { - if err := binary.Write(out, binary.BigEndian, uint64(len(kv.key))); err != nil { - return err - } - - if _, err := out.Write(kv.key); err != nil { - return err - } - - if err := packValue(kv.value, out); err != nil { - return err - } - } - - // If there aren't any key-value pairs to pack, ... - if l < 1 { - typ := in.Type() - - // ... create one and pack it - panics on disallowed type. - _ = packValue(reflect.Zero(typ.Key()), io.Discard) - _ = packValue(reflect.Zero(typ.Elem()), io.Discard) - } - - return nil - case reflect.Ptr: - if in.IsNil() { - err := packValue(reflect.Value{}, out) - - // Create a fictive referenced value and pack it - panics on disallowed type. 
- _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard) - - return err - } else { - return packValue(in.Elem(), out) - } - case reflect.String: - return packString([]byte(in.String()), out) - default: - panic("bad type: " + in.Kind().String()) - } -} - -// packString deduplicates string packing of multiple locations in packValue. -func packString(in []byte, out io.Writer) error { - if _, err := out.Write([]byte{4}); err != nil { - return err - } - - if err := binary.Write(out, binary.BigEndian, uint64(len(in))); err != nil { - return err - } - - _, err := out.Write(in) - return err -} diff --git a/pkg/icingadb/objectpacker/objectpacker_test.go b/pkg/icingadb/objectpacker/objectpacker_test.go deleted file mode 100644 index e377d7736..000000000 --- a/pkg/icingadb/objectpacker/objectpacker_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package objectpacker - -import ( - "bytes" - "github.com/icinga/icingadb/pkg/types" - "github.com/pkg/errors" - "io" - "testing" -) - -// limitedWriter allows writing a specific amount of data. -type limitedWriter struct { - // limit specifies how many bytes to allow to write. - limit int -} - -var _ io.Writer = (*limitedWriter)(nil) - -// Write returns io.EOF once lw.limit is exceeded, nil otherwise. -func (lw *limitedWriter) Write(p []byte) (n int, err error) { - if len(p) <= lw.limit { - lw.limit -= len(p) - return len(p), nil - } - - n = lw.limit - err = io.EOF - - lw.limit = 0 - return -} - -func TestLimitedWriter_Write(t *testing.T) { - assertLimitedWriter_Write(t, 3, []byte{1, 2}, 2, nil, 1) - assertLimitedWriter_Write(t, 3, []byte{1, 2, 3}, 3, nil, 0) - assertLimitedWriter_Write(t, 3, []byte{1, 2, 3, 4}, 3, io.EOF, 0) - assertLimitedWriter_Write(t, 0, []byte{1}, 0, io.EOF, 0) - assertLimitedWriter_Write(t, 0, nil, 0, nil, 0) -} - -func assertLimitedWriter_Write(t *testing.T, limitBefore int, p []byte, n int, err error, limitAfter int) { - t.Helper() - - lw := limitedWriter{limitBefore} - actualN, actualErr := lw.Write(p) - - if !errors.Is(actualErr, err) { - t.Errorf("_, err := (&limitedWriter{%d}).Write(%#v); err != %#v", limitBefore, p, err) - } - - if actualN != n { - t.Errorf("n, _ := (&limitedWriter{%d}).Write(%#v); n != %d", limitBefore, p, n) - } - - if lw.limit != limitAfter { - t.Errorf("lw := limitedWriter{%d}; lw.Write(%#v); lw.limit != %d", limitBefore, p, limitAfter) - } -} - -func TestPackAny(t *testing.T) { - assertPackAny(t, nil, []byte{0}) - assertPackAny(t, false, []byte{1}) - assertPackAny(t, true, []byte{2}) - - assertPackAnyPanic(t, -42, 0) - assertPackAnyPanic(t, int8(-42), 0) - assertPackAnyPanic(t, int16(-42), 0) - assertPackAnyPanic(t, int32(-42), 0) - assertPackAnyPanic(t, int64(-42), 0) - - assertPackAnyPanic(t, uint(42), 0) - assertPackAnyPanic(t, uint8(42), 0) - assertPackAnyPanic(t, uint16(42), 0) - assertPackAnyPanic(t, uint32(42), 0) - assertPackAnyPanic(t, uint64(42), 0) - assertPackAnyPanic(t, uintptr(42), 0) - - assertPackAnyPanic(t, float32(-42.5), 0) - assertPackAny(t, -42.5, []byte{3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0}) - - assertPackAnyPanic(t, []struct{}(nil), 9) - assertPackAnyPanic(t, []struct{}{}, 9) - - assertPackAny(t, []interface{}{nil, true, -42.5}, []byte{ - 5, 0, 0, 0, 0, 0, 0, 0, 3, - 0, - 2, - 3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0, - }) - - assertPackAny(t, []string{"", "a"}, []byte{ - 5, 0, 0, 0, 0, 0, 0, 0, 2, - 4, 0, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 0, 0, 0, 0, 0, 1, 'a', - }) - - assertPackAnyPanic(t, []interface{}{0 + 0i}, 9) - - assertPackAnyPanic(t, map[struct{}]struct{}(nil), 9) - assertPackAnyPanic(t, 
map[struct{}]struct{}{}, 9) - - assertPackAny(t, map[interface{}]interface{}{true: "", "nil": -42.5}, []byte{ - 6, 0, 0, 0, 0, 0, 0, 0, 2, - 0, 0, 0, 0, 0, 0, 0, 3, 'n', 'i', 'l', - 3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 4, 't', 'r', 'u', 'e', - 4, 0, 0, 0, 0, 0, 0, 0, 0, - }) - - assertPackAny(t, map[string]float64{"": 42}, []byte{ - 6, 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0, 0, - 3, 0x40, 0x45, 0, 0, 0, 0, 0, 0, - }) - - assertPackAny(t, map[[1]byte]bool{{42}: true}, []byte{ - 6, 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0, 1, 42, - 2, - }) - - assertPackAnyPanic(t, map[struct{}]struct{}{{}: {}}, 9) - - assertPackAny(t, (*string)(nil), []byte{0}) - assertPackAnyPanic(t, (*int)(nil), 0) - assertPackAny(t, new(float64), []byte{3, 0, 0, 0, 0, 0, 0, 0, 0}) - - assertPackAny(t, "", []byte{4, 0, 0, 0, 0, 0, 0, 0, 0}) - assertPackAny(t, "a", []byte{4, 0, 0, 0, 0, 0, 0, 0, 1, 'a'}) - assertPackAny(t, "ä", []byte{4, 0, 0, 0, 0, 0, 0, 0, 2, 0xc3, 0xa4}) - - { - var binary [256]byte - for i := range binary { - binary[i] = byte(i) - } - - assertPackAny(t, binary, append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) - assertPackAny(t, binary[:], append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) - assertPackAny(t, types.Binary(binary[:]), append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) - } - - { - type myByte byte - assertPackAnyPanic(t, []myByte(nil), 9) - } - - assertPackAnyPanic(t, complex64(0+0i), 0) - assertPackAnyPanic(t, 0+0i, 0) - assertPackAnyPanic(t, make(chan struct{}), 0) - assertPackAnyPanic(t, func() {}, 0) - assertPackAnyPanic(t, struct{}{}, 0) - assertPackAnyPanic(t, uintptr(0), 0) -} - -func assertPackAny(t *testing.T, in interface{}, out []byte) { - t.Helper() - - { - buf := &bytes.Buffer{} - if err := PackAny(in, buf); err == nil { - if !bytes.Equal(buf.Bytes(), out) { - t.Errorf("buf := &bytes.Buffer{}; packAny(%#v, buf); !bytes.Equal(buf.Bytes(), %#v)", in, out) - } - } else { - t.Errorf("packAny(%#v, &bytes.Buffer{}) != nil", in) - } - } - - for i := 0; i < len(out); i++ { - if !errors.Is(PackAny(in, &limitedWriter{i}), io.EOF) { - t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i) - } - } -} - -func assertPackAnyPanic(t *testing.T, in interface{}, allowToWrite int) { - t.Helper() - - for i := 0; i < allowToWrite; i++ { - if !errors.Is(PackAny(in, &limitedWriter{i}), io.EOF) { - t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i) - } - } - - defer func() { - t.Helper() - - if r := recover(); r == nil { - t.Errorf("packAny(%#v, &limitedWriter{%d}) didn't panic", in, allowToWrite) - } - }() - - _ = PackAny(in, &limitedWriter{allowToWrite}) -} diff --git a/pkg/icingadb/overdue/sync.go b/pkg/icingadb/overdue/sync.go index 377592ac1..3f431b71a 100644 --- a/pkg/icingadb/overdue/sync.go +++ b/pkg/icingadb/overdue/sync.go @@ -5,18 +5,15 @@ import ( _ "embed" "fmt" "github.com/google/uuid" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/icingadb" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" + "github.com/icinga/icinga-go-library/redis" "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingadb/v1/overdue" - "github.com/icinga/icingadb/pkg/icingaredis" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - 
"github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "golang.org/x/sync/errgroup" "regexp" "strconv" @@ -26,13 +23,13 @@ import ( // Sync specifies the source and destination of an overdue sync. type Sync struct { - db *icingadb.DB - redis *icingaredis.Client + db *database.DB + redis *redis.Client logger *logging.Logger } // NewSync creates a new Sync. -func NewSync(db *icingadb.DB, redis *icingaredis.Client, logger *logging.Logger) *Sync { +func NewSync(db *database.DB, redis *redis.Client, logger *logging.Logger) *Sync { return &Sync{ db: db, redis: redis, @@ -41,7 +38,7 @@ func NewSync(db *icingadb.DB, redis *icingaredis.Client, logger *logging.Logger) } // factory abstracts overdue.NewHostState and overdue.NewServiceState. -type factory = func(id string, overdue bool) (contracts.Entity, error) +type factory = func(id string, overdue bool) (database.Entity, error) // Sync synchronizes Redis overdue sets from s.redis to s.db. func (s Sync) Sync(ctx context.Context) error { @@ -89,7 +86,7 @@ func (s Sync) initSync(ctx context.Context, objectType string) error { query := fmt.Sprintf("SELECT id FROM %s_state WHERE is_overdue='y'", objectType) if err := s.db.SelectContext(ctx, &rows, query); err != nil { - return internal.CantPerformQuery(err, query) + return database.CantPerformQuery(err, query) } _, err := s.redis.Pipelined(ctx, func(pipe redis.Pipeliner) error { @@ -223,7 +220,7 @@ func (s Sync) updateOverdue( // updateDb sets objectType_state#is_overdue for ids to overdue. func (s Sync) updateDb(ctx context.Context, factory factory, ids []interface{}, overdue bool) error { g, ctx := errgroup.WithContext(ctx) - ch := make(chan contracts.Entity, 1<<10) + ch := make(chan database.Entity, 1<<10) g.Go(func() error { defer close(ch) diff --git a/pkg/icingadb/runtime_updates.go b/pkg/icingadb/runtime_updates.go index a56263a3d..fed4591ed 100644 --- a/pkg/icingadb/runtime_updates.go +++ b/pkg/icingadb/runtime_updates.go @@ -3,18 +3,18 @@ package icingadb import ( "context" "fmt" - "github.com/icinga/icingadb/pkg/com" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/strcase" + "github.com/icinga/icinga-go-library/structify" "github.com/icinga/icingadb/pkg/common" "github.com/icinga/icingadb/pkg/contracts" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/icingaredis" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/structify" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "go.uber.org/zap" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" @@ -26,13 +26,13 @@ import ( // RuntimeUpdates specifies the source and destination of runtime updates. type RuntimeUpdates struct { - db *DB - redis *icingaredis.Client + db *database.DB + redis *redis.Client logger *logging.Logger } // NewRuntimeUpdates creates a new RuntimeUpdates. 
-func NewRuntimeUpdates(db *DB, redis *icingaredis.Client, logger *logging.Logger) *RuntimeUpdates { +func NewRuntimeUpdates(db *database.DB, redis *redis.Client, logger *logging.Logger) *RuntimeUpdates { return &RuntimeUpdates{ db: db, redis: redis, @@ -42,18 +42,18 @@ func NewRuntimeUpdates(db *DB, redis *icingaredis.Client, logger *logging.Logger // ClearStreams returns the stream key to ID mapping of the runtime update streams // for later use in Sync and clears the streams themselves. -func (r *RuntimeUpdates) ClearStreams(ctx context.Context) (config, state icingaredis.Streams, err error) { - config = icingaredis.Streams{"icinga:runtime": "0-0"} - state = icingaredis.Streams{"icinga:runtime:state": "0-0"} +func (r *RuntimeUpdates) ClearStreams(ctx context.Context) (config, state redis.Streams, err error) { + config = redis.Streams{"icinga:runtime": "0-0"} + state = redis.Streams{"icinga:runtime:state": "0-0"} var keys []string - for _, streams := range [...]icingaredis.Streams{config, state} { + for _, streams := range [...]redis.Streams{config, state} { for key := range streams { keys = append(keys, key) } } - err = icingaredis.WrapCmdErr(r.redis.Del(ctx, keys...)) + err = redis.WrapCmdErr(r.redis.Del(ctx, keys...)) return } @@ -61,7 +61,7 @@ func (r *RuntimeUpdates) ClearStreams(ctx context.Context) (config, state icinga // Note that Sync must only be called after the configuration synchronization has been completed. // allowParallel allows synchronizing out of order (not FIFO). func (r *RuntimeUpdates) Sync( - ctx context.Context, factoryFuncs []contracts.EntityFactoryFunc, streams icingaredis.Streams, allowParallel bool, + ctx context.Context, factoryFuncs []database.EntityFactoryFunc, streams redis.Streams, allowParallel bool, ) error { g, ctx := errgroup.WithContext(ctx) @@ -72,16 +72,16 @@ func (r *RuntimeUpdates) Sync( stat := getCounterForEntity(s.Entity()) updateMessages := make(chan redis.XMessage, r.redis.Options.XReadCount) - upsertEntities := make(chan contracts.Entity, r.redis.Options.XReadCount) + upsertEntities := make(chan database.Entity, r.redis.Options.XReadCount) deleteIds := make(chan interface{}, r.redis.Options.XReadCount) - var upsertedFifo chan contracts.Entity + var upsertedFifo chan database.Entity var deletedFifo chan interface{} var upsertCount int var deleteCount int upsertStmt, upsertPlaceholders := r.db.BuildUpsertStmt(s.Entity()) if !allowParallel { - upsertedFifo = make(chan contracts.Entity, 1) + upsertedFifo = make(chan database.Entity, 1) deletedFifo = make(chan interface{}, 1) upsertCount = 1 deleteCount = 1 @@ -90,13 +90,16 @@ func (r *RuntimeUpdates) Sync( deleteCount = r.db.Options.MaxPlaceholdersPerStatement } - updateMessagesByKey[fmt.Sprintf("icinga:%s", utils.Key(s.Name(), ':'))] = updateMessages + updateMessagesByKey[fmt.Sprintf("icinga:%s", strcase.Delimited(s.Name(), ':'))] = updateMessages r.logger.Debugf("Syncing runtime updates of %s", s.Name()) g.Go(structifyStream( ctx, updateMessages, upsertEntities, upsertedFifo, deleteIds, deletedFifo, - structify.MakeMapStructifier(reflect.TypeOf(s.Entity()).Elem(), "json"), + structify.MakeMapStructifier( + reflect.TypeOf(s.Entity()).Elem(), + "json", + contracts.SafeInit), )) g.Go(func() error { @@ -110,15 +113,15 @@ func (r *RuntimeUpdates) Sync( // Updates must be executed in order; ensure this by using a semaphore with maximum 1.
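A weighted semaphore of capacity 1 from golang.org/x/sync/semaphore behaves like a mutex whose lock operation honors context cancellation; that is the whole trick used here. Standalone sketch (doOrdered is an illustrative name):

func doOrdered(ctx context.Context, sem *semaphore.Weighted, work func() error) error {
	// With capacity 1, Acquire blocks until the single slot is free or ctx
	// is canceled; concurrent callers sharing sem are thus fully serialized.
	if err := sem.Acquire(ctx, 1); err != nil {
		return err
	}
	defer sem.Release(1)

	return work()
}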
sem := semaphore.NewWeighted(1) - onSuccess := []OnSuccess[contracts.Entity]{ - OnSuccessIncrement[contracts.Entity](&counter), OnSuccessIncrement[contracts.Entity](stat), + onSuccess := []database.OnSuccess[database.Entity]{ + database.OnSuccessIncrement[database.Entity](&counter), database.OnSuccessIncrement[database.Entity](stat), } if !allowParallel { - onSuccess = append(onSuccess, OnSuccessSendTo(upsertedFifo)) + onSuccess = append(onSuccess, database.OnSuccessSendTo(upsertedFifo)) } return r.db.NamedBulkExec( - ctx, upsertStmt, upsertCount, sem, upsertEntities, com.SplitOnDupId[contracts.Entity], onSuccess..., + ctx, upsertStmt, upsertCount, sem, upsertEntities, database.SplitOnDupId[database.Entity], onSuccess..., ) }) @@ -130,11 +133,11 @@ func (r *RuntimeUpdates) Sync( } }).Stop() - sem := r.db.GetSemaphoreForTable(utils.TableName(s.Entity())) + sem := r.db.GetSemaphoreForTable(database.TableName(s.Entity())) - onSuccess := []OnSuccess[any]{OnSuccessIncrement[any](&counter), OnSuccessIncrement[any](stat)} + onSuccess := []database.OnSuccess[any]{database.OnSuccessIncrement[any](&counter), database.OnSuccessIncrement[any](stat)} if !allowParallel { - onSuccess = append(onSuccess, OnSuccessSendTo(deletedFifo)) + onSuccess = append(onSuccess, database.OnSuccessSendTo(deletedFifo)) } return r.db.BulkExec(ctx, r.db.BuildDeleteStmt(s.Entity()), deleteCount, sem, deleteIds, onSuccess...) @@ -144,7 +147,7 @@ func (r *RuntimeUpdates) Sync( // customvar and customvar_flat sync. { updateMessages := make(chan redis.XMessage, r.redis.Options.XReadCount) - upsertEntities := make(chan contracts.Entity, r.redis.Options.XReadCount) + upsertEntities := make(chan database.Entity, r.redis.Options.XReadCount) deleteIds := make(chan interface{}, r.redis.Options.XReadCount) cv := common.NewSyncSubject(v1.NewCustomvar) @@ -153,10 +156,13 @@ func (r *RuntimeUpdates) Sync( r.logger.Debug("Syncing runtime updates of " + cv.Name()) r.logger.Debug("Syncing runtime updates of " + cvFlat.Name()) - updateMessagesByKey["icinga:"+utils.Key(cv.Name(), ':')] = updateMessages + updateMessagesByKey["icinga:"+strcase.Delimited(cv.Name(), ':')] = updateMessages g.Go(structifyStream( ctx, updateMessages, upsertEntities, nil, deleteIds, nil, - structify.MakeMapStructifier(reflect.TypeOf(cv.Entity()).Elem(), "json"), + structify.MakeMapStructifier( + reflect.TypeOf(cv.Entity()).Elem(), + "json", + contracts.SafeInit), )) customvars, flatCustomvars, errs := v1.ExpandCustomvars(ctx, upsertEntities) @@ -176,9 +182,9 @@ func (r *RuntimeUpdates) Sync( sem := semaphore.NewWeighted(1) return r.db.NamedBulkExec( - ctx, cvStmt, cvCount, sem, customvars, com.SplitOnDupId[contracts.Entity], - OnSuccessIncrement[contracts.Entity](&counter), - OnSuccessIncrement[contracts.Entity](&telemetry.Stats.Config), + ctx, cvStmt, cvCount, sem, customvars, database.SplitOnDupId[database.Entity], + database.OnSuccessIncrement[database.Entity](&counter), + database.OnSuccessIncrement[database.Entity](&telemetry.Stats.Config), ) }) @@ -197,8 +203,8 @@ func (r *RuntimeUpdates) Sync( return r.db.NamedBulkExec( ctx, cvFlatStmt, cvFlatCount, sem, flatCustomvars, - com.SplitOnDupId[contracts.Entity], OnSuccessIncrement[contracts.Entity](&counter), - OnSuccessIncrement[contracts.Entity](&telemetry.Stats.Config), + database.SplitOnDupId[database.Entity], database.OnSuccessIncrement[database.Entity](&counter), + database.OnSuccessIncrement[database.Entity](&telemetry.Stats.Config), ) }) @@ -228,7 +234,7 @@ func (r *RuntimeUpdates) Sync( // xRead reads from 
the runtime update streams and sends the data to the corresponding updateMessages channel. // The updateMessages channel is determined by a "redis_key" on each redis message. -func (r *RuntimeUpdates) xRead(ctx context.Context, updateMessagesByKey map[string]chan<- redis.XMessage, streams icingaredis.Streams) func() error { +func (r *RuntimeUpdates) xRead(ctx context.Context, updateMessagesByKey map[string]chan<- redis.XMessage, streams redis.Streams) func() error { return func() error { defer func() { for _, updateMessages := range updateMessagesByKey { @@ -283,7 +289,7 @@ func (r *RuntimeUpdates) xRead(ctx context.Context, updateMessagesByKey map[stri } else { for _, cmd := range cmds { if cmd.Err() != nil { - r.logger.Errorw("Can't trim runtime updates stream", zap.Error(icingaredis.WrapCmdErr(cmd))) + r.logger.Errorw("Can't trim runtime updates stream", zap.Error(redis.WrapCmdErr(cmd))) } } } @@ -295,11 +301,11 @@ func (r *RuntimeUpdates) xRead(ctx context.Context, updateMessagesByKey map[stri // those messages into Icinga DB entities (contracts.Entity) using the provided structifier. // Converted entities are inserted into the upsertEntities or deleteIds channel depending on the "runtime_type" message field. func structifyStream( - ctx context.Context, updateMessages <-chan redis.XMessage, upsertEntities, upserted chan contracts.Entity, + ctx context.Context, updateMessages <-chan redis.XMessage, upsertEntities, upserted chan database.Entity, deleteIds, deleted chan interface{}, structifier structify.MapStructifier, ) func() error { if upserted == nil { - upserted = make(chan contracts.Entity) + upserted = make(chan database.Entity) close(upserted) } @@ -326,7 +332,7 @@ func structifyStream( return errors.Wrapf(err, "can't structify values %#v", message.Values) } - entity := ptr.(contracts.Entity) + entity := ptr.(database.Entity) runtimeType := message.Values["runtime_type"] if runtimeType == nil { diff --git a/pkg/icingadb/schema.go b/pkg/icingadb/schema.go new file mode 100644 index 000000000..aa4735af3 --- /dev/null +++ b/pkg/icingadb/schema.go @@ -0,0 +1,58 @@ +package icingadb + +import ( + "context" + "fmt" + "github.com/icinga/icinga-go-library/backoff" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/retry" + "github.com/pkg/errors" + "time" +) + +const ( + expectedMysqlSchemaVersion = 5 + expectedPostgresSchemaVersion = 3 +) + +// CheckSchema asserts the database schema of the expected version being present. +func CheckSchema(ctx context.Context, db *database.DB) error { + var expectedDbSchemaVersion uint16 + switch db.DriverName() { + case database.MySQL: + expectedDbSchemaVersion = expectedMysqlSchemaVersion + case database.PostgreSQL: + expectedDbSchemaVersion = expectedPostgresSchemaVersion + } + + var version uint16 + + err := retry.WithBackoff( + ctx, + func(ctx context.Context) (err error) { + query := "SELECT version FROM icingadb_schema ORDER BY id DESC LIMIT 1" + err = db.QueryRowxContext(ctx, query).Scan(&version) + if err != nil { + err = database.CantPerformQuery(err, query) + } + return + }, + retry.Retryable, + backoff.NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute), + db.GetDefaultRetrySettings()) + if err != nil { + return errors.Wrap(err, "can't check database schema version") + } + + if version != expectedDbSchemaVersion { + // Since these error messages are trivial and mostly caused by users, we don't need + // to print a stack trace here. 
However, since errors.Errorf() does this automatically, + // we need to use fmt instead. + return fmt.Errorf( + "unexpected database schema version: v%d (expected v%d), please make sure you have applied all database"+ + " migrations after upgrading Icinga DB", version, expectedDbSchemaVersion, + ) + } + + return nil +} diff --git a/pkg/icingadb/scoped_entity.go b/pkg/icingadb/scoped_entity.go index 7c1688c7a..5a77e7c53 100644 --- a/pkg/icingadb/scoped_entity.go +++ b/pkg/icingadb/scoped_entity.go @@ -1,15 +1,14 @@ package icingadb import ( - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/utils" + "github.com/icinga/icinga-go-library/database" ) // ScopedEntity combines an entity and a scope that specifies // the WHERE conditions that entities of the // enclosed entity type must satisfy in order to be SELECTed. type ScopedEntity struct { - contracts.Entity + database.Entity scope interface{} } @@ -20,11 +19,11 @@ func (e ScopedEntity) Scope() interface{} { // TableName implements the contracts.TableNamer interface. func (e ScopedEntity) TableName() string { - return utils.TableName(e.Entity) + return database.TableName(e.Entity) } // NewScopedEntity returns a new ScopedEntity. -func NewScopedEntity(entity contracts.Entity, scope interface{}) *ScopedEntity { +func NewScopedEntity(entity database.Entity, scope interface{}) *ScopedEntity { return &ScopedEntity{ Entity: entity, scope: scope, diff --git a/pkg/icingadb/sync.go b/pkg/icingadb/sync.go index 790f11e43..6b39ee64f 100644 --- a/pkg/icingadb/sync.go +++ b/pkg/icingadb/sync.go @@ -3,14 +3,16 @@ package icingadb import ( "context" "fmt" - "github.com/icinga/icingadb/pkg/com" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/strcase" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/common" - "github.com/icinga/icingadb/pkg/contracts" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" "github.com/icinga/icingadb/pkg/icingaredis" "github.com/icinga/icingadb/pkg/icingaredis/telemetry" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -20,13 +22,13 @@ import ( // Sync implements a rendezvous point for Icinga DB and Redis to synchronize their entities. type Sync struct { - db *DB - redis *icingaredis.Client + db *database.DB + redis *redis.Client logger *logging.Logger } // NewSync returns a new Sync. -func NewSync(db *DB, redis *icingaredis.Client, logger *logging.Logger) *Sync { +func NewSync(db *database.DB, redis *redis.Client, logger *logging.Logger) *Sync { return &Sync{ db: db, redis: redis, @@ -37,8 +39,8 @@ func NewSync(db *DB, redis *icingaredis.Client, logger *logging.Logger) *Sync { // SyncAfterDump waits for a config dump to finish (using the dump parameter) and then starts a sync for the given // sync subject using the Sync function. 
func (s Sync) SyncAfterDump(ctx context.Context, subject *common.SyncSubject, dump *DumpSignals) error { - typeName := utils.Name(subject.Entity()) - key := "icinga:" + utils.Key(typeName, ':') + typeName := types.Name(subject.Entity()) + key := "icinga:" + strcase.Delimited(typeName, ':') startTime := time.Now() logTicker := time.NewTicker(s.logger.Interval()) @@ -74,7 +76,7 @@ func (s Sync) SyncAfterDump(ctx context.Context, subject *common.SyncSubject, du func (s Sync) Sync(ctx context.Context, subject *common.SyncSubject) error { g, ctx := errgroup.WithContext(ctx) - desired, redisErrs := s.redis.YieldAll(ctx, subject) + desired, redisErrs := icingaredis.YieldAll(ctx, s.redis, subject) // Let errors from Redis cancel our group. com.ErrgroupReceive(g, redisErrs) @@ -108,12 +110,12 @@ func (s Sync) ApplyDelta(ctx context.Context, delta *Delta) error { // Create if len(delta.Create) > 0 { - s.logger.Infof("Inserting %d items of type %s", len(delta.Create), utils.Key(utils.Name(delta.Subject.Entity()), ' ')) - var entities <-chan contracts.Entity + s.logger.Infof("Inserting %d items of type %s", len(delta.Create), strcase.Delimited(types.Name(delta.Subject.Entity()), ' ')) + var entities <-chan database.Entity if delta.Subject.WithChecksum() { pairs, errs := s.redis.HMYield( ctx, - fmt.Sprintf("icinga:%s", utils.Key(utils.Name(delta.Subject.Entity()), ':')), + fmt.Sprintf("icinga:%s", strcase.Delimited(types.Name(delta.Subject.Entity()), ':')), delta.Create.Keys()...) // Let errors from Redis cancel our group. com.ErrgroupReceive(g, errs) @@ -129,16 +131,16 @@ func (s Sync) ApplyDelta(ctx context.Context, delta *Delta) error { } g.Go(func() error { - return s.db.CreateStreamed(ctx, entities, OnSuccessIncrement[contracts.Entity](stat)) + return s.db.CreateStreamed(ctx, entities, database.OnSuccessIncrement[database.Entity](stat)) }) } // Update if len(delta.Update) > 0 { - s.logger.Infof("Updating %d items of type %s", len(delta.Update), utils.Key(utils.Name(delta.Subject.Entity()), ' ')) + s.logger.Infof("Updating %d items of type %s", len(delta.Update), strcase.Delimited(types.Name(delta.Subject.Entity()), ' ')) pairs, errs := s.redis.HMYield( ctx, - fmt.Sprintf("icinga:%s", utils.Key(utils.Name(delta.Subject.Entity()), ':')), + fmt.Sprintf("icinga:%s", strcase.Delimited(types.Name(delta.Subject.Entity()), ':')), delta.Update.Keys()...) // Let errors from Redis cancel our group. com.ErrgroupReceive(g, errs) @@ -153,15 +155,15 @@ func (s Sync) ApplyDelta(ctx context.Context, delta *Delta) error { g.Go(func() error { // Using upsert here on purpose as this is the fastest way to do bulk updates. // However, there is a risk that errors in the sync implementation could silently insert new rows. 
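BuildUpsertStmt makes that risk concrete. Sketch, with the shape of the generated SQL stated as an assumption for MySQL:

func upsertStmtFor(db *database.DB, e database.Entity) (string, int) {
	// Roughly "INSERT INTO <table> (...) VALUES (...) ON DUPLICATE KEY
	// UPDATE ..." on MySQL: a missing row is inserted rather than rejected,
	// which is exactly the silent-insert risk noted above. The int reports
	// how many placeholders one row consumes.
	return db.BuildUpsertStmt(e)
}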
- return s.db.UpsertStreamed(ctx, entities, OnSuccessIncrement[contracts.Entity](stat)) + return s.db.UpsertStreamed(ctx, entities, database.OnSuccessIncrement[database.Entity](stat)) }) } // Delete if len(delta.Delete) > 0 { - s.logger.Infof("Deleting %d items of type %s", len(delta.Delete), utils.Key(utils.Name(delta.Subject.Entity()), ' ')) + s.logger.Infof("Deleting %d items of type %s", len(delta.Delete), strcase.Delimited(types.Name(delta.Subject.Entity()), ' ')) g.Go(func() error { - return s.db.Delete(ctx, delta.Subject.Entity(), delta.Delete.IDs(), OnSuccessIncrement[any](stat)) + return s.db.Delete(ctx, delta.Subject.Entity(), delta.Delete.IDs(), database.OnSuccessIncrement[any](stat)) }) } @@ -179,7 +181,7 @@ func (s Sync) SyncCustomvars(ctx context.Context) error { cv := common.NewSyncSubject(v1.NewCustomvar) - cvs, errs := s.redis.YieldAll(ctx, cv) + cvs, errs := icingaredis.YieldAll(ctx, s.redis, cv) com.ErrgroupReceive(g, errs) desiredCvs, desiredFlatCvs, errs := v1.ExpandCustomvars(ctx, cvs) @@ -211,7 +213,7 @@ func (s Sync) SyncCustomvars(ctx context.Context) error { } // getCounterForEntity returns the appropriate counter (config/state) from telemetry.Stats for e. -func getCounterForEntity(e contracts.Entity) *com.Counter { +func getCounterForEntity(e database.Entity) *com.Counter { switch e.(type) { case *v1.HostState, *v1.ServiceState: return &telemetry.Stats.State diff --git a/pkg/types/acknowledgement_state.go b/pkg/icingadb/types/acknowledgement_state.go similarity index 93% rename from pkg/types/acknowledgement_state.go rename to pkg/icingadb/types/acknowledgement_state.go index 5bff61374..5a385653b 100644 --- a/pkg/types/acknowledgement_state.go +++ b/pkg/icingadb/types/acknowledgement_state.go @@ -4,7 +4,7 @@ import ( "database/sql/driver" "encoding" "encoding/json" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" ) @@ -19,7 +19,7 @@ func (as *AcknowledgementState) UnmarshalText(text []byte) error { // UnmarshalJSON implements the json.Unmarshaler interface. func (as *AcknowledgementState) UnmarshalJSON(data []byte) error { var i uint8 - if err := internal.UnmarshalJSON(data, &i); err != nil { + if err := types.UnmarshalJSON(data, &i); err != nil { return err } diff --git a/pkg/types/comment_type.go b/pkg/icingadb/types/comment_type.go similarity index 92% rename from pkg/types/comment_type.go rename to pkg/icingadb/types/comment_type.go index 8aed47591..86bb092c6 100644 --- a/pkg/types/comment_type.go +++ b/pkg/icingadb/types/comment_type.go @@ -4,7 +4,7 @@ import ( "database/sql/driver" "encoding" "encoding/json" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" "strconv" ) @@ -15,7 +15,7 @@ type CommentType uint8 // UnmarshalJSON implements the json.Unmarshaler interface. 
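Each of the relocated enum types follows the same decoding skeleton, now built on the library's types.UnmarshalJSON helper instead of the internal package. Reduced to its core (ExampleState is hypothetical):

type ExampleState uint8 // hypothetical stand-in for CommentType and friends

func (es *ExampleState) UnmarshalJSON(data []byte) error {
	var i uint8
	if err := types.UnmarshalJSON(data, &i); err != nil {
		return err
	}

	*es = ExampleState(i) // the real types additionally validate the range here
	return nil
}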
func (ct *CommentType) UnmarshalJSON(data []byte) error { var i uint8 - if err := internal.UnmarshalJSON(data, &i); err != nil { + if err := types.UnmarshalJSON(data, &i); err != nil { return err } @@ -34,7 +34,7 @@ func (ct *CommentType) UnmarshalText(text []byte) error { i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return internal.CantParseUint64(err, s) + return types.CantParseUint64(err, s) } c := CommentType(i) diff --git a/pkg/types/notification_states.go b/pkg/icingadb/types/notification_states.go similarity index 94% rename from pkg/types/notification_states.go rename to pkg/icingadb/types/notification_states.go index ff5760a33..28f4fc84d 100644 --- a/pkg/types/notification_states.go +++ b/pkg/icingadb/types/notification_states.go @@ -4,7 +4,7 @@ import ( "database/sql/driver" "encoding" "encoding/json" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" ) @@ -14,7 +14,7 @@ type NotificationStates uint8 // UnmarshalJSON implements the json.Unmarshaler interface. func (nst *NotificationStates) UnmarshalJSON(data []byte) error { var states []string - if err := internal.UnmarshalJSON(data, &states); err != nil { + if err := types.UnmarshalJSON(data, &states); err != nil { return err } diff --git a/pkg/types/notification_type.go b/pkg/icingadb/types/notification_type.go similarity index 94% rename from pkg/types/notification_type.go rename to pkg/icingadb/types/notification_type.go index f2980f4e0..ce88c2efb 100644 --- a/pkg/types/notification_type.go +++ b/pkg/icingadb/types/notification_type.go @@ -3,7 +3,7 @@ package types import ( "database/sql/driver" "encoding" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" "strconv" ) @@ -17,7 +17,7 @@ func (nt *NotificationType) UnmarshalText(text []byte) error { i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return internal.CantParseUint64(err, s) + return types.CantParseUint64(err, s) } n := NotificationType(i) diff --git a/pkg/types/notification_types.go b/pkg/icingadb/types/notification_types.go similarity index 76% rename from pkg/types/notification_types.go rename to pkg/icingadb/types/notification_types.go index 832a515cf..add8e3e6b 100644 --- a/pkg/types/notification_types.go +++ b/pkg/icingadb/types/notification_types.go @@ -4,7 +4,7 @@ import ( "database/sql/driver" "encoding" "encoding/json" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" ) @@ -13,21 +13,21 @@ type NotificationTypes uint16 // UnmarshalJSON implements the json.Unmarshaler interface. func (nt *NotificationTypes) UnmarshalJSON(data []byte) error { - var types []string - if err := internal.UnmarshalJSON(data, &types); err != nil { + var names []string + if err := types.UnmarshalJSON(data, &names); err != nil { return err } - var n NotificationTypes - for _, typ := range types { - if v, ok := notificationTypeNames[typ]; ok { - n |= v + var v NotificationTypes + for _, name := range names { + if i, ok := notificationTypeMap[name]; ok { + v |= i } else { return badNotificationTypes(nt) } } - *nt = n + *nt = v return nil } @@ -50,8 +50,8 @@ func badNotificationTypes(t interface{}) error { return errors.Errorf("bad notification types: %#v", t) } -// notificationTypeNames maps all valid NotificationTypes values to their SQL representation. 
-var notificationTypeNames = map[string]NotificationTypes{ +// notificationTypeMap maps all valid NotificationTypes values to their SQL representation. +var notificationTypeMap = map[string]NotificationTypes{ "DowntimeStart": 1, "DowntimeEnd": 2, "DowntimeRemoved": 4, @@ -65,12 +65,12 @@ var notificationTypeNames = map[string]NotificationTypes{ // allNotificationTypes is the largest valid NotificationTypes value. var allNotificationTypes = func() NotificationTypes { - var nt NotificationTypes - for _, v := range notificationTypeNames { - nt |= v + var all NotificationTypes + for _, i := range notificationTypeMap { + all |= i } - return nt + return all }() // Assert interface compliance. diff --git a/pkg/types/state_type.go b/pkg/icingadb/types/state_type.go similarity index 93% rename from pkg/types/state_type.go rename to pkg/icingadb/types/state_type.go index f0cc69afe..247443182 100644 --- a/pkg/types/state_type.go +++ b/pkg/icingadb/types/state_type.go @@ -4,7 +4,7 @@ import ( "database/sql/driver" "encoding" "encoding/json" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" ) @@ -19,7 +19,7 @@ func (st *StateType) UnmarshalText(text []byte) error { // UnmarshalJSON implements the json.Unmarshaler interface. func (st *StateType) UnmarshalJSON(data []byte) error { var i uint8 - if err := internal.UnmarshalJSON(data, &i); err != nil { + if err := types.UnmarshalJSON(data, &i); err != nil { return err } diff --git a/pkg/icingadb/v1/checkable.go b/pkg/icingadb/v1/checkable.go index 4b1efeb9c..78d75f5b3 100644 --- a/pkg/icingadb/v1/checkable.go +++ b/pkg/icingadb/v1/checkable.go @@ -1,8 +1,8 @@ package v1 import ( + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type Checkable struct { diff --git a/pkg/icingadb/v1/command.go b/pkg/icingadb/v1/command.go index cd4db2a73..74ec5553f 100644 --- a/pkg/icingadb/v1/command.go +++ b/pkg/icingadb/v1/command.go @@ -1,8 +1,9 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type Command struct { @@ -111,51 +112,51 @@ type NotificationcommandCustomvar struct { NotificationcommandId types.Binary `json:"notificationcommand_id"` } -func NewCheckcommand() contracts.Entity { +func NewCheckcommand() database.Entity { return &Checkcommand{} } -func NewCheckcommandArgument() contracts.Entity { +func NewCheckcommandArgument() database.Entity { return &CheckcommandArgument{} } -func NewCheckcommandEnvvar() contracts.Entity { +func NewCheckcommandEnvvar() database.Entity { return &CheckcommandEnvvar{} } -func NewCheckcommandCustomvar() contracts.Entity { +func NewCheckcommandCustomvar() database.Entity { return &CheckcommandCustomvar{} } -func NewEventcommand() contracts.Entity { +func NewEventcommand() database.Entity { return &Eventcommand{} } -func NewEventcommandArgument() contracts.Entity { +func NewEventcommandArgument() database.Entity { return &EventcommandArgument{} } -func NewEventcommandEnvvar() contracts.Entity { +func NewEventcommandEnvvar() database.Entity { return &EventcommandEnvvar{} } -func NewEventcommandCustomvar() contracts.Entity { +func NewEventcommandCustomvar() database.Entity { return &EventcommandCustomvar{} } -func NewNotificationcommand() contracts.Entity { +func NewNotificationcommand() database.Entity { return &Notificationcommand{} } -func 
NewNotificationcommandArgument() contracts.Entity { +func NewNotificationcommandArgument() database.Entity { return &NotificationcommandArgument{} } -func NewNotificationcommandEnvvar() contracts.Entity { +func NewNotificationcommandEnvvar() database.Entity { return &NotificationcommandEnvvar{} } -func NewNotificationcommandCustomvar() contracts.Entity { +func NewNotificationcommandCustomvar() database.Entity { return &NotificationcommandCustomvar{} } diff --git a/pkg/icingadb/v1/comment.go b/pkg/icingadb/v1/comment.go index b720fac8a..75f29f738 100644 --- a/pkg/icingadb/v1/comment.go +++ b/pkg/icingadb/v1/comment.go @@ -1,27 +1,28 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type Comment struct { EntityWithChecksum `json:",inline"` EnvironmentMeta `json:",inline"` NameMeta `json:",inline"` - ObjectType string `json:"object_type"` - HostId types.Binary `json:"host_id"` - ServiceId types.Binary `json:"service_id"` - Author string `json:"author"` - Text string `json:"text"` - EntryType types.CommentType `json:"entry_type"` - EntryTime types.UnixMilli `json:"entry_time"` - IsPersistent types.Bool `json:"is_persistent"` - IsSticky types.Bool `json:"is_sticky"` - ExpireTime types.UnixMilli `json:"expire_time"` - ZoneId types.Binary `json:"zone_id"` + ObjectType string `json:"object_type"` + HostId types.Binary `json:"host_id"` + ServiceId types.Binary `json:"service_id"` + Author string `json:"author"` + Text string `json:"text"` + EntryType icingadbTypes.CommentType `json:"entry_type"` + EntryTime types.UnixMilli `json:"entry_time"` + IsPersistent types.Bool `json:"is_persistent"` + IsSticky types.Bool `json:"is_sticky"` + ExpireTime types.UnixMilli `json:"expire_time"` + ZoneId types.Binary `json:"zone_id"` } -func NewComment() contracts.Entity { +func NewComment() database.Entity { return &Comment{} } diff --git a/pkg/icingadb/v1/customvar.go b/pkg/icingadb/v1/customvar.go index 462b87c4e..a9a866dd2 100644 --- a/pkg/icingadb/v1/customvar.go +++ b/pkg/icingadb/v1/customvar.go @@ -2,13 +2,12 @@ package v1 import ( "context" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/flatten" - "github.com/icinga/icingadb/pkg/icingadb/objectpacker" - "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/flatten" + "github.com/icinga/icinga-go-library/objectpacker" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" "golang.org/x/sync/errgroup" "runtime" ) @@ -27,11 +26,11 @@ type CustomvarFlat struct { Flatvalue types.String `json:"flatvalue"` } -func NewCustomvar() contracts.Entity { +func NewCustomvar() database.Entity { return &Customvar{} } -func NewCustomvarFlat() contracts.Entity { +func NewCustomvarFlat() database.Entity { return &CustomvarFlat{} } @@ -41,12 +40,12 @@ func NewCustomvarFlat() contracts.Entity { // and the third channel providing an error, if any. 
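Callers fan the three returned channels into an errgroup, as SyncCustomvars above already shows; the minimal call shape (g, ctx and cvs as in that function):

customvars, flatCustomvars, errs := v1.ExpandCustomvars(ctx, cvs)

// Any error from the expansion goroutines cancels the surrounding group.
com.ErrgroupReceive(g, errs)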
func ExpandCustomvars( ctx context.Context, - cvs <-chan contracts.Entity, -) (customvars, flatCustomvars <-chan contracts.Entity, errs <-chan error) { + cvs <-chan database.Entity, +) (customvars, flatCustomvars <-chan database.Entity, errs <-chan error) { g, ctx := errgroup.WithContext(ctx) // Multiplex cvs to use them both for customvar and customvar_flat. - var forward chan contracts.Entity + var forward chan database.Entity customvars, forward = multiplexCvs(ctx, g, cvs) flatCustomvars = flattenCustomvars(ctx, g, forward) errs = com.WaitAsync(g) @@ -59,10 +58,10 @@ func ExpandCustomvars( func multiplexCvs( ctx context.Context, g *errgroup.Group, - cvs <-chan contracts.Entity, -) (customvars1, customvars2 chan contracts.Entity) { - customvars1 = make(chan contracts.Entity) - customvars2 = make(chan contracts.Entity) + cvs <-chan database.Entity, +) (customvars1, customvars2 chan database.Entity) { + customvars1 = make(chan database.Entity) + customvars2 = make(chan database.Entity) g.Go(func() error { defer close(customvars1) @@ -96,8 +95,8 @@ func multiplexCvs( } // flattenCustomvars creates and yields flat custom variables from the provided custom variables. -func flattenCustomvars(ctx context.Context, g *errgroup.Group, cvs <-chan contracts.Entity) (flatCustomvars chan contracts.Entity) { - flatCustomvars = make(chan contracts.Entity) +func flattenCustomvars(ctx context.Context, g *errgroup.Group, cvs <-chan database.Entity) (flatCustomvars chan database.Entity) { + flatCustomvars = make(chan database.Entity) g.Go(func() error { defer close(flatCustomvars) @@ -109,7 +108,7 @@ func flattenCustomvars(ctx context.Context, g *errgroup.Group, cvs <-chan contra for entity := range cvs { var value interface{} customvar := entity.(*Customvar) - if err := internal.UnmarshalJSON([]byte(customvar.Value), &value); err != nil { + if err := types.UnmarshalJSON([]byte(customvar.Value), &value); err != nil { return err } diff --git a/pkg/icingadb/v1/downtime.go b/pkg/icingadb/v1/downtime.go index 0878e5e34..2df189403 100644 --- a/pkg/icingadb/v1/downtime.go +++ b/pkg/icingadb/v1/downtime.go @@ -1,8 +1,8 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" ) type Downtime struct { @@ -30,6 +30,6 @@ type Downtime struct { ZoneId types.Binary `json:"zone_id"` } -func NewDowntime() contracts.Entity { +func NewDowntime() database.Entity { return &Downtime{} } diff --git a/pkg/icingadb/v1/endpoint.go b/pkg/icingadb/v1/endpoint.go index 6abe9d7fd..464b103fd 100644 --- a/pkg/icingadb/v1/endpoint.go +++ b/pkg/icingadb/v1/endpoint.go @@ -1,8 +1,9 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type Endpoint struct { @@ -21,11 +22,11 @@ type Zone struct { Depth uint8 `json:"depth"` } -func NewEndpoint() contracts.Entity { +func NewEndpoint() database.Entity { return &Endpoint{} } -func NewZone() contracts.Entity { +func NewZone() database.Entity { return &Zone{} } diff --git a/pkg/icingadb/v1/entity.go b/pkg/icingadb/v1/entity.go index 5dfa3d299..e7584648e 100644 --- a/pkg/icingadb/v1/entity.go +++ b/pkg/icingadb/v1/entity.go @@ -1,6 +1,8 @@ package v1 -import "github.com/icinga/icingadb/pkg/contracts" +import ( + "github.com/icinga/icinga-go-library/database" +) // EntityWithoutChecksum represents entities 
without a checksum. type EntityWithoutChecksum struct { @@ -8,7 +10,7 @@ type EntityWithoutChecksum struct { } // Fingerprint implements the contracts.Fingerprinter interface. -func (e EntityWithoutChecksum) Fingerprint() contracts.Fingerprinter { +func (e EntityWithoutChecksum) Fingerprint() database.Fingerprinter { return e } @@ -19,10 +21,10 @@ type EntityWithChecksum struct { } // Fingerprint implements the contracts.Fingerprinter interface. -func (e EntityWithChecksum) Fingerprint() contracts.Fingerprinter { +func (e EntityWithChecksum) Fingerprint() database.Fingerprinter { return e } -func NewEntityWithChecksum() contracts.Entity { +func NewEntityWithChecksum() database.Entity { return &EntityWithChecksum{} } diff --git a/pkg/icingadb/v1/environment.go b/pkg/icingadb/v1/environment.go index 80e06f6aa..6b35d5c4b 100644 --- a/pkg/icingadb/v1/environment.go +++ b/pkg/icingadb/v1/environment.go @@ -2,7 +2,7 @@ package v1 import ( "context" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/types" ) type Environment struct { diff --git a/pkg/icingadb/v1/history/ack.go b/pkg/icingadb/v1/history/ack.go index 094a7e434..f89a9adaa 100644 --- a/pkg/icingadb/v1/history/ack.go +++ b/pkg/icingadb/v1/history/ack.go @@ -2,9 +2,10 @@ package history import ( "database/sql/driver" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) type AckHistoryUpserter struct { @@ -74,9 +75,9 @@ func (et AckEventTime) Value() (driver.Value, error) { // Assert interface compliance. var ( - _ UpserterEntity = (*AcknowledgementHistory)(nil) - _ contracts.Initer = (*HistoryAck)(nil) - _ contracts.TableNamer = (*HistoryAck)(nil) - _ UpserterEntity = (*HistoryAck)(nil) - _ driver.Valuer = AckEventTime{} + _ UpserterEntity = (*AcknowledgementHistory)(nil) + _ contracts.Initer = (*HistoryAck)(nil) + _ database.TableNamer = (*HistoryAck)(nil) + _ UpserterEntity = (*HistoryAck)(nil) + _ driver.Valuer = AckEventTime{} ) diff --git a/pkg/icingadb/v1/history/comment.go b/pkg/icingadb/v1/history/comment.go index d3a5743a2..bf5941928 100644 --- a/pkg/icingadb/v1/history/comment.go +++ b/pkg/icingadb/v1/history/comment.go @@ -2,8 +2,10 @@ package history import ( "database/sql/driver" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type CommentHistoryEntity struct { @@ -11,17 +13,17 @@ type CommentHistoryEntity struct { } // Fingerprint implements part of the contracts.Entity interface. -func (che CommentHistoryEntity) Fingerprint() contracts.Fingerprinter { +func (che CommentHistoryEntity) Fingerprint() database.Fingerprinter { return che } // ID implements part of the contracts.Entity interface. -func (che CommentHistoryEntity) ID() contracts.ID { +func (che CommentHistoryEntity) ID() database.ID { return che.CommentId } // SetID implements part of the contracts.Entity interface. 
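// (Aside: the ID/SetID pair is what lets generic sync code shuttle identifiers
// between entities without knowing their concrete types. A hypothetical
// sketch, with id taken from some other entity:
//
//    var e database.Entity = &CommentHistoryEntity{}
//    e.SetID(id) // panics unless id's dynamic type is types.Binary
//
// The unchecked assertion below is therefore an internal invariant.)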
-func (che *CommentHistoryEntity) SetID(id contracts.ID) { +func (che *CommentHistoryEntity) SetID(id database.ID) { che.CommentId = id.(types.Binary) } @@ -40,13 +42,13 @@ type CommentHistory struct { CommentHistoryEntity `json:",inline"` HistoryTableMeta `json:",inline"` CommentHistoryUpserter `json:",inline"` - EntryTime types.UnixMilli `json:"entry_time"` - Author string `json:"author"` - Comment string `json:"comment"` - EntryType types.CommentType `json:"entry_type"` - IsPersistent types.Bool `json:"is_persistent"` - IsSticky types.Bool `json:"is_sticky"` - ExpireTime types.UnixMilli `json:"expire_time"` + EntryTime types.UnixMilli `json:"entry_time"` + Author string `json:"author"` + Comment string `json:"comment"` + EntryType icingadbTypes.CommentType `json:"entry_type"` + IsPersistent types.Bool `json:"is_persistent"` + IsSticky types.Bool `json:"is_sticky"` + ExpireTime types.UnixMilli `json:"expire_time"` } // Init implements the contracts.Initer interface. @@ -109,12 +111,12 @@ func (et CommentEventTime) Value() (driver.Value, error) { // Assert interface compliance. var ( - _ contracts.Entity = (*CommentHistoryEntity)(nil) - _ contracts.Upserter = (*CommentHistoryUpserter)(nil) - _ contracts.Initer = (*CommentHistory)(nil) - _ UpserterEntity = (*CommentHistory)(nil) - _ contracts.Initer = (*HistoryComment)(nil) - _ contracts.TableNamer = (*HistoryComment)(nil) - _ UpserterEntity = (*HistoryComment)(nil) - _ driver.Valuer = CommentEventTime{} + _ database.Entity = (*CommentHistoryEntity)(nil) + _ database.Upserter = (*CommentHistoryUpserter)(nil) + _ contracts.Initer = (*CommentHistory)(nil) + _ UpserterEntity = (*CommentHistory)(nil) + _ contracts.Initer = (*HistoryComment)(nil) + _ database.TableNamer = (*HistoryComment)(nil) + _ UpserterEntity = (*HistoryComment)(nil) + _ driver.Valuer = CommentEventTime{} ) diff --git a/pkg/icingadb/v1/history/downtime.go b/pkg/icingadb/v1/history/downtime.go index 99f93f6a9..ec3c8d668 100644 --- a/pkg/icingadb/v1/history/downtime.go +++ b/pkg/icingadb/v1/history/downtime.go @@ -2,8 +2,9 @@ package history import ( "database/sql/driver" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type DowntimeHistoryEntity struct { @@ -11,17 +12,17 @@ type DowntimeHistoryEntity struct { } // Fingerprint implements part of the contracts.Entity interface. -func (dhe DowntimeHistoryEntity) Fingerprint() contracts.Fingerprinter { +func (dhe DowntimeHistoryEntity) Fingerprint() database.Fingerprinter { return dhe } // ID implements part of the contracts.Entity interface. -func (dhe DowntimeHistoryEntity) ID() contracts.ID { +func (dhe DowntimeHistoryEntity) ID() database.ID { return dhe.DowntimeId } // SetID implements part of the contracts.Entity interface. -func (dhe *DowntimeHistoryEntity) SetID(id contracts.ID) { +func (dhe *DowntimeHistoryEntity) SetID(id database.ID) { dhe.DowntimeId = id.(types.Binary) } @@ -148,14 +149,14 @@ func (et SlaDowntimeEndTime) Value() (driver.Value, error) { // Assert interface compliance. 
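// (These blank-identifier declarations are compile-time checks only: the build
// breaks as soon as a listed type stops satisfying the asserted interface.
// The idiom in isolation:
//
//    var _ fmt.Stringer = (*bytes.Buffer)(nil)
//
// The typed nil is type-checked, never stored, and costs nothing at runtime.)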
var ( - _ contracts.Entity = (*DowntimeHistoryEntity)(nil) - _ contracts.Upserter = (*DowntimeHistoryUpserter)(nil) - _ UpserterEntity = (*DowntimeHistory)(nil) - _ contracts.Initer = (*HistoryDowntime)(nil) - _ contracts.TableNamer = (*HistoryDowntime)(nil) - _ UpserterEntity = (*HistoryDowntime)(nil) - _ contracts.Initer = (*SlaHistoryDowntime)(nil) - _ UpserterEntity = (*SlaHistoryDowntime)(nil) - _ driver.Valuer = DowntimeEventTime{} - _ driver.Valuer = SlaDowntimeEndTime{} + _ database.Entity = (*DowntimeHistoryEntity)(nil) + _ database.Upserter = (*DowntimeHistoryUpserter)(nil) + _ UpserterEntity = (*DowntimeHistory)(nil) + _ contracts.Initer = (*HistoryDowntime)(nil) + _ database.TableNamer = (*HistoryDowntime)(nil) + _ UpserterEntity = (*HistoryDowntime)(nil) + _ contracts.Initer = (*SlaHistoryDowntime)(nil) + _ UpserterEntity = (*SlaHistoryDowntime)(nil) + _ driver.Valuer = DowntimeEventTime{} + _ driver.Valuer = SlaDowntimeEndTime{} ) diff --git a/pkg/icingadb/v1/history/flapping.go b/pkg/icingadb/v1/history/flapping.go index 9280b27aa..bd86f6205 100644 --- a/pkg/icingadb/v1/history/flapping.go +++ b/pkg/icingadb/v1/history/flapping.go @@ -2,9 +2,10 @@ package history import ( "database/sql/driver" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) type FlappingHistoryUpserter struct { @@ -72,9 +73,9 @@ func (et FlappingEventTime) Value() (driver.Value, error) { // Assert interface compliance. var ( - _ UpserterEntity = (*FlappingHistory)(nil) - _ contracts.Initer = (*HistoryFlapping)(nil) - _ contracts.TableNamer = (*HistoryFlapping)(nil) - _ UpserterEntity = (*HistoryFlapping)(nil) - _ driver.Valuer = FlappingEventTime{} + _ UpserterEntity = (*FlappingHistory)(nil) + _ contracts.Initer = (*HistoryFlapping)(nil) + _ database.TableNamer = (*HistoryFlapping)(nil) + _ UpserterEntity = (*HistoryFlapping)(nil) + _ driver.Valuer = FlappingEventTime{} ) diff --git a/pkg/icingadb/v1/history/meta.go b/pkg/icingadb/v1/history/meta.go index 434ed118b..0f23908f4 100644 --- a/pkg/icingadb/v1/history/meta.go +++ b/pkg/icingadb/v1/history/meta.go @@ -1,15 +1,15 @@ package history import ( - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) // UpserterEntity provides upsert for entities. type UpserterEntity interface { - contracts.Upserter - contracts.Entity + database.Upserter + database.Entity } // HistoryTableEntity is embedded by every concrete history type that has its own table. @@ -29,17 +29,17 @@ type HistoryEntity struct { } // Fingerprint implements part of the contracts.Entity interface. -func (he HistoryEntity) Fingerprint() contracts.Fingerprinter { +func (he HistoryEntity) Fingerprint() database.Fingerprinter { return he } // ID implements part of the contracts.Entity interface. -func (he HistoryEntity) ID() contracts.ID { +func (he HistoryEntity) ID() database.ID { return he.Id } // SetID implements part of the contracts.Entity interface. -func (he *HistoryEntity) SetID(id contracts.ID) { +func (he *HistoryEntity) SetID(id database.ID) { he.Id = id.(types.Binary) } @@ -71,10 +71,10 @@ type HistoryMeta struct { // Assert interface compliance. 
var ( - _ contracts.Entity = (*HistoryTableEntity)(nil) - _ contracts.Upserter = HistoryTableEntity{} - _ contracts.Entity = (*HistoryEntity)(nil) - _ contracts.Upserter = HistoryEntity{} - _ contracts.Entity = (*HistoryMeta)(nil) - _ contracts.Upserter = (*HistoryMeta)(nil) + _ database.Entity = (*HistoryTableEntity)(nil) + _ database.Upserter = HistoryTableEntity{} + _ database.Entity = (*HistoryEntity)(nil) + _ database.Upserter = HistoryEntity{} + _ database.Entity = (*HistoryMeta)(nil) + _ database.Upserter = (*HistoryMeta)(nil) ) diff --git a/pkg/icingadb/v1/history/notification.go b/pkg/icingadb/v1/history/notification.go index 17fd375ca..27f9d8778 100644 --- a/pkg/icingadb/v1/history/notification.go +++ b/pkg/icingadb/v1/history/notification.go @@ -1,22 +1,23 @@ package history import ( - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) type NotificationHistory struct { HistoryTableEntity `json:",inline"` HistoryTableMeta `json:",inline"` - NotificationId types.Binary `json:"notification_id"` - Type types.NotificationType `json:"type"` - SendTime types.UnixMilli `json:"send_time"` - State uint8 `json:"state"` - PreviousHardState uint8 `json:"previous_hard_state"` - Author string `json:"author"` - Text types.String `json:"text"` - UsersNotified uint16 `json:"users_notified"` + NotificationId types.Binary `json:"notification_id"` + Type icingadbTypes.NotificationType `json:"type"` + SendTime types.UnixMilli `json:"send_time"` + State uint8 `json:"state"` + PreviousHardState uint8 `json:"previous_hard_state"` + Author string `json:"author"` + Text types.String `json:"text"` + UsersNotified uint16 `json:"users_notified"` } type UserNotificationHistory struct { @@ -43,8 +44,8 @@ func (*HistoryNotification) TableName() string { // Assert interface compliance. 
var ( - _ UpserterEntity = (*NotificationHistory)(nil) - _ UpserterEntity = (*UserNotificationHistory)(nil) - _ contracts.TableNamer = (*HistoryNotification)(nil) - _ UpserterEntity = (*HistoryNotification)(nil) + _ UpserterEntity = (*NotificationHistory)(nil) + _ UpserterEntity = (*UserNotificationHistory)(nil) + _ database.TableNamer = (*HistoryNotification)(nil) + _ UpserterEntity = (*HistoryNotification)(nil) ) diff --git a/pkg/icingadb/v1/history/state.go b/pkg/icingadb/v1/history/state.go index 6320b738a..dfd421a0c 100644 --- a/pkg/icingadb/v1/history/state.go +++ b/pkg/icingadb/v1/history/state.go @@ -1,25 +1,26 @@ package history import ( - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type StateHistory struct { HistoryTableEntity `json:",inline"` HistoryTableMeta `json:",inline"` - EventTime types.UnixMilli `json:"event_time"` - StateType types.StateType `json:"state_type"` - SoftState uint8 `json:"soft_state"` - HardState uint8 `json:"hard_state"` - PreviousSoftState uint8 `json:"previous_soft_state"` - PreviousHardState uint8 `json:"previous_hard_state"` - CheckAttempt uint32 `json:"check_attempt"` - Output types.String `json:"output"` - LongOutput types.String `json:"long_output"` - MaxCheckAttempts uint32 `json:"max_check_attempts"` - CheckSource types.String `json:"check_source"` - SchedulingSource types.String `json:"scheduling_source"` + EventTime types.UnixMilli `json:"event_time"` + StateType icingadbTypes.StateType `json:"state_type"` + SoftState uint8 `json:"soft_state"` + HardState uint8 `json:"hard_state"` + PreviousSoftState uint8 `json:"previous_soft_state"` + PreviousHardState uint8 `json:"previous_hard_state"` + CheckAttempt uint32 `json:"check_attempt"` + Output types.String `json:"output"` + LongOutput types.String `json:"long_output"` + MaxCheckAttempts uint32 `json:"max_check_attempts"` + CheckSource types.String `json:"check_source"` + SchedulingSource types.String `json:"scheduling_source"` } type HistoryState struct { @@ -36,16 +37,16 @@ func (*HistoryState) TableName() string { type SlaHistoryState struct { HistoryTableEntity `json:",inline"` HistoryTableMeta `json:",inline"` - EventTime types.UnixMilli `json:"event_time"` - StateType types.StateType `json:"state_type" db:"-"` - HardState uint8 `json:"hard_state"` - PreviousHardState uint8 `json:"previous_hard_state"` + EventTime types.UnixMilli `json:"event_time"` + StateType icingadbTypes.StateType `json:"state_type" db:"-"` + HardState uint8 `json:"hard_state"` + PreviousHardState uint8 `json:"previous_hard_state"` } // Assert interface compliance. 
var ( - _ UpserterEntity = (*StateHistory)(nil) - _ contracts.TableNamer = (*HistoryState)(nil) - _ UpserterEntity = (*HistoryState)(nil) - _ UpserterEntity = (*SlaHistoryState)(nil) + _ UpserterEntity = (*StateHistory)(nil) + _ database.TableNamer = (*HistoryState)(nil) + _ UpserterEntity = (*HistoryState)(nil) + _ UpserterEntity = (*SlaHistoryState)(nil) ) diff --git a/pkg/icingadb/v1/host.go b/pkg/icingadb/v1/host.go index fbab47c1c..47aaa7869 100644 --- a/pkg/icingadb/v1/host.go +++ b/pkg/icingadb/v1/host.go @@ -3,8 +3,9 @@ package v1 import ( "bytes" "database/sql/driver" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" "net" ) @@ -89,27 +90,27 @@ type HostgroupMember struct { HostgroupId types.Binary `json:"hostgroup_id"` } -func NewHost() contracts.Entity { +func NewHost() database.Entity { return &Host{} } -func NewHostCustomvar() contracts.Entity { +func NewHostCustomvar() database.Entity { return &HostCustomvar{} } -func NewHostState() contracts.Entity { +func NewHostState() database.Entity { return &HostState{} } -func NewHostgroup() contracts.Entity { +func NewHostgroup() database.Entity { return &Hostgroup{} } -func NewHostgroupCustomvar() contracts.Entity { +func NewHostgroupCustomvar() database.Entity { return &HostgroupCustomvar{} } -func NewHostgroupMember() contracts.Entity { +func NewHostgroupMember() database.Entity { return &HostgroupMember{} } diff --git a/pkg/icingadb/v1/icingadb_instance.go b/pkg/icingadb/v1/icingadb_instance.go index d694b74a8..9af58aa04 100644 --- a/pkg/icingadb/v1/icingadb_instance.go +++ b/pkg/icingadb/v1/icingadb_instance.go @@ -1,7 +1,7 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/types" ) type IcingadbInstance struct { @@ -11,7 +11,7 @@ type IcingadbInstance struct { Heartbeat types.UnixMilli `json:"heartbeat"` Responsible types.Bool `json:"responsible"` Icinga2Version string `json:"icinga2_version"` - Icinga2StartTime types.UnixMilli `json:"icinga2_start_Time"` + Icinga2StartTime types.UnixMilli `json:"icinga2_start_time"` Icinga2NotificationsEnabled types.Bool `json:"icinga2_notifications_enabled"` Icinga2ActiveServiceChecksEnabled types.Bool `json:"icinga2_active_service_checks_enabled"` Icinga2ActiveHostChecksEnabled types.Bool `json:"icinga2_active_host_checks_enabled"` diff --git a/pkg/icingadb/v1/meta.go b/pkg/icingadb/v1/meta.go index 9266751e4..2d209b193 100644 --- a/pkg/icingadb/v1/meta.go +++ b/pkg/icingadb/v1/meta.go @@ -1,8 +1,9 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) // ChecksumMeta is embedded by every type with a checksum. @@ -31,12 +32,12 @@ type IdMeta struct { } // ID implements part of the contracts.IDer interface. -func (m IdMeta) ID() contracts.ID { +func (m IdMeta) ID() database.ID { return m.Id } // SetID implements part of the contracts.IDer interface. 
-func (m *IdMeta) SetID(id contracts.ID) { +func (m *IdMeta) SetID(id database.ID) { m.Id = id.(types.Binary) } diff --git a/pkg/icingadb/v1/notification.go b/pkg/icingadb/v1/notification.go index 31bdbbf6a..3cfca866b 100644 --- a/pkg/icingadb/v1/notification.go +++ b/pkg/icingadb/v1/notification.go @@ -1,24 +1,26 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type Notification struct { EntityWithChecksum `json:",inline"` EnvironmentMeta `json:",inline"` NameCiMeta `json:",inline"` - HostId types.Binary `json:"host_id"` - ServiceId types.Binary `json:"service_id"` - NotificationcommandId types.Binary `json:"notificationcommand_id"` - TimesBegin types.Int `json:"times_begin"` - TimesEnd types.Int `json:"times_end"` - NotificationInterval uint32 `json:"notification_interval"` - TimeperiodId types.Binary `json:"timeperiod_id"` - States types.NotificationStates `json:"states"` - Types types.NotificationTypes `json:"types"` - ZoneId types.Binary `json:"zone_id"` + HostId types.Binary `json:"host_id"` + ServiceId types.Binary `json:"service_id"` + NotificationcommandId types.Binary `json:"notificationcommand_id"` + TimesBegin types.Int `json:"times_begin"` + TimesEnd types.Int `json:"times_end"` + NotificationInterval uint32 `json:"notification_interval"` + TimeperiodId types.Binary `json:"timeperiod_id"` + States icingadbTypes.NotificationStates `json:"states"` + Types icingadbTypes.NotificationTypes `json:"types"` + ZoneId types.Binary `json:"zone_id"` } type NotificationUser struct { @@ -48,23 +50,23 @@ type NotificationCustomvar struct { NotificationId types.Binary `json:"notification_id"` } -func NewNotification() contracts.Entity { +func NewNotification() database.Entity { return &Notification{} } -func NewNotificationUser() contracts.Entity { +func NewNotificationUser() database.Entity { return &NotificationUser{} } -func NewNotificationUsergroup() contracts.Entity { +func NewNotificationUsergroup() database.Entity { return &NotificationUsergroup{} } -func NewNotificationRecipient() contracts.Entity { +func NewNotificationRecipient() database.Entity { return &NotificationRecipient{} } -func NewNotificationCustomvar() contracts.Entity { +func NewNotificationCustomvar() database.Entity { return &NotificationCustomvar{} } diff --git a/pkg/icingadb/v1/overdue/host.go b/pkg/icingadb/v1/overdue/host.go index 9d4299452..d61b48712 100644 --- a/pkg/icingadb/v1/overdue/host.go +++ b/pkg/icingadb/v1/overdue/host.go @@ -1,9 +1,9 @@ package overdue import ( - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) type HostState struct { @@ -11,7 +11,7 @@ type HostState struct { IsOverdue types.Bool `json:"is_overdue"` } -func NewHostState(id string, overdue bool) (contracts.Entity, error) { +func NewHostState(id string, overdue bool) (database.Entity, error) { hs := &HostState{IsOverdue: types.Bool{ Bool: overdue, Valid: true, @@ -22,5 +22,5 @@ func NewHostState(id string, overdue bool) (contracts.Entity, error) { // Assert interface compliance. 
var ( - _ contracts.Entity = (*HostState)(nil) + _ database.Entity = (*HostState)(nil) ) diff --git a/pkg/icingadb/v1/overdue/service.go b/pkg/icingadb/v1/overdue/service.go index dfd5383cc..14bb25906 100644 --- a/pkg/icingadb/v1/overdue/service.go +++ b/pkg/icingadb/v1/overdue/service.go @@ -1,9 +1,9 @@ package overdue import ( - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" v1 "github.com/icinga/icingadb/pkg/icingadb/v1" - "github.com/icinga/icingadb/pkg/types" ) type ServiceState struct { @@ -11,7 +11,7 @@ type ServiceState struct { IsOverdue types.Bool `json:"is_overdue"` } -func NewServiceState(id string, overdue bool) (contracts.Entity, error) { +func NewServiceState(id string, overdue bool) (database.Entity, error) { hs := &ServiceState{IsOverdue: types.Bool{ Bool: overdue, Valid: true, @@ -22,5 +22,5 @@ func NewServiceState(id string, overdue bool) (contracts.Entity, error) { // Assert interface compliance. var ( - _ contracts.Entity = (*ServiceState)(nil) + _ database.Entity = (*ServiceState)(nil) ) diff --git a/pkg/icingadb/v1/service.go b/pkg/icingadb/v1/service.go index 404544965..2c8a650b5 100644 --- a/pkg/icingadb/v1/service.go +++ b/pkg/icingadb/v1/service.go @@ -1,8 +1,9 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type Service struct { @@ -36,27 +37,27 @@ type ServicegroupMember struct { ServicegroupId types.Binary `json:"servicegroup_id"` } -func NewService() contracts.Entity { +func NewService() database.Entity { return &Service{} } -func NewServiceCustomvar() contracts.Entity { +func NewServiceCustomvar() database.Entity { return &ServiceCustomvar{} } -func NewServiceState() contracts.Entity { +func NewServiceState() database.Entity { return &ServiceState{} } -func NewServicegroup() contracts.Entity { +func NewServicegroup() database.Entity { return &Servicegroup{} } -func NewServicegroupCustomvar() contracts.Entity { +func NewServicegroupCustomvar() database.Entity { return &ServicegroupCustomvar{} } -func NewServicegroupMember() contracts.Entity { +func NewServicegroupMember() database.Entity { return &ServicegroupMember{} } diff --git a/pkg/icingadb/v1/state.go b/pkg/icingadb/v1/state.go index 983b14d5a..d2e48e814 100644 --- a/pkg/icingadb/v1/state.go +++ b/pkg/icingadb/v1/state.go @@ -1,39 +1,40 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type State struct { EntityWithChecksum `json:",inline"` EnvironmentMeta `json:",inline"` - AcknowledgementCommentId types.Binary `json:"acknowledgement_comment_id"` - LastCommentId types.Binary `json:"last_comment_id"` - CheckAttempt uint32 `json:"check_attempt"` - CheckCommandline types.String `json:"check_commandline"` - CheckSource types.String `json:"check_source"` - SchedulingSource types.String `json:"scheduling_source"` - ExecutionTime float64 `json:"execution_time"` - HardState uint8 `json:"hard_state"` - InDowntime types.Bool `json:"in_downtime"` - IsAcknowledged types.AcknowledgementState `json:"is_acknowledged"` - IsFlapping types.Bool `json:"is_flapping"` - IsHandled types.Bool `json:"is_handled"` - IsProblem types.Bool `json:"is_problem"` - IsReachable types.Bool `json:"is_reachable"` - LastStateChange types.UnixMilli 
`json:"last_state_change"` - LastUpdate types.UnixMilli `json:"last_update"` - Latency float64 `json:"latency"` - LongOutput types.String `json:"long_output"` - NextCheck types.UnixMilli `json:"next_check"` - NextUpdate types.UnixMilli `json:"next_update"` - Output types.String `json:"output"` - PerformanceData types.String `json:"performance_data"` - NormalizedPerformanceData types.String `json:"normalized_performance_data"` - PreviousSoftState uint8 `json:"previous_soft_state"` - PreviousHardState uint8 `json:"previous_hard_state"` - Severity uint16 `json:"severity"` - SoftState uint8 `json:"soft_state"` - StateType types.StateType `json:"state_type"` - CheckTimeout float64 `json:"check_timeout"` + AcknowledgementCommentId types.Binary `json:"acknowledgement_comment_id"` + LastCommentId types.Binary `json:"last_comment_id"` + CheckAttempt uint32 `json:"check_attempt"` + CheckCommandline types.String `json:"check_commandline"` + CheckSource types.String `json:"check_source"` + SchedulingSource types.String `json:"scheduling_source"` + ExecutionTime float64 `json:"execution_time"` + HardState uint8 `json:"hard_state"` + InDowntime types.Bool `json:"in_downtime"` + IsAcknowledged icingadbTypes.AcknowledgementState `json:"is_acknowledged"` + IsFlapping types.Bool `json:"is_flapping"` + IsHandled types.Bool `json:"is_handled"` + IsProblem types.Bool `json:"is_problem"` + IsReachable types.Bool `json:"is_reachable"` + LastStateChange types.UnixMilli `json:"last_state_change"` + LastUpdate types.UnixMilli `json:"last_update"` + Latency float64 `json:"latency"` + LongOutput types.String `json:"long_output"` + NextCheck types.UnixMilli `json:"next_check"` + NextUpdate types.UnixMilli `json:"next_update"` + Output types.String `json:"output"` + PerformanceData types.String `json:"performance_data"` + NormalizedPerformanceData types.String `json:"normalized_performance_data"` + PreviousSoftState uint8 `json:"previous_soft_state"` + PreviousHardState uint8 `json:"previous_hard_state"` + Severity uint16 `json:"severity"` + SoftState uint8 `json:"soft_state"` + StateType icingadbTypes.StateType `json:"state_type"` + CheckTimeout float64 `json:"check_timeout"` } diff --git a/pkg/icingadb/v1/timeperiod.go b/pkg/icingadb/v1/timeperiod.go index 06a3bd290..2025fcbc9 100644 --- a/pkg/icingadb/v1/timeperiod.go +++ b/pkg/icingadb/v1/timeperiod.go @@ -1,8 +1,9 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" ) type Timeperiod struct { @@ -41,23 +42,23 @@ type TimeperiodCustomvar struct { TimeperiodId types.Binary `json:"timeperiod_id"` } -func NewTimeperiod() contracts.Entity { +func NewTimeperiod() database.Entity { return &Timeperiod{} } -func NewTimeperiodRange() contracts.Entity { +func NewTimeperiodRange() database.Entity { return &TimeperiodRange{} } -func NewTimeperiodOverrideInclude() contracts.Entity { +func NewTimeperiodOverrideInclude() database.Entity { return &TimeperiodOverrideInclude{} } -func NewTimeperiodOverrideExclude() contracts.Entity { +func NewTimeperiodOverrideExclude() database.Entity { return &TimeperiodOverrideExclude{} } -func NewTimeperiodCustomvar() contracts.Entity { +func NewTimeperiodCustomvar() database.Entity { return &TimeperiodCustomvar{} } diff --git a/pkg/icingadb/v1/url.go b/pkg/icingadb/v1/url.go index cf70abc6a..55f6eb15b 100644 --- a/pkg/icingadb/v1/url.go +++ b/pkg/icingadb/v1/url.go @@ -1,6 +1,8 @@ package v1 
-import "github.com/icinga/icingadb/pkg/contracts" +import ( + "github.com/icinga/icinga-go-library/database" +) type ActionUrl struct { EntityWithoutChecksum `json:",inline"` @@ -20,14 +22,14 @@ type IconImage struct { IconImage string `json:"icon_image"` } -func NewActionUrl() contracts.Entity { +func NewActionUrl() database.Entity { return &ActionUrl{} } -func NewNotesUrl() contracts.Entity { +func NewNotesUrl() database.Entity { return &NotesUrl{} } -func NewIconImage() contracts.Entity { +func NewIconImage() database.Entity { return &IconImage{} } diff --git a/pkg/icingadb/v1/user.go b/pkg/icingadb/v1/user.go index 6a4405008..a556e30fa 100644 --- a/pkg/icingadb/v1/user.go +++ b/pkg/icingadb/v1/user.go @@ -1,22 +1,24 @@ package v1 import ( + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" + icingadbTypes "github.com/icinga/icingadb/pkg/icingadb/types" ) type User struct { EntityWithChecksum `json:",inline"` EnvironmentMeta `json:",inline"` NameCiMeta `json:",inline"` - DisplayName string `json:"display_name"` - Email string `json:"email"` - Pager string `json:"pager"` - NotificationsEnabled types.Bool `json:"notifications_enabled"` - TimeperiodId types.Binary `json:"timeperiod_id"` - States types.NotificationStates `json:"states"` - Types types.NotificationTypes `json:"types"` - ZoneId types.Binary `json:"zone_id"` + DisplayName string `json:"display_name"` + Email string `json:"email"` + Pager string `json:"pager"` + NotificationsEnabled types.Bool `json:"notifications_enabled"` + TimeperiodId types.Binary `json:"timeperiod_id"` + States icingadbTypes.NotificationStates `json:"states"` + Types icingadbTypes.NotificationTypes `json:"types"` + ZoneId types.Binary `json:"zone_id"` } type UserCustomvar struct { @@ -39,23 +41,23 @@ type UsergroupMember struct { UsergroupId types.Binary `json:"usergroup_id"` } -func NewUser() contracts.Entity { +func NewUser() database.Entity { return &User{} } -func NewUserCustomvar() contracts.Entity { +func NewUserCustomvar() database.Entity { return &UserCustomvar{} } -func NewUsergroup() contracts.Entity { +func NewUsergroup() database.Entity { return &Usergroup{} } -func NewUsergroupCustomvar() contracts.Entity { +func NewUsergroupCustomvar() database.Entity { return &UsergroupCustomvar{} } -func NewUsergroupMember() contracts.Entity { +func NewUsergroupMember() database.Entity { return &UsergroupMember{} } diff --git a/pkg/icingadb/v1/v1.go b/pkg/icingadb/v1/v1.go index af19fdf80..f7677c775 100644 --- a/pkg/icingadb/v1/v1.go +++ b/pkg/icingadb/v1/v1.go @@ -1,12 +1,12 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/contracts" + "github.com/icinga/icinga-go-library/database" ) -var StateFactories = []contracts.EntityFactoryFunc{NewHostState, NewServiceState} +var StateFactories = []database.EntityFactoryFunc{NewHostState, NewServiceState} -var ConfigFactories = []contracts.EntityFactoryFunc{ +var ConfigFactories = []database.EntityFactoryFunc{ NewActionUrl, NewCheckcommand, NewCheckcommandArgument, diff --git a/pkg/icingaredis/client.go b/pkg/icingaredis/client.go deleted file mode 100644 index c494f95d0..000000000 --- a/pkg/icingaredis/client.go +++ /dev/null @@ -1,243 +0,0 @@ -package icingaredis - -import ( - "context" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/common" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/logging" - 
"github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/utils" - "github.com/pkg/errors" - "github.com/redis/go-redis/v9" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" - "runtime" - "time" -) - -// Client is a wrapper around redis.Client with -// streaming and logging capabilities. -type Client struct { - *redis.Client - - Options *Options - - logger *logging.Logger -} - -// Options define user configurable Redis options. -type Options struct { - BlockTimeout time.Duration `yaml:"block_timeout" default:"1s"` - HMGetCount int `yaml:"hmget_count" default:"4096"` - HScanCount int `yaml:"hscan_count" default:"4096"` - MaxHMGetConnections int `yaml:"max_hmget_connections" default:"8"` - Timeout time.Duration `yaml:"timeout" default:"30s"` - XReadCount int `yaml:"xread_count" default:"4096"` -} - -// Validate checks constraints in the supplied Redis options and returns an error if they are violated. -func (o *Options) Validate() error { - if o.BlockTimeout <= 0 { - return errors.New("block_timeout must be positive") - } - if o.HMGetCount < 1 { - return errors.New("hmget_count must be at least 1") - } - if o.HScanCount < 1 { - return errors.New("hscan_count must be at least 1") - } - if o.MaxHMGetConnections < 1 { - return errors.New("max_hmget_connections must be at least 1") - } - if o.Timeout == 0 { - return errors.New("timeout cannot be 0. Configure a value greater than zero, or use -1 for no timeout") - } - if o.XReadCount < 1 { - return errors.New("xread_count must be at least 1") - } - - return nil -} - -// NewClient returns a new icingaredis.Client wrapper for a pre-existing *redis.Client. -func NewClient(client *redis.Client, logger *logging.Logger, options *Options) *Client { - return &Client{Client: client, logger: logger, Options: options} -} - -// HPair defines Redis hashes field-value pairs. -type HPair struct { - Field string - Value string -} - -// HYield yields HPair field-value pairs for all fields in the hash stored at key. -func (c *Client) HYield(ctx context.Context, key string) (<-chan HPair, <-chan error) { - pairs := make(chan HPair, c.Options.HScanCount) - - return pairs, com.WaitAsync(contracts.WaiterFunc(func() error { - var counter com.Counter - defer c.log(ctx, key, &counter).Stop() - defer close(pairs) - - seen := make(map[string]struct{}) - - var cursor uint64 - var err error - var page []string - - for { - cmd := c.HScan(ctx, key, cursor, "", int64(c.Options.HScanCount)) - page, cursor, err = cmd.Result() - - if err != nil { - return WrapCmdErr(cmd) - } - - for i := 0; i < len(page); i += 2 { - if _, ok := seen[page[i]]; ok { - // Ignore duplicate returned by HSCAN. - continue - } - - seen[page[i]] = struct{}{} - - select { - case pairs <- HPair{ - Field: page[i], - Value: page[i+1], - }: - counter.Inc() - case <-ctx.Done(): - return ctx.Err() - } - } - - if cursor == 0 { - break - } - } - - return nil - })) -} - -// HMYield yields HPair field-value pairs for the specified fields in the hash stored at key. 
-func (c *Client) HMYield(ctx context.Context, key string, fields ...string) (<-chan HPair, <-chan error) { - pairs := make(chan HPair) - - return pairs, com.WaitAsync(contracts.WaiterFunc(func() error { - var counter com.Counter - defer c.log(ctx, key, &counter).Stop() - - g, ctx := errgroup.WithContext(ctx) - - defer func() { - // Wait until the group is done so that we can safely close the pairs channel, - // because on error, sem.Acquire will return before calling g.Wait(), - // which can result in goroutines working on a closed channel. - _ = g.Wait() - close(pairs) - }() - - // Use context from group. - batches := utils.BatchSliceOfStrings(ctx, fields, c.Options.HMGetCount) - - sem := semaphore.NewWeighted(int64(c.Options.MaxHMGetConnections)) - - for batch := range batches { - if err := sem.Acquire(ctx, 1); err != nil { - return errors.Wrap(err, "can't acquire semaphore") - } - - batch := batch - g.Go(func() error { - defer sem.Release(1) - - cmd := c.HMGet(ctx, key, batch...) - vals, err := cmd.Result() - - if err != nil { - return WrapCmdErr(cmd) - } - - for i, v := range vals { - if v == nil { - c.logger.Warnf("HMGET %s: field %#v missing", key, batch[i]) - continue - } - - select { - case pairs <- HPair{ - Field: batch[i], - Value: v.(string), - }: - counter.Inc() - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - }) - } - - return g.Wait() - })) -} - -// XReadUntilResult (repeatedly) calls XREAD with the specified arguments until a result is returned. -// Each call blocks at most for the duration specified in Options.BlockTimeout until data -// is available before it times out and the next call is made. -// This also means that an already set block timeout is overridden. -func (c *Client) XReadUntilResult(ctx context.Context, a *redis.XReadArgs) ([]redis.XStream, error) { - a.Block = c.Options.BlockTimeout - - for { - cmd := c.XRead(ctx, a) - streams, err := cmd.Result() - if err != nil { - if errors.Is(err, redis.Nil) { - continue - } - - return streams, WrapCmdErr(cmd) - } - - return streams, nil - } -} - -// YieldAll yields all entities from Redis that belong to the specified SyncSubject. -func (c Client) YieldAll(ctx context.Context, subject *common.SyncSubject) (<-chan contracts.Entity, <-chan error) { - key := utils.Key(utils.Name(subject.Entity()), ':') - if subject.WithChecksum() { - key = "icinga:checksum:" + key - } else { - key = "icinga:" + key - } - - pairs, errs := c.HYield(ctx, key) - g, ctx := errgroup.WithContext(ctx) - // Let errors from HYield cancel the group. - com.ErrgroupReceive(g, errs) - - desired, errs := CreateEntities(ctx, subject.FactoryForDelta(), pairs, runtime.NumCPU()) - // Let errors from CreateEntities cancel the group. - com.ErrgroupReceive(g, errs) - - return desired, com.WaitAsync(g) -} - -func (c *Client) log(ctx context.Context, key string, counter *com.Counter) periodic.Stopper { - return periodic.Start(ctx, c.logger.Interval(), func(tick periodic.Tick) { - // We may never get to progress logging here, - // as fetching should be completed before the interval expires, - // but if it does, it is good to have this log message. 
- if count := counter.Reset(); count > 0 { - c.logger.Debugf("Fetched %d items from %s", count, key) - } - }, periodic.OnStop(func(tick periodic.Tick) { - c.logger.Debugf("Finished fetching from %s with %d items in %s", key, counter.Total(), tick.Elapsed) - })) -} diff --git a/pkg/icingaredis/heartbeat.go b/pkg/icingaredis/heartbeat.go index cb3401087..840445a23 100644 --- a/pkg/icingaredis/heartbeat.go +++ b/pkg/icingaredis/heartbeat.go @@ -2,13 +2,12 @@ package icingaredis import ( "context" - "github.com/icinga/icingadb/internal" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-go-library/utils" v1 "github.com/icinga/icingadb/pkg/icingaredis/v1" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "go.uber.org/zap" "golang.org/x/sync/errgroup" "sync" @@ -27,7 +26,7 @@ type Heartbeat struct { events chan *HeartbeatMessage lastReceivedMs int64 cancelCtx context.CancelFunc - client *Client + client *redis.Client done chan struct{} errMu sync.Mutex err error @@ -35,7 +34,7 @@ type Heartbeat struct { } // NewHeartbeat returns a new Heartbeat and starts the heartbeat controller loop. -func NewHeartbeat(ctx context.Context, client *Client, logger *logging.Logger) *Heartbeat { +func NewHeartbeat(ctx context.Context, client *redis.Client, logger *logging.Logger) *Heartbeat { ctx, cancelCtx := context.WithCancel(ctx) heartbeat := &Heartbeat{ @@ -208,7 +207,7 @@ func (m *HeartbeatMessage) Stats() *v1.StatsMessage { // EnvironmentID returns the Icinga DB environment ID stored in the heartbeat message. func (m *HeartbeatMessage) EnvironmentID() (types.Binary, error) { var id types.Binary - err := internal.UnmarshalJSON([]byte(m.stats["icingadb_environment"].(string)), &id) + err := types.UnmarshalJSON([]byte(m.stats["icingadb_environment"].(string)), &id) if err != nil { return nil, err } diff --git a/pkg/icingaredis/telemetry/heartbeat.go b/pkg/icingaredis/telemetry/heartbeat.go index 0057ae07f..041a696d0 100644 --- a/pkg/icingaredis/telemetry/heartbeat.go +++ b/pkg/icingaredis/telemetry/heartbeat.go @@ -3,14 +3,14 @@ package telemetry import ( "context" "fmt" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/utils" "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/com" "github.com/icinga/icingadb/pkg/icingaredis" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "go.uber.org/zap" "regexp" "runtime/metrics" @@ -88,7 +88,7 @@ var startTime = time.Now().UnixMilli() // StartHeartbeat periodically writes heartbeats to Redis for being monitored by Icinga 2. 
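// An illustrative call site under the new signature (a sketch; obtaining the
// icinga-go-library *redis.Client and the ha/heartbeat values is not shown):
//
//    telemetry.StartHeartbeat(ctx, rc, logger, haInstance, beat)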
func StartHeartbeat( - ctx context.Context, client *icingaredis.Client, logger *logging.Logger, ha ha, heartbeat *icingaredis.Heartbeat, + ctx context.Context, client *redis.Client, logger *logging.Logger, ha ha, heartbeat *icingaredis.Heartbeat, ) { goMetrics := NewGoMetrics() @@ -139,7 +139,7 @@ func StartHeartbeat( silenceUntil = now.Add(time.Minute) } - logw("Can't update own heartbeat", zap.Error(icingaredis.WrapCmdErr(cmd))) + logw("Can't update own heartbeat", zap.Error(redis.WrapCmdErr(cmd))) } else { lastErr = "" silenceUntil = time.Time{} diff --git a/pkg/icingaredis/telemetry/stats.go b/pkg/icingaredis/telemetry/stats.go index 2b592a547..78f5c5c67 100644 --- a/pkg/icingaredis/telemetry/stats.go +++ b/pkg/icingaredis/telemetry/stats.go @@ -2,12 +2,11 @@ package telemetry import ( "context" - "github.com/icinga/icingadb/pkg/com" - "github.com/icinga/icingadb/pkg/icingaredis" - "github.com/icinga/icingadb/pkg/logging" - "github.com/icinga/icingadb/pkg/periodic" - "github.com/icinga/icingadb/pkg/utils" - "github.com/redis/go-redis/v9" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/periodic" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/utils" "go.uber.org/zap" "strconv" "time" @@ -19,7 +18,7 @@ var Stats struct { } // WriteStats periodically forwards Stats to Redis for being monitored by Icinga 2. -func WriteStats(ctx context.Context, client *icingaredis.Client, logger *logging.Logger) { +func WriteStats(ctx context.Context, client *redis.Client, logger *logging.Logger) { counters := map[string]*com.Counter{ "config_sync": &Stats.Config, "state_sync": &Stats.State, @@ -44,7 +43,7 @@ func WriteStats(ctx context.Context, client *icingaredis.Client, logger *logging Values: data, }) if err := cmd.Err(); err != nil && !utils.IsContextCanceled(err) { - logger.Warnw("Can't update own stats", zap.Error(icingaredis.WrapCmdErr(cmd))) + logger.Warnw("Can't update own stats", zap.Error(redis.WrapCmdErr(cmd))) } } }) diff --git a/pkg/icingaredis/utils.go b/pkg/icingaredis/utils.go index 50c97f9bf..22c8661f4 100644 --- a/pkg/icingaredis/utils.go +++ b/pkg/icingaredis/utils.go @@ -2,40 +2,23 @@ package icingaredis import ( "context" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/com" + "github.com/icinga/icinga-go-library/com" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/redis" + "github.com/icinga/icinga-go-library/strcase" + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icingadb/pkg/common" "github.com/icinga/icingadb/pkg/contracts" - "github.com/icinga/icingadb/pkg/types" - "github.com/icinga/icingadb/pkg/utils" "github.com/pkg/errors" - "github.com/redis/go-redis/v9" "golang.org/x/sync/errgroup" + "runtime" ) -// Streams represents a Redis stream key to ID mapping. -type Streams map[string]string - -// Option returns the Redis stream key to ID mapping -// as a slice of stream keys followed by their IDs -// that is compatible for the Redis STREAMS option. -func (s Streams) Option() []string { - // len*2 because we're appending the IDs later. - streams := make([]string, 0, len(s)*2) - ids := make([]string, 0, len(s)) - - for key, id := range s { - streams = append(streams, key) - ids = append(ids, id) - } - - return append(streams, ids...) 
-} - // CreateEntities streams and creates entities from the // given Redis field value pairs using the specified factory function, // and streams them on a returned channel. -func CreateEntities(ctx context.Context, factoryFunc contracts.EntityFactoryFunc, pairs <-chan HPair, concurrent int) (<-chan contracts.Entity, <-chan error) { - entities := make(chan contracts.Entity) +func CreateEntities(ctx context.Context, factoryFunc database.EntityFactoryFunc, pairs <-chan redis.HPair, concurrent int) (<-chan database.Entity, <-chan error) { + entities := make(chan database.Entity) g, ctx := errgroup.WithContext(ctx) g.Go(func() error { @@ -53,7 +36,7 @@ func CreateEntities(ctx context.Context, factoryFunc contracts.EntityFactoryFunc } e := factoryFunc() - if err := internal.UnmarshalJSON([]byte(pair.Value), e); err != nil { + if err := types.UnmarshalJSON([]byte(pair.Value), e); err != nil { return err } e.SetID(id) @@ -78,8 +61,8 @@ func CreateEntities(ctx context.Context, factoryFunc contracts.EntityFactoryFunc // SetChecksums concurrently streams from the given entities and // sets their checksums using the specified map and // streams the results on a returned channel. -func SetChecksums(ctx context.Context, entities <-chan contracts.Entity, checksums map[string]contracts.Entity, concurrent int) (<-chan contracts.Entity, <-chan error) { - entitiesWithChecksum := make(chan contracts.Entity) +func SetChecksums(ctx context.Context, entities <-chan database.Entity, checksums map[string]database.Entity, concurrent int) (<-chan database.Entity, <-chan error) { + entitiesWithChecksum := make(chan database.Entity) g, ctx := errgroup.WithContext(ctx) g.Go(func() error { @@ -113,16 +96,23 @@ func SetChecksums(ctx context.Context, entities <-chan contracts.Entity, checksu return entitiesWithChecksum, com.WaitAsync(g) } -// WrapCmdErr adds the command itself and -// the stack of the current goroutine to the command's error if any. -func WrapCmdErr(cmd redis.Cmder) error { - err := cmd.Err() - if err != nil { - err = errors.Wrapf(err, "can't perform %q", utils.Ellipsize( - redis.NewCmd(context.Background(), cmd.Args()).String(), // Omits error in opposite to cmd.String() - 100, - )) +// YieldAll yields all entities from Redis that belong to the specified SyncSubject. +func YieldAll(ctx context.Context, c *redis.Client, subject *common.SyncSubject) (<-chan database.Entity, <-chan error) { + key := strcase.Delimited(types.Name(subject.Entity()), ':') + if subject.WithChecksum() { + key = "icinga:checksum:" + key + } else { + key = "icinga:" + key } - return err + pairs, errs := c.HYield(ctx, key) + g, ctx := errgroup.WithContext(ctx) + // Let errors from HYield cancel the group. + com.ErrgroupReceive(g, errs) + + desired, errs := CreateEntities(ctx, subject.FactoryForDelta(), pairs, runtime.NumCPU()) + // Let errors from CreateEntities cancel the group. + com.ErrgroupReceive(g, errs) + + return desired, com.WaitAsync(g) } diff --git a/pkg/icingaredis/v1/icinga_status.go b/pkg/icingaredis/v1/icinga_status.go index d94d3d650..eaad7207e 100644 --- a/pkg/icingaredis/v1/icinga_status.go +++ b/pkg/icingaredis/v1/icinga_status.go @@ -1,7 +1,7 @@ package v1 import ( - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/types" ) // IcingaStatus defines Icinga status information. 
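A recurring substitution throughout this patch is internal.UnmarshalJSON giving
way to types.UnmarshalJSON from icinga-go-library. The call sites below suggest
it keeps the json.Unmarshal-shaped contract; a minimal sketch mirroring
StatsMessage.Time(), where s holds a raw JSON-encoded timestamp:

    var t types.UnixMilli
    if err := types.UnmarshalJSON([]byte(s), &t); err != nil {
        return nil, err
    }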
diff --git a/pkg/icingaredis/v1/stats_message.go b/pkg/icingaredis/v1/stats_message.go index 5b0462928..0798468a9 100644 --- a/pkg/icingaredis/v1/stats_message.go +++ b/pkg/icingaredis/v1/stats_message.go @@ -1,8 +1,7 @@ package v1 import ( - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/types" + "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" ) @@ -25,7 +24,7 @@ func (m StatsMessage) IcingaStatus() (*IcingaStatus, error) { } `json:"status"` } - if err := internal.UnmarshalJSON([]byte(s), &envelope); err != nil { + if err := types.UnmarshalJSON([]byte(s), &envelope); err != nil { return nil, err } @@ -40,7 +39,7 @@ func (m StatsMessage) Time() (*types.UnixMilli, error) { if s, ok := m["timestamp"].(string); ok { var t types.UnixMilli - if err := internal.UnmarshalJSON([]byte(s), &t); err != nil { + if err := types.UnmarshalJSON([]byte(s), &t); err != nil { return nil, err } diff --git a/pkg/logging/journald_core.go b/pkg/logging/journald_core.go deleted file mode 100644 index 50dd39f80..000000000 --- a/pkg/logging/journald_core.go +++ /dev/null @@ -1,85 +0,0 @@ -package logging - -import ( - "github.com/icinga/icingadb/pkg/utils" - "github.com/pkg/errors" - "github.com/ssgreg/journald" - "go.uber.org/zap/zapcore" - "strings" - "unicode" -) - -// priorities maps zapcore.Level to journal.Priority. -var priorities = map[zapcore.Level]journald.Priority{ - zapcore.DebugLevel: journald.PriorityDebug, - zapcore.InfoLevel: journald.PriorityInfo, - zapcore.WarnLevel: journald.PriorityWarning, - zapcore.ErrorLevel: journald.PriorityErr, - zapcore.FatalLevel: journald.PriorityCrit, - zapcore.PanicLevel: journald.PriorityCrit, - zapcore.DPanicLevel: journald.PriorityCrit, -} - -// NewJournaldCore returns a zapcore.Core that sends log entries to systemd-journald and -// uses the given identifier as a prefix for structured logging context that is sent as journal fields. -func NewJournaldCore(identifier string, enab zapcore.LevelEnabler) zapcore.Core { - return &journaldCore{ - LevelEnabler: enab, - identifier: identifier, - identifierU: strings.ToUpper(identifier), - } -} - -type journaldCore struct { - zapcore.LevelEnabler - context []zapcore.Field - identifier string - identifierU string -} - -func (c *journaldCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - if c.Enabled(ent.Level) { - return ce.AddCore(ent, c) - } - - return ce -} - -func (c *journaldCore) Sync() error { - return nil -} - -func (c *journaldCore) With(fields []zapcore.Field) zapcore.Core { - cc := *c - cc.context = append(cc.context[:len(cc.context):len(cc.context)], fields...) 
- - return &cc -} - -func (c *journaldCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { - pri, ok := priorities[ent.Level] - if !ok { - return errors.Errorf("unknown log level %q", ent.Level) - } - - enc := zapcore.NewMapObjectEncoder() - c.addFields(enc, fields) - c.addFields(enc, c.context) - enc.Fields["SYSLOG_IDENTIFIER"] = c.identifier - - message := ent.Message - if ent.LoggerName != c.identifier { - message = ent.LoggerName + ": " + message - } - - return journald.Send(message, pri, enc.Fields) -} - -func (c *journaldCore) addFields(enc zapcore.ObjectEncoder, fields []zapcore.Field) { - for _, field := range fields { - field.Key = c.identifierU + - "_" + - utils.ConvertCamelCase(field.Key, unicode.UpperCase, '_') - field.AddTo(enc) - } -} diff --git a/pkg/logging/logger.go b/pkg/logging/logger.go deleted file mode 100644 index 490445e17..000000000 --- a/pkg/logging/logger.go +++ /dev/null @@ -1,26 +0,0 @@ -package logging - -import ( - "go.uber.org/zap" - "time" -) - -// Logger wraps zap.SugaredLogger and -// allows to get the interval for periodic logging. -type Logger struct { - *zap.SugaredLogger - interval time.Duration -} - -// NewLogger returns a new Logger. -func NewLogger(base *zap.SugaredLogger, interval time.Duration) *Logger { - return &Logger{ - SugaredLogger: base, - interval: interval, - } -} - -// Interval returns the interval for periodic logging. -func (l *Logger) Interval() time.Duration { - return l.interval -} diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go deleted file mode 100644 index e3106956a..000000000 --- a/pkg/logging/logging.go +++ /dev/null @@ -1,131 +0,0 @@ -package logging - -import ( - "fmt" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "os" - "sync" - "time" -) - -const ( - CONSOLE = "console" - JOURNAL = "systemd-journald" -) - -// defaultEncConfig defines the default zapcore.EncoderConfig for the logging package. -var defaultEncConfig = zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, -} - -// Options define child loggers with their desired log level. -type Options map[string]zapcore.Level - -// Logging implements access to a default logger and named child loggers. -// Log levels can be configured per named child via Options which, if not configured, -// fall back on a default log level. -// Logs either to the console or to systemd-journald. -type Logging struct { - logger *Logger - output string - verbosity zap.AtomicLevel - interval time.Duration - - // coreFactory creates zapcore.Core based on the log level and the log output. - coreFactory func(zap.AtomicLevel) zapcore.Core - - mu sync.Mutex - loggers map[string]*Logger - - options Options -} - -// NewLogging takes the name and log level for the default logger, -// output where log messages are written to, -// options having log levels for named child loggers -// and returns a new Logging. 
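The logging package is likewise absorbed by icinga-go-library (note the
github.com/icinga/icinga-go-library/logging imports earlier in this patch).
Going by the constructor deleted here, setup looks roughly as follows, assuming
the moved API stays compatible; the child logger name is hypothetical:

    logs, err := logging.NewLogging("icingadb", zapcore.InfoLevel, logging.CONSOLE, logging.Options{}, 20*time.Second)
    if err != nil {
        return err
    }
    redisLog := logs.GetChildLogger("redis") // falls back to the default level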
-func NewLogging(name string, level zapcore.Level, output string, options Options, interval time.Duration) (*Logging, error) { - verbosity := zap.NewAtomicLevelAt(level) - - var coreFactory func(zap.AtomicLevel) zapcore.Core - switch output { - case CONSOLE: - enc := zapcore.NewConsoleEncoder(defaultEncConfig) - ws := zapcore.Lock(os.Stderr) - coreFactory = func(verbosity zap.AtomicLevel) zapcore.Core { - return zapcore.NewCore(enc, ws, verbosity) - } - case JOURNAL: - coreFactory = func(verbosity zap.AtomicLevel) zapcore.Core { - return NewJournaldCore(name, verbosity) - } - default: - return nil, invalidOutput(output) - } - - logger := NewLogger(zap.New(coreFactory(verbosity)).Named(name).Sugar(), interval) - - return &Logging{ - logger: logger, - output: output, - verbosity: verbosity, - interval: interval, - coreFactory: coreFactory, - loggers: make(map[string]*Logger), - options: options, - }, - nil -} - -// GetChildLogger returns a named child logger. -// Log levels for named child loggers are obtained from the logging options and, if not found, -// set to the default log level. -func (l *Logging) GetChildLogger(name string) *Logger { - l.mu.Lock() - defer l.mu.Unlock() - - if logger, ok := l.loggers[name]; ok { - return logger - } - - var verbosity zap.AtomicLevel - if level, found := l.options[name]; found { - verbosity = zap.NewAtomicLevelAt(level) - } else { - verbosity = l.verbosity - } - - logger := NewLogger(zap.New(l.coreFactory(verbosity)).Named(name).Sugar(), l.interval) - l.loggers[name] = logger - - return logger -} - -// GetLogger returns the default logger. -func (l *Logging) GetLogger() *Logger { - return l.logger -} - -// AssertOutput returns an error if output is not a valid logger output. -func AssertOutput(o string) error { - if o == CONSOLE || o == JOURNAL { - return nil - } - - return invalidOutput(o) -} - -func invalidOutput(o string) error { - return fmt.Errorf("%s is not a valid logger output. Must be either %q or %q", o, CONSOLE, JOURNAL) -} diff --git a/pkg/periodic/periodic.go b/pkg/periodic/periodic.go deleted file mode 100644 index 6ef5ceb87..000000000 --- a/pkg/periodic/periodic.go +++ /dev/null @@ -1,123 +0,0 @@ -package periodic - -import ( - "context" - "sync" - "time" -) - -// Option configures Start. -type Option interface { - apply(*periodic) -} - -// Stopper implements the Stop method, -// which stops a periodic task from Start(). -type Stopper interface { - Stop() // Stops a periodic task. -} - -// Tick is the value for periodic task callbacks that -// contains the time of the tick and -// the time elapsed since the start of the periodic task. -type Tick struct { - Elapsed time.Duration - Time time.Time -} - -// Immediate starts the periodic task immediately instead of after the first tick. -func Immediate() Option { - return optionFunc(func(p *periodic) { - p.immediate = true - }) -} - -// OnStop configures a callback that is executed when a periodic task is stopped or canceled. -func OnStop(f func(Tick)) Option { - return optionFunc(func(p *periodic) { - p.onStop = f - }) -} - -// Start starts a periodic task with a ticker at the specified interval, -// which executes the given callback after each tick. -// Pending tasks do not overlap, but could start immediately if -// the previous task(s) takes longer than the interval. -// Call Stop() on the return value in order to stop the ticker and to release associated resources. -// The interval must be greater than zero. 
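// A usage sketch for Start as specified above; the interval and the callback
// body are illustrative:
//
//	stop := periodic.Start(context.Background(), time.Minute, func(tick periodic.Tick) {
//		fmt.Println("tick at", tick.Time, "after", tick.Elapsed)
//	}, periodic.Immediate())
//	defer stop.Stop()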
-func Start(ctx context.Context, interval time.Duration, callback func(Tick), options ...Option) Stopper { - t := &periodic{ - interval: interval, - callback: callback, - } - - for _, option := range options { - option.apply(t) - } - - ctx, cancelCtx := context.WithCancel(ctx) - - start := time.Now() - - go func() { - done := false - - if !t.immediate { - select { - case <-time.After(interval): - case <-ctx.Done(): - done = true - } - } - - if !done { - ticker := time.NewTicker(t.interval) - defer ticker.Stop() - - for tickTime := time.Now(); !done; { - t.callback(Tick{ - Elapsed: tickTime.Sub(start), - Time: tickTime, - }) - - select { - case tickTime = <-ticker.C: - case <-ctx.Done(): - done = true - } - } - } - - if t.onStop != nil { - now := time.Now() - t.onStop(Tick{ - Elapsed: now.Sub(start), - Time: now, - }) - } - }() - - return stoperFunc(func() { - t.stop.Do(cancelCtx) - }) -} - -type optionFunc func(*periodic) - -func (f optionFunc) apply(p *periodic) { - f(p) -} - -type stoperFunc func() - -func (f stoperFunc) Stop() { - f() -} - -type periodic struct { - interval time.Duration - callback func(Tick) - immediate bool - stop sync.Once - onStop func(Tick) -} diff --git a/pkg/retry/retry.go b/pkg/retry/retry.go deleted file mode 100644 index e5b93de33..000000000 --- a/pkg/retry/retry.go +++ /dev/null @@ -1,197 +0,0 @@ -package retry - -import ( - "context" - "database/sql/driver" - "github.com/go-sql-driver/mysql" - "github.com/icinga/icingadb/pkg/backoff" - "github.com/lib/pq" - "github.com/pkg/errors" - "io" - "net" - "syscall" - "time" -) - -// DefaultTimeout is our opinionated default timeout for retrying database and Redis operations. -const DefaultTimeout = 5 * time.Minute - -// RetryableFunc is a retryable function. -type RetryableFunc func(context.Context) error - -// IsRetryable checks whether a new attempt can be started based on the error passed. -type IsRetryable func(error) bool - -// Settings aggregates optional settings for WithBackoff. -type Settings struct { - // If >0, Timeout lets WithBackoff stop retrying gracefully once elapsed based on the following criteria: - // * If the execution of RetryableFunc has taken longer than Timeout, no further attempts are made. - // * If Timeout elapses during the sleep phase between retries, one final retry is attempted. - // * RetryableFunc is always granted its full execution time and is not canceled if it exceeds Timeout. - // This means that WithBackoff may not stop exactly after Timeout expires, - // or may not retry at all if the first execution of RetryableFunc already takes longer than Timeout. - Timeout time.Duration - // OnRetryableError is called if a retryable error occurs. - OnRetryableError func(elapsed time.Duration, attempt uint64, err, lastErr error) - // OnSuccess is called once the operation succeeds. - OnSuccess func(elapsed time.Duration, attempt uint64, lastErr error) -} - -// WithBackoff retries the passed function if it fails and the error allows it to retry. -// The specified backoff policy is used to determine how long to sleep between attempts. -func WithBackoff( - ctx context.Context, retryableFunc RetryableFunc, retryable IsRetryable, b backoff.Backoff, settings Settings, -) (err error) { - // Channel for retry deadline, which is set to the channel of NewTimer() if a timeout is configured, - // otherwise nil, so that it blocks forever if there is no timeout. 
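// (A receive from a nil channel blocks forever, which is exactly what makes
// the nil default below act as "no deadline".)
//
// For context, a hedged sketch of a typical WithBackoff call; pingDatabase is
// a placeholder RetryableFunc, and the func literal stands in for a
// backoff.Backoff, which its use as b(attempt) above suggests is a
// func(uint64) time.Duration:
//
//	err := retry.WithBackoff(ctx, pingDatabase, retry.Retryable,
//		func(attempt uint64) time.Duration {
//			return time.Duration(attempt) * 100 * time.Millisecond
//		},
//		retry.Settings{Timeout: retry.DefaultTimeout})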
- var timeout <-chan time.Time - - if settings.Timeout > 0 { - t := time.NewTimer(settings.Timeout) - defer t.Stop() - timeout = t.C - } - - start := time.Now() - timedOut := false - for attempt := uint64(1); ; /* true */ attempt++ { - prevErr := err - - if err = retryableFunc(ctx); err == nil { - if settings.OnSuccess != nil { - settings.OnSuccess(time.Since(start), attempt, prevErr) - } - - return - } - - // Retryable function may have exited prematurely due to context errors. - // We explicitly check the context error here, as the error returned by the retryable function can pass the - // error.Is() checks even though it is not a real context error, e.g. - // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=422 - // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=601 - if errors.Is(ctx.Err(), context.DeadlineExceeded) || errors.Is(ctx.Err(), context.Canceled) { - if prevErr != nil { - err = errors.Wrap(err, prevErr.Error()) - } - - return - } - - if !retryable(err) { - err = errors.Wrap(err, "can't retry") - - return - } - - select { - case <-timeout: - // Stop retrying immediately if executing the retryable function took longer than the timeout. - timedOut = true - default: - } - - if timedOut { - err = errors.Wrap(err, "retry deadline exceeded") - - return - } - - if settings.OnRetryableError != nil { - settings.OnRetryableError(time.Since(start), attempt, err, prevErr) - } - - select { - case <-time.After(b(attempt)): - case <-timeout: - // Do not stop retrying immediately, but start one last attempt to mitigate timing issues where - // the timeout expires while waiting for the next attempt and - // therefore no retries have happened during this possibly long period. - timedOut = true - case <-ctx.Done(): - err = errors.Wrap(ctx.Err(), err.Error()) - - return - } - } -} - -// ResetTimeout changes the possibly expired timer t to expire after duration d. -// -// If the timer has already expired and nothing has been received from its channel, -// it is automatically drained as if the timer had never expired. -func ResetTimeout(t *time.Timer, d time.Duration) { - if !t.Stop() { - <-t.C - } - - t.Reset(d) -} - -// Retryable returns true for common errors that are considered retryable, -// i.e. temporary, timeout, DNS, connection refused and reset, host down and unreachable and -// network down and unreachable errors. In addition, any database error is considered retryable. -func Retryable(err error) bool { - var temporary interface { - Temporary() bool - } - if errors.As(err, &temporary) && temporary.Temporary() { - return true - } - - var timeout interface { - Timeout() bool - } - if errors.As(err, &timeout) && timeout.Timeout() { - return true - } - - var dnsError *net.DNSError - if errors.As(err, &dnsError) { - return true - } - - var opError *net.OpError - if errors.As(err, &opError) { - // OpError provides Temporary() and Timeout(), but not Unwrap(), - // so we have to extract the underlying error ourselves to also check for ECONNREFUSED, - // which is not considered temporary or timed out by Go. - err = opError.Err - } - if errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.ENOENT) { - // syscall errors provide Temporary() and Timeout(), - // which do not include ECONNREFUSED or ENOENT, so we check these ourselves. - return true - } - if errors.Is(err, syscall.ECONNRESET) { - // ECONNRESET is treated as a temporary error by Go only if it comes from calling accept. 
- return true - } - if errors.Is(err, syscall.EHOSTDOWN) || errors.Is(err, syscall.EHOSTUNREACH) { - return true - } - if errors.Is(err, syscall.ENETDOWN) || errors.Is(err, syscall.ENETUNREACH) { - return true - } - if errors.Is(err, syscall.EPIPE) { - return true - } - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { - return true - } - - if errors.Is(err, driver.ErrBadConn) { - return true - } - if errors.Is(err, mysql.ErrInvalidConn) { - return true - } - - var mye *mysql.MySQLError - var pqe *pq.Error - if errors.As(err, &mye) || errors.As(err, &pqe) { - return true - } - - return false -} diff --git a/pkg/structify/structify.go b/pkg/structify/structify.go deleted file mode 100644 index 2b2b5bbbe..000000000 --- a/pkg/structify/structify.go +++ /dev/null @@ -1,179 +0,0 @@ -package structify - -import ( - "encoding" - "fmt" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/pkg/errors" - "golang.org/x/exp/constraints" - "reflect" - "strconv" - "strings" - "unsafe" -) - -// structBranch represents either a leaf or a subTree. -type structBranch struct { - // field specifies the struct field index. - field int - // leaf specifies the map key to parse the struct field from. - leaf string - // subTree specifies the struct field's inner tree. - subTree []structBranch -} - -type MapStructifier = func(map[string]interface{}) (interface{}, error) - -// MakeMapStructifier builds a function which parses a map's string values into a new struct of type t -// and returns a pointer to it. tag specifies which tag connects struct fields to map keys. -// MakeMapStructifier panics if it detects an unsupported type (suitable for usage in init() or global vars). -func MakeMapStructifier(t reflect.Type, tag string) MapStructifier { - tree := buildStructTree(t, tag) - - return func(kv map[string]interface{}) (interface{}, error) { - vPtr := reflect.New(t) - ptr := vPtr.Interface() - - if initer, ok := ptr.(contracts.Initer); ok { - initer.Init() - } - - vPtrElem := vPtr.Elem() - err := errors.Wrapf(structifyMapByTree(kv, tree, vPtrElem, vPtrElem, new([]int)), "can't structify map %#v by tree %#v", kv, tree) - - return ptr, err - } -} - -// buildStructTree assembles a tree which represents the struct t based on tag. -func buildStructTree(t reflect.Type, tag string) []structBranch { - var tree []structBranch - numFields := t.NumField() - - for i := 0; i < numFields; i++ { - if field := t.Field(i); field.PkgPath == "" { - switch tagValue := field.Tag.Get(tag); tagValue { - case "", "-": - case ",inline": - if subTree := buildStructTree(field.Type, tag); subTree != nil { - tree = append(tree, structBranch{i, "", subTree}) - } - default: - // If parseString doesn't support *T, it'll panic. - _ = parseString("", reflect.New(field.Type).Interface()) - - tree = append(tree, structBranch{i, tagValue, nil}) - } - } - } - - return tree -} - -// structifyMapByTree parses src's string values into the struct dest according to tree's specification. 
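// For context, a sketch of the exported entry point MakeMapStructifier
// defined above; the struct, tag, and values are illustrative:
//
//	type endpoint struct {
//		Host string `structify:"host"`
//		Port uint16 `structify:"port"`
//	}
//
//	var structifyEndpoint = structify.MakeMapStructifier(reflect.TypeOf(endpoint{}), "structify")
//
//	// Later, e.g. when consuming string-valued fields from a stream message:
//	v, err := structifyEndpoint(map[string]interface{}{"host": "db1.example.com", "port": "5432"})
//	if err == nil {
//		ep := v.(*endpoint) // ep.Port == 5432
//		_ = ep
//	}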
-func structifyMapByTree(src map[string]interface{}, tree []structBranch, dest, root reflect.Value, stack *[]int) error { - *stack = append(*stack, 0) - defer func() { - *stack = (*stack)[:len(*stack)-1] - }() - - for _, branch := range tree { - (*stack)[len(*stack)-1] = branch.field - - if branch.subTree == nil { - if v, ok := src[branch.leaf]; ok { - if vs, ok := v.(string); ok { - if err := parseString(vs, dest.Field(branch.field).Addr().Interface()); err != nil { - rt := root.Type() - typ := rt - var path []string - - for _, i := range *stack { - f := typ.Field(i) - path = append(path, f.Name) - typ = f.Type - } - - return errors.Wrapf(err, "can't parse %s into the %s %s#%s: %s", - branch.leaf, typ.Name(), rt.Name(), strings.Join(path, "."), vs) - } - } - } - } else if err := structifyMapByTree(src, branch.subTree, dest.Field(branch.field), root, stack); err != nil { - return err - } - } - - return nil -} - -// parseString parses src into *dest. -func parseString(src string, dest interface{}) error { - switch ptr := dest.(type) { - case encoding.TextUnmarshaler: - return ptr.UnmarshalText([]byte(src)) - case *string: - *ptr = src - return nil - case **string: - *ptr = &src - return nil - case *uint8: - return parseUint(src, ptr) - case *uint16: - return parseUint(src, ptr) - case *uint32: - return parseUint(src, ptr) - case *uint64: - return parseUint(src, ptr) - case *int8: - return parseInt(src, ptr) - case *int16: - return parseInt(src, ptr) - case *int32: - return parseInt(src, ptr) - case *int64: - return parseInt(src, ptr) - case *float32: - return parseFloat(src, ptr) - case *float64: - return parseFloat(src, ptr) - default: - panic(fmt.Sprintf("unsupported type: %T", dest)) - } -} - -// parseUint parses src into *dest. -func parseUint[T constraints.Unsigned](src string, dest *T) error { - i, err := strconv.ParseUint(src, 10, bitSizeOf[T]()) - if err == nil { - *dest = T(i) - } - - return err -} - -// parseInt parses src into *dest. -func parseInt[T constraints.Signed](src string, dest *T) error { - i, err := strconv.ParseInt(src, 10, bitSizeOf[T]()) - if err == nil { - *dest = T(i) - } - - return err -} - -// parseFloat parses src into *dest. -func parseFloat[T constraints.Float](src string, dest *T) error { - f, err := strconv.ParseFloat(src, bitSizeOf[T]()) - if err == nil { - *dest = T(f) - } - - return err -} - -func bitSizeOf[T any]() int { - var x T - return int(unsafe.Sizeof(x) * 8) -} diff --git a/pkg/types/binary.go b/pkg/types/binary.go deleted file mode 100644 index 9fb0a9f4f..000000000 --- a/pkg/types/binary.go +++ /dev/null @@ -1,137 +0,0 @@ -package types - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding" - "encoding/hex" - "encoding/json" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/pkg/errors" -) - -// Binary nullable byte string. Hex as JSON. -type Binary []byte - -// nullBinary for validating whether a Binary is valid. -var nullBinary Binary - -// Equal returns whether the binaries are the same length and -// contain the same bytes. -func (binary Binary) Equal(equaler contracts.Equaler) bool { - b, ok := equaler.(Binary) - if !ok { - panic("bad Binary type assertion") - } - - return bytes.Equal(binary, b) -} - -// Valid returns whether the Binary is valid. -func (binary Binary) Valid() bool { - return !bytes.Equal(binary, nullBinary) -} - -// String returns the hex string representation form of the Binary. 
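// A round-trip sketch for the hex representation (UnmarshalText is defined
// further below):
//
//	var b Binary
//	_ = b.UnmarshalText([]byte("badcafe0"))
//	fmt.Println(b.String()) // badcafe0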
-func (binary Binary) String() string { - return hex.EncodeToString(binary) -} - -// MarshalText implements a custom marshal function to encode -// the Binary as hex. MarshalText implements the -// encoding.TextMarshaler interface. -func (binary Binary) MarshalText() ([]byte, error) { - return []byte(binary.String()), nil -} - -// UnmarshalText implements a custom unmarshal function to decode -// hex into a Binary. UnmarshalText implements the -// encoding.TextUnmarshaler interface. -func (binary *Binary) UnmarshalText(text []byte) error { - b := make([]byte, hex.DecodedLen(len(text))) - _, err := hex.Decode(b, text) - if err != nil { - return internal.CantDecodeHex(err, string(text)) - } - *binary = b - - return nil -} - -// MarshalJSON implements a custom marshal function to encode the Binary -// as a hex string. MarshalJSON implements the json.Marshaler interface. -// Supports JSON null. -func (binary Binary) MarshalJSON() ([]byte, error) { - if !binary.Valid() { - return []byte("null"), nil - } - - return internal.MarshalJSON(binary.String()) -} - -// UnmarshalJSON implements a custom unmarshal function to decode -// a JSON hex string into a Binary. UnmarshalJSON implements the -// json.Unmarshaler interface. Supports JSON null. -func (binary *Binary) UnmarshalJSON(data []byte) error { - if string(data) == "null" || len(data) == 0 { - return nil - } - - var s string - if err := internal.UnmarshalJSON(data, &s); err != nil { - return err - } - b, err := hex.DecodeString(s) - if err != nil { - return internal.CantDecodeHex(err, s) - } - *binary = b - - return nil -} - -// Scan implements the sql.Scanner interface. -// Supports SQL NULL. -func (binary *Binary) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case []byte: - if len(src) == 0 { - return nil - } - - b := make([]byte, len(src)) - copy(b, src) - *binary = b - - default: - return errors.Errorf("unable to scan type %T into Binary", src) - } - - return nil -} - -// Value implements the driver.Valuer interface. -// Supports SQL NULL. -func (binary Binary) Value() (driver.Value, error) { - if !binary.Valid() { - return nil, nil - } - - return []byte(binary), nil -} - -// Assert interface compliance.
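// (Compile-time assertions like the ones below are a common Go idiom: each
// conversion fails to build if Binary stops satisfying the interface.)
//
// For reference, a sketch of how the Scan/Value pair above maps SQL NULL:
//
//	var b Binary
//	_ = b.Scan(nil)                // stays invalid: Valid() == false
//	v, _ := b.Value()              // v == nil, written back as SQL NULL
//	_ = b.Scan([]byte{0xbe, 0xef})
//	fmt.Println(b.String())        // beef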
-var ( - _ contracts.ID = (*Binary)(nil) - _ encoding.TextMarshaler = (*Binary)(nil) - _ encoding.TextUnmarshaler = (*Binary)(nil) - _ json.Marshaler = (*Binary)(nil) - _ json.Unmarshaler = (*Binary)(nil) - _ sql.Scanner = (*Binary)(nil) - _ driver.Valuer = (*Binary)(nil) -) diff --git a/pkg/types/binary_test.go b/pkg/types/binary_test.go deleted file mode 100644 index 2a4f82920..000000000 --- a/pkg/types/binary_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package types - -import ( - "github.com/stretchr/testify/require" - "testing" - "unicode/utf8" -) - -func TestBinary_MarshalJSON(t *testing.T) { - subtests := []struct { - name string - input Binary - output string - }{ - {"nil", nil, `null`}, - {"empty", make(Binary, 0, 1), `null`}, - {"space", Binary(" "), `"20"`}, - } - - for _, st := range subtests { - t.Run(st.name, func(t *testing.T) { - actual, err := st.input.MarshalJSON() - - require.NoError(t, err) - require.True(t, utf8.Valid(actual)) - require.Equal(t, st.output, string(actual)) - }) - } -} diff --git a/pkg/types/bool.go b/pkg/types/bool.go deleted file mode 100644 index 2b96c0992..000000000 --- a/pkg/types/bool.go +++ /dev/null @@ -1,105 +0,0 @@ -package types - -import ( - "database/sql" - "database/sql/driver" - "encoding" - "encoding/json" - "github.com/icinga/icingadb/internal" - "github.com/pkg/errors" - "strconv" -) - -var ( - enum = map[bool]string{ - true: "y", - false: "n", - } -) - -// Bool represents a bool for ENUM ('y', 'n'), which can be NULL. -type Bool struct { - Bool bool - Valid bool // Valid is true if Bool is not NULL -} - -// MarshalJSON implements the json.Marshaler interface. -func (b Bool) MarshalJSON() ([]byte, error) { - if !b.Valid { - return []byte("null"), nil - } - - return internal.MarshalJSON(b.Bool) -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (b *Bool) UnmarshalText(text []byte) error { - parsed, err := strconv.ParseUint(string(text), 10, 64) - if err != nil { - return internal.CantParseUint64(err, string(text)) - } - - *b = Bool{parsed != 0, true} - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (b *Bool) UnmarshalJSON(data []byte) error { - if string(data) == "null" || len(data) == 0 { - return nil - } - - if err := internal.UnmarshalJSON(data, &b.Bool); err != nil { - return err - } - - b.Valid = true - - return nil -} - -// Scan implements the sql.Scanner interface. -// Supports SQL NULL. -func (b *Bool) Scan(src interface{}) error { - if src == nil { - b.Bool, b.Valid = false, false - return nil - } - - v, ok := src.([]byte) - if !ok { - return errors.Errorf("bad []byte type assertion from %#v", src) - } - - switch string(v) { - case "y": - b.Bool = true - case "n": - b.Bool = false - default: - return errors.Errorf("bad bool %#v", v) - } - - b.Valid = true - - return nil -} - -// Value implements the driver.Valuer interface. -// Supports SQL NULL. -func (b Bool) Value() (driver.Value, error) { - if !b.Valid { - return nil, nil - } - - return enum[b.Bool], nil -} - -// Assert interface compliance. 
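// A sketch of the ENUM mapping implemented above; the column value is what
// MySQL/PostgreSQL hand to Scan:
//
//	var b Bool
//	_ = b.Scan([]byte("y"))          // Bool: true, Valid: true
//	v, _ := b.Value()                // v == "y"
//	_ = b.UnmarshalText([]byte("0")) // Bool: false, Valid: true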
-var ( - _ json.Marshaler = (*Bool)(nil) - _ encoding.TextUnmarshaler = (*Bool)(nil) - _ json.Unmarshaler = (*Bool)(nil) - _ sql.Scanner = (*Bool)(nil) - _ driver.Valuer = (*Bool)(nil) -) diff --git a/pkg/types/bool_test.go b/pkg/types/bool_test.go deleted file mode 100644 index fe49588c8..000000000 --- a/pkg/types/bool_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "fmt" - "github.com/stretchr/testify/require" - "testing" - "unicode/utf8" -) - -func TestBool_MarshalJSON(t *testing.T) { - subtests := []struct { - input Bool - output string - }{ - {Bool{Bool: false, Valid: false}, `null`}, - {Bool{Bool: false, Valid: true}, `false`}, - {Bool{Bool: true, Valid: false}, `null`}, - {Bool{Bool: true, Valid: true}, `true`}, - } - - for _, st := range subtests { - t.Run(fmt.Sprintf("Bool-%#v_Valid-%#v", st.input.Bool, st.input.Valid), func(t *testing.T) { - actual, err := st.input.MarshalJSON() - - require.NoError(t, err) - require.True(t, utf8.Valid(actual)) - require.Equal(t, st.output, string(actual)) - }) - } -} diff --git a/pkg/types/float.go b/pkg/types/float.go deleted file mode 100644 index a4aedd672..000000000 --- a/pkg/types/float.go +++ /dev/null @@ -1,68 +0,0 @@ -package types - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding" - "encoding/json" - "github.com/icinga/icingadb/internal" - "strconv" -) - -// Float adds JSON support to sql.NullFloat64. -type Float struct { - sql.NullFloat64 -} - -// MarshalJSON implements the json.Marshaler interface. -// Supports JSON null. -func (f Float) MarshalJSON() ([]byte, error) { - var v interface{} - if f.Valid { - v = f.Float64 - } - - return internal.MarshalJSON(v) -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (f *Float) UnmarshalText(text []byte) error { - parsed, err := strconv.ParseFloat(string(text), 64) - if err != nil { - return internal.CantParseFloat64(err, string(text)) - } - - *f = Float{sql.NullFloat64{ - Float64: parsed, - Valid: true, - }} - - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Supports JSON null. -func (f *Float) UnmarshalJSON(data []byte) error { - // Ignore null, like in the main JSON package. - if bytes.HasPrefix(data, []byte{'n'}) { - return nil - } - - if err := internal.UnmarshalJSON(data, &f.Float64); err != nil { - return err - } - - f.Valid = true - - return nil -} - -// Assert interface compliance. -var ( - _ json.Marshaler = Float{} - _ encoding.TextUnmarshaler = (*Float)(nil) - _ json.Unmarshaler = (*Float)(nil) - _ driver.Valuer = Float{} - _ sql.Scanner = (*Float)(nil) -) diff --git a/pkg/types/int.go b/pkg/types/int.go deleted file mode 100644 index 0e51f2101..000000000 --- a/pkg/types/int.go +++ /dev/null @@ -1,68 +0,0 @@ -package types - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding" - "encoding/json" - "github.com/icinga/icingadb/internal" - "strconv" -) - -// Int adds JSON support to sql.NullInt64. -type Int struct { - sql.NullInt64 -} - -// MarshalJSON implements the json.Marshaler interface. -// Supports JSON null. -func (i Int) MarshalJSON() ([]byte, error) { - var v interface{} - if i.Valid { - v = i.Int64 - } - - return internal.MarshalJSON(v) -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
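// (Text unmarshalling is the path structify's parseString takes for types
// implementing encoding.TextUnmarshaler, so numeric fields that arrive as
// strings, e.g. "42", are parsed here rather than via JSON.)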
-func (i *Int) UnmarshalText(text []byte) error { - parsed, err := strconv.ParseInt(string(text), 10, 64) - if err != nil { - return internal.CantParseInt64(err, string(text)) - } - - *i = Int{sql.NullInt64{ - Int64: parsed, - Valid: true, - }} - - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Supports JSON null. -func (i *Int) UnmarshalJSON(data []byte) error { - // Ignore null, like in the main JSON package. - if bytes.HasPrefix(data, []byte{'n'}) { - return nil - } - - if err := internal.UnmarshalJSON(data, &i.Int64); err != nil { - return err - } - - i.Valid = true - - return nil -} - -// Assert interface compliance. -var ( - _ json.Marshaler = Int{} - _ json.Unmarshaler = (*Int)(nil) - _ encoding.TextUnmarshaler = (*Int)(nil) - _ driver.Valuer = Int{} - _ sql.Scanner = (*Int)(nil) -) diff --git a/pkg/types/string.go b/pkg/types/string.go deleted file mode 100644 index ce2a4ac69..000000000 --- a/pkg/types/string.go +++ /dev/null @@ -1,82 +0,0 @@ -package types - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding" - "encoding/json" - "github.com/icinga/icingadb/internal" - "strings" -) - -// String adds JSON support to sql.NullString. -type String struct { - sql.NullString -} - -// MakeString constructs a new non-NULL String from s. -func MakeString(s string) String { - return String{sql.NullString{ - String: s, - Valid: true, - }} -} - -// MarshalJSON implements the json.Marshaler interface. -// Supports JSON null. -func (s String) MarshalJSON() ([]byte, error) { - var v interface{} - if s.Valid { - v = s.String - } - - return internal.MarshalJSON(v) -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (s *String) UnmarshalText(text []byte) error { - *s = String{sql.NullString{ - String: string(text), - Valid: true, - }} - - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Supports JSON null. -func (s *String) UnmarshalJSON(data []byte) error { - // Ignore null, like in the main JSON package. - if bytes.HasPrefix(data, []byte{'n'}) { - return nil - } - - if err := internal.UnmarshalJSON(data, &s.String); err != nil { - return err - } - - s.Valid = true - - return nil -} - -// Value implements the driver.Valuer interface. -// Supports SQL NULL. -func (s String) Value() (driver.Value, error) { - if !s.Valid { - return nil, nil - } - - // PostgreSQL does not allow null bytes in varchar, char and text fields. - return strings.ReplaceAll(s.String, "\x00", ""), nil -} - -// Assert interface compliance. -var ( - _ json.Marshaler = String{} - _ encoding.TextUnmarshaler = (*String)(nil) - _ json.Unmarshaler = (*String)(nil) - _ driver.Valuer = String{} - _ sql.Scanner = (*String)(nil) -) diff --git a/pkg/types/unix_milli.go b/pkg/types/unix_milli.go deleted file mode 100644 index 3f6f988f3..000000000 --- a/pkg/types/unix_milli.go +++ /dev/null @@ -1,95 +0,0 @@ -package types - -import ( - "database/sql" - "database/sql/driver" - "encoding" - "encoding/json" - "github.com/icinga/icingadb/internal" - "github.com/icinga/icingadb/pkg/utils" - "github.com/pkg/errors" - "strconv" - "time" -) - -// UnixMilli is a nullable millisecond UNIX timestamp in databases and JSON. -type UnixMilli time.Time - -// Time returns the time.Time conversion of UnixMilli. -func (t UnixMilli) Time() time.Time { - return time.Time(t) -} - -// MarshalJSON implements the json.Marshaler interface. -// Marshals to milliseconds. Supports JSON null. 
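// A round-trip sketch at a fixed instant:
//
//	t := UnixMilli(time.UnixMilli(1234567890062))
//	out, _ := t.MarshalJSON()   // []byte("1234567890062")
//	var back UnixMilli
//	_ = back.UnmarshalJSON(out) // back.Time().UnixMilli() == 1234567890062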
-func (t UnixMilli) MarshalJSON() ([]byte, error) { - if time.Time(t).IsZero() { - return []byte("null"), nil - } - - return []byte(strconv.FormatInt(time.Time(t).UnixMilli(), 10)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (t *UnixMilli) UnmarshalText(text []byte) error { - parsed, err := strconv.ParseFloat(string(text), 64) - if err != nil { - return internal.CantParseFloat64(err, string(text)) - } - - *t = UnixMilli(utils.FromUnixMilli(int64(parsed))) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Unmarshals from milliseconds. Supports JSON null. -func (t *UnixMilli) UnmarshalJSON(data []byte) error { - if string(data) == "null" || len(data) == 0 { - return nil - } - - ms, err := strconv.ParseFloat(string(data), 64) - if err != nil { - return internal.CantParseFloat64(err, string(data)) - } - tt := utils.FromUnixMilli(int64(ms)) - *t = UnixMilli(tt) - - return nil -} - -// Scan implements the sql.Scanner interface. -// Scans from milliseconds. Supports SQL NULL. -func (t *UnixMilli) Scan(src interface{}) error { - if src == nil { - return nil - } - - v, ok := src.(int64) - if !ok { - return errors.Errorf("bad int64 type assertion from %#v", src) - } - tt := utils.FromUnixMilli(v) - *t = UnixMilli(tt) - - return nil -} - -// Value implements the driver.Valuer interface. -// Returns milliseconds. Supports SQL NULL. -func (t UnixMilli) Value() (driver.Value, error) { - if t.Time().IsZero() { - return nil, nil - } - - return t.Time().UnixMilli(), nil -} - -// Assert interface compliance. -var ( - _ json.Marshaler = (*UnixMilli)(nil) - _ encoding.TextUnmarshaler = (*UnixMilli)(nil) - _ json.Unmarshaler = (*UnixMilli)(nil) - _ sql.Scanner = (*UnixMilli)(nil) - _ driver.Valuer = (*UnixMilli)(nil) -) diff --git a/pkg/types/unix_milli_test.go b/pkg/types/unix_milli_test.go deleted file mode 100644 index 985fa2ac1..000000000 --- a/pkg/types/unix_milli_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "github.com/stretchr/testify/require" - "testing" - "time" - "unicode/utf8" -) - -func TestUnixMilli_MarshalJSON(t *testing.T) { - subtests := []struct { - name string - input UnixMilli - output string - }{ - {"zero", UnixMilli{}, `null`}, - {"epoch", UnixMilli(time.Unix(0, 0)), `0`}, - {"nonzero", UnixMilli(time.Unix(1234567890, 62500000)), `1234567890062`}, - } - - for _, st := range subtests { - t.Run(st.name, func(t *testing.T) { - actual, err := st.input.MarshalJSON() - - require.NoError(t, err) - require.True(t, utf8.Valid(actual)) - require.Equal(t, st.output, string(actual)) - }) - } -} diff --git a/pkg/types/uuid.go b/pkg/types/uuid.go deleted file mode 100644 index 02acbcdb1..000000000 --- a/pkg/types/uuid.go +++ /dev/null @@ -1,24 +0,0 @@ -package types - -import ( - "database/sql/driver" - "encoding" - "github.com/google/uuid" -) - -// UUID is like uuid.UUID, but marshals itself binarily (not like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) in SQL context. -type UUID struct { - uuid.UUID -} - -// Value implements driver.Valuer. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.UUID[:], nil -} - -// Assert interface compliance. 
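// Storing the raw 16 bytes instead of the 36-character text form keeps
// indexed ID columns small. A sketch:
//
//	u := UUID{UUID: uuid.New()}
//	v, _ := u.Value() // driver.Value holding 16 raw bytes, not "xxxxxxxx-..."
//	_ = v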
-var ( - _ encoding.TextUnmarshaler = (*UUID)(nil) - _ driver.Valuer = UUID{} - _ driver.Valuer = (*UUID)(nil) -) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go deleted file mode 100644 index 4b0fe1df0..000000000 --- a/pkg/utils/utils.go +++ /dev/null @@ -1,231 +0,0 @@ -package utils - -import ( - "context" - "crypto/sha1" - "fmt" - "github.com/go-sql-driver/mysql" - "github.com/icinga/icingadb/pkg/contracts" - "github.com/lib/pq" - "github.com/pkg/errors" - "golang.org/x/exp/utf8string" - "math" - "net" - "os" - "path/filepath" - "strings" - "time" - "unicode" -) - -// FromUnixMilli creates and returns a time.Time value -// from the given milliseconds since the Unix epoch ms. -func FromUnixMilli(ms int64) time.Time { - sec, dec := math.Modf(float64(ms) / 1e3) - - return time.Unix(int64(sec), int64(dec*(1e9))) -} - -// Name returns the declared name of type t. -// Name is used in combination with Key -// to automatically guess an entity's -// database table and Redis key. -func Name(t interface{}) string { - s := strings.TrimLeft(fmt.Sprintf("%T", t), "*") - - return s[strings.LastIndex(s, ".")+1:] -} - -// TableName returns the table of t. -func TableName(t interface{}) string { - if tn, ok := t.(contracts.TableNamer); ok { - return tn.TableName() - } else { - return Key(Name(t), '_') - } -} - -// Key returns the name with all Unicode letters mapped to lower case letters, -// with an additional separator in front of each original upper case letter. -func Key(name string, sep byte) string { - return ConvertCamelCase(name, unicode.LowerCase, sep) -} - -// Timed calls the given callback with the time that has elapsed since the start. -// -// Timed should be installed by defer: -// -// func TimedExample(logger *zap.SugaredLogger) { -// defer utils.Timed(time.Now(), func(elapsed time.Duration) { -// logger.Debugf("Executed job in %s", elapsed) -// }) -// job() -// } -func Timed(start time.Time, callback func(elapsed time.Duration)) { - callback(time.Since(start)) -} - -// BatchSliceOfStrings groups the given keys into chunks of size count and streams them into a returned channel. -func BatchSliceOfStrings(ctx context.Context, keys []string, count int) <-chan []string { - batches := make(chan []string) - - go func() { - defer close(batches) - - for i := 0; i < len(keys); i += count { - end := i + count - if end > len(keys) { - end = len(keys) - } - - select { - case batches <- keys[i:end]: - case <-ctx.Done(): - return - } - } - }() - - return batches -} - -// IsContextCanceled returns whether the given error is context.Canceled. -func IsContextCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// Checksum returns the SHA-1 checksum of the data. -func Checksum(data interface{}) []byte { - var chksm [sha1.Size]byte - - switch data := data.(type) { - case string: - chksm = sha1.Sum([]byte(data)) - case []byte: - chksm = sha1.Sum(data) - default: - panic(fmt.Sprintf("Unable to create checksum for type %T", data)) - } - - return chksm[:] -} - -// Fatal panics with the given error. -func Fatal(err error) { - panic(err) -} - -// IsDeadlock returns whether the given error signals serialization failure. 
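// (For reference: MySQL 1205 is a lock wait timeout and 1213 a deadlock;
// PostgreSQL 40001 is serialization_failure and 40P01 is deadlock_detected.)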
-func IsDeadlock(err error) bool { - var e *mysql.MySQLError - if errors.As(err, &e) { - switch e.Number { - case 1205, 1213: - return true - default: - return false - } - } - - var pe *pq.Error - if errors.As(err, &pe) { - switch pe.Code { - case "40001", "40P01": - return true - } - } - - return false -} - -var ellipsis = utf8string.NewString("...") - -// Ellipsize shortens s to <=limit runes and indicates shortening by "...". -func Ellipsize(s string, limit int) string { - utf8 := utf8string.NewString(s) - switch { - case utf8.RuneCount() <= limit: - return s - case utf8.RuneCount() <= ellipsis.RuneCount(): - return ellipsis.String() - default: - return utf8.Slice(0, limit-ellipsis.RuneCount()) + ellipsis.String() - } -} - -// ConvertCamelCase converts a (lower) CamelCase string into various cases. -// _case must be unicode.Lower or unicode.Upper. -// -// Example usage: -// -// # snake_case -// ConvertCamelCase(s, unicode.Lower, '_') -// -// # SCREAMING_SNAKE_CASE -// ConvertCamelCase(s, unicode.Upper, '_') -// -// # kebab-case -// ConvertCamelCase(s, unicode.Lower, '-') -// -// # SCREAMING-KEBAB-CASE -// ConvertCamelCase(s, unicode.Upper, '-') -// -// # other.separator -// ConvertCamelCase(s, unicode.Lower, '.') -func ConvertCamelCase(s string, _case int, sep byte) string { - r := []rune(s) - b := strings.Builder{} - b.Grow(len(r) + 2) // nominal 2 bytes of extra space for inserted delimiters - - b.WriteRune(unicode.To(_case, r[0])) - for _, r := range r[1:] { - if sep != 0 && unicode.IsUpper(r) { - b.WriteByte(sep) - } - - b.WriteRune(unicode.To(_case, r)) - } - - return b.String() -} - -// AppName returns the name of the executable that started this program (process). -func AppName() string { - exe, err := os.Executable() - if err != nil { - exe = os.Args[0] - } - - return filepath.Base(exe) -} - -// MaxInt returns the larger of the given integers. -func MaxInt(x, y int) int { - if x > y { - return x - } - - return y -} - -// JoinHostPort is like its equivalent in net., but handles UNIX sockets as well. -func JoinHostPort(host string, port int) string { - if strings.HasPrefix(host, "/") { - return host - } - - return net.JoinHostPort(host, fmt.Sprint(port)) -} - -// ChanFromSlice takes a slice of values and returns a channel from which these values can be received. -// This channel is closed after the last value was sent. -func ChanFromSlice[T any](values []T) <-chan T { - ch := make(chan T, len(values)) - for _, value := range values { - ch <- value - } - - close(ch) - - return ch -} diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go deleted file mode 100644 index b0ea54b8f..000000000 --- a/pkg/utils/utils_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package utils - -import ( - "github.com/stretchr/testify/require" - "testing" -) - -func TestChanFromSlice(t *testing.T) { - t.Run("Nil", func(t *testing.T) { - ch := ChanFromSlice[int](nil) - require.NotNil(t, ch) - requireClosedEmpty(t, ch) - }) - - t.Run("Empty", func(t *testing.T) { - ch := ChanFromSlice([]int{}) - require.NotNil(t, ch) - requireClosedEmpty(t, ch) - }) - - t.Run("NonEmpty", func(t *testing.T) { - ch := ChanFromSlice([]int{42, 23, 1337}) - require.NotNil(t, ch) - requireReceive(t, ch, 42) - requireReceive(t, ch, 23) - requireReceive(t, ch, 1337) - requireClosedEmpty(t, ch) - }) -} - -// requireReceive is a helper function to check if a value can immediately be received from a channel. 
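// (The select with a default case makes the assertion non-blocking: a missing
// value fails the test immediately instead of hanging, and t.Helper()
// attributes that failure to the caller's line.)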
-func requireReceive(t *testing.T, ch <-chan int, expected int) { - t.Helper() - - select { - case v, ok := <-ch: - require.True(t, ok, "receiving should return a value") - require.Equal(t, expected, v) - default: - require.Fail(t, "receiving should not block") - } -} - -// requireClosedEmpty is a helper function to check if the channel is closed and empty. -func requireClosedEmpty(t *testing.T, ch <-chan int) { - t.Helper() - - select { - case _, ok := <-ch: - require.False(t, ok, "receiving from channel should not return anything") - default: - require.Fail(t, "receiving should not block") - } -} diff --git a/pkg/version/version.go b/pkg/version/version.go deleted file mode 100644 index 250318c7e..000000000 --- a/pkg/version/version.go +++ /dev/null @@ -1,180 +0,0 @@ -package version - -import ( - "bufio" - "errors" - "fmt" - "os" - "runtime" - "runtime/debug" - "strconv" - "strings" -) - -type VersionInfo struct { - Version string - Commit string -} - -// Version determines version and commit information based on multiple data sources: -// - Version information dynamically added by `git archive` in the remaining two parameters. -// - A hardcoded version number passed as first parameter. -// - Commit information added to the binary by `go build`. -// -// It's supposed to be called like this in combination with setting the `export-subst` attribute for the corresponding -// file in .gitattributes: -// -// var Version = version.Version("1.0.0-rc2", "$Format:%(describe)$", "$Format:%H$") -// -// When exported using `git archive`, the placeholders are replaced in the file and this version information is -// preferred. Otherwise the hardcoded version is used and augmented with commit information from the build metadata. -func Version(version, gitDescribe, gitHash string) *VersionInfo { - const hashLen = 7 // Same truncation length for the commit hash as used by git describe. - - if !strings.HasPrefix(gitDescribe, "$") && !strings.HasPrefix(gitHash, "$") { - if strings.HasPrefix(gitDescribe, "%") { - // Only Git 2.32+ supports %(describe), older versions don't expand it but keep it as-is. - // Fall back to the hardcoded version augmented with the commit hash. - gitDescribe = version - - if len(gitHash) >= hashLen { - gitDescribe += "-g" + gitHash[:hashLen] - } - } - - return &VersionInfo{ - Version: gitDescribe, - Commit: gitHash, - } - } else { - commit := "" - - if info, ok := debug.ReadBuildInfo(); ok { - modified := false - - for _, setting := range info.Settings { - switch setting.Key { - case "vcs.revision": - commit = setting.Value - case "vcs.modified": - modified, _ = strconv.ParseBool(setting.Value) - } - } - - if len(commit) >= hashLen { - version += "-g" + commit[:hashLen] - - if modified { - version += "-dirty" - commit += " (modified)" - } - } - } - - return &VersionInfo{ - Version: version, - Commit: commit, - } - } -} - -// Print writes verbose version output to stdout. -func (v *VersionInfo) Print() { - fmt.Println("Icinga DB version:", v.Version) - fmt.Println() - - fmt.Println("Build information:") - fmt.Printf(" Go version: %s (%s, %s)\n", runtime.Version(), runtime.GOOS, runtime.GOARCH) - if v.Commit != "" { - fmt.Println(" Git commit:", v.Commit) - } - - if r, err := readOsRelease(); err == nil { - fmt.Println() - fmt.Println("System information:") - fmt.Println(" Platform:", r.Name) - fmt.Println(" Platform version:", r.DisplayVersion()) - } -} - -// osRelease contains the information obtained from the os-release file.
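// For a typical /etc/os-release such as
//
//	NAME="Debian GNU/Linux"
//	VERSION="12 (bookworm)"
//	VERSION_ID="12"
//
// readOsRelease below yields Name "Debian GNU/Linux", and DisplayVersion
// returns "12 (bookworm)", since VERSION is preferred over VERSION_ID.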
-type osRelease struct { - Name string - Version string - VersionId string - BuildId string -} - -// DisplayVersion returns the most suitable version information for display purposes. -func (o *osRelease) DisplayVersion() string { - if o.Version != "" { - // Most distributions set VERSION - return o.Version - } else if o.VersionId != "" { - // Some only set VERSION_ID (Alpine Linux for example) - return o.VersionId - } else if o.BuildId != "" { - // Others only set BUILD_ID (Arch Linux for example) - return o.BuildId - } else { - return "(unknown)" - } -} - -// readOsRelease reads and parses the os-release file. -func readOsRelease() (*osRelease, error) { - for _, path := range []string{"/etc/os-release", "/usr/lib/os-release"} { - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - continue // Try next path. - } else { - return nil, err - } - } - - o := &osRelease{ - Name: "Linux", // Suggested default as per os-release(5) man page. - } - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "#") { - continue // Ignore comment. - } - - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - continue // Ignore empty or possibly malformed line. - } - - key := parts[0] - val := parts[1] - - // Unquote strings. This isn't fully compliant with the specification which allows using some shell escape - // sequences. However, typically quotes are only used to allow whitespace within the value. - if len(val) >= 2 && (val[0] == '"' || val[0] == '\'') && val[0] == val[len(val)-1] { - val = val[1 : len(val)-1] - } - - switch key { - case "NAME": - o.Name = val - case "VERSION": - o.Version = val - case "VERSION_ID": - o.VersionId = val - case "BUILD_ID": - o.BuildId = val - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - - return o, nil - } - - return nil, errors.New("os-release file not found") -}
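// Taken together, a sketch of Version's two outcomes; the hashes and version
// numbers are illustrative:
//
//	// From a `git archive` export, the placeholders were substituted:
//	version.Version("1.2.0", "v1.2.0-5-gdeadbee", "deadbee...") // Version: "v1.2.0-5-gdeadbee"
//
//	// From a regular `go build`, the placeholders still start with "$", so the
//	// hardcoded version is augmented from debug.ReadBuildInfo:
//	version.Version("1.2.0", "$Format:%(describe)$", "$Format:%H$") // Version: "1.2.0-gdeadbee", plus "-dirty" if modified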